Compare commits
10 Commits: 894ebc4ec8 ... master

SHA1: 7058c79975, d488807c57, 7e026bedb2, 33117cde71, 7d2cde304d, 409b80994a, 6fb11cb054, 7a0bb4179d, fe3280b91d, 3e85c3829d
.gitignore (vendored), 4 changed lines

@@ -122,7 +122,3 @@ dmypy.json
 # Pyre type checker
 .pyre/
 
-# vim
-.vim
-
-
README.md, 26 changed lines

@@ -6,29 +6,3 @@ Cette fois ci, on utilise:
 - Des fichiers yaml pour les infos sur les élèves
 - Des notebooks pour l'analyse
 - Papermill pour produire les notesbooks à partir de template
-
-## Les fichiers CSV
-
-les paramètres sont décris dans ./recopytex/config.py
-
-### Descriptions des questions
-
-- Trimestre
-- Nom
-- Date
-- Exercice
-- Question
-- Competence
-- Domaine
-- Commentaire
-- Bareme
-- Est_nivele
-
-
-### Valeurs pour notes les élèves
-
-- Score: 0, 1, 2, 3
-- Pas de réponses: .
-- Absent: a
-- Dispensé: (vide)
-
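As a quick illustration of the CSV layout that the removed README section described, one graded question can be pictured as a mapping from those column names to values, plus one column per student holding the score codes 0-3, "." (no answer), "a" (absent) or an empty cell (exempt). A minimal Python sketch with invented values; the student names are borrowed from the roster removed further down.

    # Hypothetical row of a scores CSV (values invented for illustration).
    row = {
        "Trimestre": "1", "Nom": "DS1", "Date": "06/02/2020",
        "Exercice": "Exercice 1", "Question": "1a",
        "Competence": "Calculer", "Domaine": "Fractions", "Commentaire": "",
        "Bareme": 3, "Est_nivele": 1,
        "Star Tice": 2, "Umberto Dingate": ".", "Starlin Crangle": "a",
    }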
Deleted file (@@ -1,32 +0,0 @@):

---
source: ./
output: ./
templates: templates/

competences:
  Calculer:
    name: Calculer
    abrv: Cal
  Représenter:
    name: Représenter
    abrv: Rep
  Modéliser:
    name: Modéliser
    abrv: Mod
  Raisonner:
    name: Raisonner
    abrv: Rai
  Calculer:
    name: Calculer
    abrv: Cal
  Communiquer:
    name: Communiquer
    abrv: Com


tribes:
  - name: Tribe1
    type: Type1
    students: tribe1.csv
  - name: Tribe2
    students: tribe2.csv
Deleted file (@@ -1,21 +0,0 @@):

Nom,email
Star Tice,stice0@jalbum.net
Umberto Dingate,udingate1@tumblr.com
Starlin Crangle,scrangle2@wufoo.com
Humbert Bourcq,hbourcq3@g.co
Gabriella Handyside,ghandyside4@patch.com
Stewart Eaves,seaves5@ycombinator.com
Erick Going,egoing6@va.gov
Ase Praton,apraton7@va.gov
Rollins Planks,rplanks8@delicious.com
Dunstan Sarjant,dsarjant9@naver.com
Stacy Guiton,sguitona@themeforest.net
Ange Stanes,astanesb@marriott.com
Amabelle Elleton,aelletonc@squidoo.com
Darn Broomhall,dbroomhalld@cisco.com
Dyan Chatto,dchattoe@npr.org
Keane Rennebach,krennebachf@dot.gov
Nari Paulton,npaultong@gov.uk
Brandy Wase,bwaseh@ftc.gov
Jaclyn Firidolfi,jfiridolfii@reuters.com
Violette Lockney,vlockneyj@chron.com
Deleted file (@@ -1,21 +0,0 @@):

Nom,email
Elle McKintosh,emckintosh0@1und1.de
Ty Megany,tmegany1@reuters.com
Pippa Borrows,pborrows2@a8.net
Sonny Eskrick,seskrick3@123-reg.co.uk
Mollee Britch,mbritch4@usda.gov
Ingram Plaistowe,iplaistowe5@purevolume.com
Fay Vanyard,fvanyard6@sbwire.com
Nancy Rase,nrase7@omniture.com
Rachael Ruxton,rruxton8@bravesites.com
Tallie Rushmer,trushmer9@home.pl
Seward MacIlhagga,smacilhaggaa@hatena.ne.jp
Lizette Searl,lsearlb@list-manage.com
Talya Mannagh,tmannaghc@webnode.com
Jordan Witherbed,jwitherbedd@unesco.org
Reagan Botcherby,rbotcherbye@scientificamerican.com
Libbie Shoulder,lshoulderf@desdev.cn
Abner Khomich,akhomichg@youtube.com
Zollie Kitman,zkitmanh@forbes.com
Fiorenze Durden,fdurdeni@feedburner.com
Kevyn Race,kracej@seattletimes.com
@@ -2,16 +2,16 @@
 # encoding: utf-8
 
 NO_ST_COLUMNS = {
-    "term": "Trimestre",
     "assessment": "Nom",
+    "term": "Trimestre",
     "date": "Date",
     "exercise": "Exercice",
     "question": "Question",
     "competence": "Competence",
     "theme": "Domaine",
     "comment": "Commentaire",
-    "score_rate": "Bareme",
     "is_leveled": "Est_nivele",
+    "score_rate": "Bareme",
 }
 
 COLUMNS = {
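NO_ST_COLUMNS maps internal keys to the French column headers used in the CSV files; elsewhere in this diff it is used to rename DataFrame columns (base_df.rename(columns=NO_ST_COLUMNS, ...)). A minimal sketch of that renaming step, with a made-up DataFrame:

    import pandas as pd
    from recopytex.config import NO_ST_COLUMNS  # mapping shown in the hunk above

    df = pd.DataFrame([{"term": "1", "assessment": "DS1", "score_rate": 3}])
    df = df.rename(columns=NO_ST_COLUMNS)  # columns become "Trimestre", "Nom", "Bareme"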
@@ -17,7 +17,7 @@ def try_replace(x, old, new):
 
 
 def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
-    """Extract the list of students from df
+    """ Extract the list of students from df
 
     :param df: the dataframe
     :param no_student_columns: columns that are not students
@@ -30,7 +30,7 @@ def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
 def flat_df_students(
     df, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
 ):
-    """Flat the dataframe by returning a dataframe with on student on each line
+    """ Flat the dataframe by returning a dataframe with on student on each line
 
     :param df: the dataframe (one row per questions)
     :param no_student_columns: columns that are not students
@@ -63,7 +63,7 @@ def flat_df_students(
 def flat_df_for(
     df, student, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
 ):
-    """Extract the data only for one student
+    """ Extract the data only for one student
 
     :param df: the dataframe (one row per questions)
     :param no_student_columns: columns that are not students
@@ -88,7 +88,7 @@ def flat_df_for(
 
 
 def postprocess(df):
-    """Postprocessing score dataframe
+    """ Postprocessing score dataframe
 
     - Replace na with an empty string
     - Replace "NOANSWER" with -1
@@ -6,9 +6,7 @@ import numpy as np
 from math import ceil, floor
 from .config import COLUMNS, VALIDSCORE
 
-"""
-Functions for manipulate score dataframes
-"""
+# Values manipulations
 
 
 def round_half_point(val):
@@ -21,13 +19,12 @@ def round_half_point(val):
 
 
 def score_to_mark(x):
-    """Compute the mark
+    """ Compute the mark
 
     if the item is leveled then the score is multiply by the score_rate
     otherwise it copies the score
 
     :param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
-    :return: the mark
 
     >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
     ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
@@ -46,10 +43,9 @@ def score_to_mark(x):
 
     if x[COLUMNS["is_leveled"]]:
         if x[COLUMNS["score"]] not in [0, 1, 2, 3]:
-            raise ValueError(
-                f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}"
-            )
-        return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)
+            raise ValueError(f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}")
+            #return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)
+        return round(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3, 2)
 
     if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
         raise ValueError(
@@ -59,10 +55,9 @@ def score_to_mark(x):
 
 
 def score_to_level(x):
-    """Compute the level (".",0,1,2,3).
+    """ Compute the level (".",0,1,2,3).
 
     :param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
-    :return: the level
 
     >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
     ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
@@ -97,9 +92,7 @@ def score_to_level(x):
 
 
 def compute_mark(df):
-    """Compute the mark for the dataframe
-
-    apply score_to_mark to each row
+    """ Add Mark column to df
 
     :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
 
@@ -130,12 +123,9 @@ def compute_mark(df):
 
 
 def compute_level(df):
-    """Compute level for the dataframe
-
-    Applies score_to_level to each row
+    """ Add Mark column to df
 
     :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
-    :return: Columns with level
 
     >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
     ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
@@ -164,10 +154,9 @@ def compute_level(df):
 
 
 def compute_normalized(df):
-    """Compute the normalized mark (Mark / score_rate)
+    """ Compute the normalized mark (Mark / score_rate)
 
     :param df: DataFrame with "Mark" and COLUMNS["score_rate"] columns
-    :return: column with normalized mark
 
     >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
     ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
@@ -198,9 +187,7 @@ def compute_normalized(df):
 
 
 def pp_q_scores(df):
-    """Postprocessing questions scores dataframe
-
-    Add 3 columns: mark, level and normalized
+    """ Postprocessing questions scores dataframe
 
     :param df: questions-scores dataframe
     :return: same data frame with mark, level and normalize columns
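The main behavioural change in score_to_mark is the rounding: a leveled item now yields round(score * score_rate / 3, 2) instead of round_half_point(...). A small sketch of the arithmetic, assuming round_half_point rounds to the nearest half point (its implementation is not shown in this diff) and using made-up values:

    score, score_rate = 2, 5       # hypothetical leveled item: level 2 out of 3, Bareme 5
    raw = score * score_rate / 3   # 3.333...
    old_mark = round(raw * 2) / 2  # assumed round_half_point behaviour -> 3.5
    new_mark = round(raw, 2)       # behaviour after this change        -> 3.33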
recopytex/scripts/config.py (new file, 10 lines)

@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
+import yaml
+
+CONFIGPATH = "recoconfig.yml"
+
+with open(CONFIGPATH, "r") as configfile:
+    config = yaml.load(configfile, Loader=yaml.FullLoader)
+
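This new module loads recoconfig.yml once at import time and exposes it as a plain dict. A minimal usage sketch; the keys come from the example configuration removed earlier in this diff:

    from recopytex.scripts.config import config

    print(config["source"])                       # base directory holding the tribe folders
    print([t["name"] for t in config["tribes"]])  # e.g. ["Tribe1", "Tribe2"]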
Deleted file (@@ -1,132 +0,0 @@):

#!/usr/bin/env python
# encoding: utf-8

from datetime import datetime
from pathlib import Path
from prompt_toolkit import HTML
import yaml

from .getconfig import config


class Exam:
    def __init__(self, name, tribename, date, term, **kwrds):
        self._name = name
        self._tribename = tribename
        try:
            self._date = datetime.strptime(date, "%y%m%d")
        except:
            self._date = date

        self._term = term

        self._exercises = {}

    @property
    def name(self):
        return self._name

    @property
    def tribename(self):
        return self._tribename

    @property
    def date(self):
        return self._date

    @property
    def term(self):
        return self._term

    def add_exercise(self, name, questions):
        """ Add key with questions in ._exercises """
        try:
            self._exercises[name]
        except KeyError:
            self._exercises[name] = questions
        else:
            raise KeyError("The exercise already exsists. Use modify_exercise")

    def modify_exercise(self, name, questions, append=False):
        """Modify questions of an exercise

        If append==True, add questions to the exercise questions

        """
        try:
            self._exercises[name]
        except KeyError:
            raise KeyError("The exercise already exsists. Use modify_exercise")
        else:
            if append:
                self._exercises[name] += questions
            else:
                self._exercises[name] = questions

    @property
    def exercices(self):
        return self._exercises

    @property
    def tribe_path(self):
        return Path(config["source"]) / self.tribename

    @property
    def tribe_student_path(self):
        return (
            Path(config["source"])
            / [t["students"] for t in config["tribes"] if t["name"] == self.tribename][
                0
            ]
        )

    @property
    def long_name(self):
        """ Get exam name with date inside """
        return f"{self.date.strftime('%y%m%d')}_{self.name}"

    def path(self, extention=""):
        return self.tribe_path / (self.long_name + extention)

    def to_dict(self):
        return {
            "name": self.name,
            "tribename": self.tribename,
            "date": self.date,
            "term": self.term,
            "exercices": self.exercices,
        }

    def to_row(self):
        rows = []
        for ex, questions in self.exercices.items():
            for q in questions:
                rows.append(
                    {
                        "term": self.term,
                        "assessment": self.name,
                        "date": self.date.strftime("%d/%m/%Y"),
                        "exercise": ex,
                        "question": q["id"],
                        **q,
                    }
                )
        return rows

    @property
    def themes(self):
        themes = set()
        for questions in self._exercises.values():
            themes.update([q["theme"] for q in questions])
        return themes

    def display_exercise(self, name):
        pass

    def display(self, name):
        pass

    def write(self):
        print(f"Sauvegarde temporaire dans {self.path('.yml')}")
        self.tribe_path.mkdir(exist_ok=True)
        with open(self.path(".yml"), "w") as f:
            f.write(yaml.dump(self.to_dict()))
Deleted file (@@ -1,299 +0,0 @@):

#!/usr/bin/env python
# encoding: utf-8

import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
from pathlib import Path
from datetime import datetime
import pandas as pd
import numpy as np


from .. import flat_df_students, pp_q_scores
from ..config import NO_ST_COLUMNS
from .getconfig import config, CONFIGPATH

COLORS = {
    ".": "black",
    0: "#E7472B",
    1: "#FF712B",
    2: "#F2EC4C",
    3: "#68D42F",
}

external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# app = dash.Dash(__name__)

app.layout = html.Div(
    children=[
        html.H1("Analyse des notes"),
        html.Div(
            [
                "Classe: ",
                dcc.Dropdown(
                    id="tribe",
                    options=[
                        {"label": t["name"], "value": t["name"]}
                        for t in config["tribes"]
                    ],
                    value=config["tribes"][0]["name"],
                ),
                "Evaluation: ",
                dcc.Dropdown(id="csv"),
            ],
            style={"columnCount": 2},
        ),
        html.Div(
            [
                dash_table.DataTable(
                    id="final_score_table",
                    columns=[
                        {"id": "Élève", "name": "Élève"},
                        {"id": "Note", "name": "Note"},
                        {"id": "Barème", "name": "Bareme"},
                    ],
                    data=[],
                    style_data_conditional=[
                        {
                            "if": {"row_index": "odd"},
                            "backgroundColor": "rgb(248, 248, 248)",
                        }
                    ],
                    style_header={
                        "backgroundColor": "rgb(230, 230, 230)",
                        "fontWeight": "bold",
                    },
                    style_data={
                        "width": "100px",
                        "maxWidth": "100px",
                        "minWidth": "100px",
                    },
                ),
                html.Div(
                    [
                        dash_table.DataTable(
                            id="final_score_describe",
                        ),
                        dcc.Graph(id="fig_assessment_hist"),
                        dcc.Graph(id="fig_competences"),
                    ]
                ),
            ],
            style={"columnCount": 2},
        ),
        html.Br(),
        html.Div(
            [
                dash_table.DataTable(
                    id="scores_table",
                    columns=[{"id": c, "name": c} for c in NO_ST_COLUMNS.values()],
                    style_cell={
                        "whiteSpace": "normal",
                        "height": "auto",
                    },
                    style_data_conditional=[],
                    editable=True,
                )
            ]
        ),
        html.P(id="lastsave"),
        dcc.Store(id="final_score"),
    ]
)


@app.callback(
    [
        dash.dependencies.Output("csv", "options"),
        dash.dependencies.Output("csv", "value"),
    ],
    [dash.dependencies.Input("tribe", "value")],
)
def update_csvs(value):
    if not value:
        raise PreventUpdate
    p = Path(value)
    csvs = list(p.glob("*.csv"))
    try:
        return [{"label": str(c), "value": str(c)} for c in csvs], str(csvs[0])
    except IndexError:
        return []


@app.callback(
    [
        dash.dependencies.Output("final_score", "data"),
    ],
    [dash.dependencies.Input("scores_table", "data")],
)
def update_final_scores(data):
    if not data:
        raise PreventUpdate
    try:
        scores = pd.DataFrame.from_records(data)
        scores = flat_df_students(scores).dropna(subset=["Score"])
        scores = pp_q_scores(scores)
        assessment_scores = scores.groupby(["Eleve"]).agg(
            {"Note": "sum", "Bareme": "sum"}
        )
        return [assessment_scores.reset_index().to_dict("records")]
    except KeyError:
        raise PreventUpdate


@app.callback(
    [
        dash.dependencies.Output("final_score_table", "columns"),
        dash.dependencies.Output("final_score_table", "data"),
    ],
    [dash.dependencies.Input("final_score", "data")],
)
def update_final_scores_table(data):
    assessment_scores = pd.DataFrame.from_records(data)
    return [
        {"id": c, "name": c} for c in assessment_scores.columns
    ], assessment_scores.to_dict("records")


@app.callback(
    [
        dash.dependencies.Output("final_score_describe", "columns"),
        dash.dependencies.Output("final_score_describe", "data"),
    ],
    [dash.dependencies.Input("final_score", "data")],
)
def update_final_scores_descr(data):
    desc = pd.DataFrame.from_records(data)["Note"].describe()
    return [{"id": c, "name": c} for c in desc.keys()], [desc.to_dict()]


@app.callback(
    [
        dash.dependencies.Output("fig_assessment_hist", "figure"),
    ],
    [dash.dependencies.Input("final_score", "data")],
)
def update_final_scores_hist(data):
    assessment_scores = pd.DataFrame.from_records(data)
    ranges = np.linspace(
        0, assessment_scores.Bareme.max(), int(assessment_scores.Bareme.max() * 2 + 1)
    )
    bins = pd.cut(assessment_scores["Note"], ranges)
    assessment_scores["Bin"] = bins
    assessment_grouped = (
        assessment_scores.reset_index()
        .groupby("Bin")
        .agg({"Bareme": "count", "Eleve": lambda x: "\n".join(x)})
    )
    assessment_grouped.index = assessment_grouped.index.map(lambda i: i.right)
    fig = go.Figure()
    fig.add_bar(
        x=assessment_grouped.index,
        y=assessment_grouped.Bareme,
        text=assessment_grouped.Eleve,
        textposition="auto",
        hovertemplate="",
        marker_color="#4E89DE",
    )
    return [fig]


@app.callback(
    [
        dash.dependencies.Output("fig_competences", "figure"),
    ],
    [dash.dependencies.Input("scores_table", "data")],
)
def update_competence_fig(data):
    scores = pd.DataFrame.from_records(data)
    scores = flat_df_students(scores).dropna(subset=["Score"])
    scores = pp_q_scores(scores)
    pt = pd.pivot_table(
        scores,
        index=["Exercice", "Question", "Commentaire"],
        columns="Score",
        aggfunc="size",
        fill_value=0,
    )
    for i in {i for i in pt.index.get_level_values(0)}:
        pt.loc[(str(i), "", ""), :] = ""
    pt.sort_index(inplace=True)
    index = (
        pt.index.get_level_values(0)
        + ":"
        + pt.index.get_level_values(1)
        + " "
        + pt.index.get_level_values(2)
    )

    fig = go.Figure()
    bars = [
        {"score": -1, "name":"Pas de réponse", "color": COLORS["."]},
        {"score": 0, "name":"Faut", "color": COLORS[0]},
        {"score": 1, "name":"Peu juste", "color": COLORS[1]},
        {"score": 2, "name":"Presque juste", "color": COLORS[2]},
        {"score": 3, "name":"Juste", "color": COLORS[3]},
    ]
    for b in bars:
        try:
            fig.add_bar(x=index, y=pt[b["score"]], name=b["name"], marker_color=b["color"])
        except KeyError:
            pass
    fig.update_layout(barmode="relative")
    return [fig]


@app.callback(
    [dash.dependencies.Output("lastsave", "children")],
    [
        dash.dependencies.Input("scores_table", "data"),
        dash.dependencies.State("csv", "value"),
    ],
)
def save_scores(data, csv):
    scores = pd.DataFrame.from_records(data)
    print(f"save at {csv} ({datetime.today()})")
    scores.to_csv(csv, index=False)
    return [datetime.today()]


def highlight_value(df):
    """ Cells style """
    hight = []
    for v, color in COLORS.items():
        hight += [
            {
                "if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
                "backgroundColor": color,
                "color": "white",
            }
            for col in df.columns
            if col not in NO_ST_COLUMNS.values()
        ]
    return hight


@app.callback(
    [
        dash.dependencies.Output("scores_table", "columns"),
        dash.dependencies.Output("scores_table", "data"),
        dash.dependencies.Output("scores_table", "style_data_conditional"),
    ],
    [dash.dependencies.Input("csv", "value")],
)
def update_scores_table(value):
    if not value:
        raise PreventUpdate
    stack = pd.read_csv(value, encoding="UTF8")
    # try:
    #     stack = stack.drop(columns=["Nom", "Trimestre", "Date", "Competence", "Domaine", "Est_nivele", "Bareme"])
    # except KeyError:
    #     stack = stack
    return (
        [{"id": c, "name": c} for c in stack.columns],
        stack.to_dict("records"),
        highlight_value(stack),
    )
Deleted file (@@ -1,9 +0,0 @@):

#!/usr/bin/env python
# encoding: utf-8
import yaml

CONFIGPATH = "recoconfig.yml"

with open(CONFIGPATH, "r") as config:
    config = yaml.load(config, Loader=yaml.FullLoader)
recopytex/scripts/prepare_csv.py (new file, 160 lines, @@ -0,0 +1,160 @@):

#!/usr/bin/env python
# encoding: utf-8

import click
from pathlib import Path
from datetime import datetime
from PyInquirer import prompt, print_json
import pandas as pd
import numpy as np

from .config import config
from ..config import NO_ST_COLUMNS


class PromptAbortException(EOFError):
    def __init__(self, message, errors=None):

        # Call the base class constructor with the parameters it needs
        super(PromptAbortException, self).__init__("Abort questionnary", errors)


def get_tribes(answers):
    """ List tribes based on subdirectory of config["source"] which have an "eleves.csv" file inside """
    return [
        p.name for p in Path(config["source"]).iterdir() if (p / "eleves.csv").exists()
    ]


def prepare_csv():
    items = new_eval()

    item = items[0]
    # item = {"tribe": "308", "date": datetime.today(), "assessment": "plop"}
    csv_output = (
        Path(config["source"])
        / item["tribe"]
        / f"{item['date']:%y%m%d}_{item['assessment']}.csv"
    )

    students = pd.read_csv(Path(config["source"]) / item["tribe"] / "eleves.csv")["Nom"]

    columns = list(NO_ST_COLUMNS.keys())
    items = [[it[c] for c in columns] for it in items]
    columns = list(NO_ST_COLUMNS.values())
    items_df = pd.DataFrame.from_records(items, columns=columns)
    for s in students:
        items_df[s] = np.nan

    items_df.to_csv(csv_output, index=False, date_format="%d/%m/%Y")
    click.echo(f"Saving csv file to {csv_output}")


def new_eval(answers={}):
    click.echo(f"Préparation d'un nouveau devoir")

    eval_questions = [
        {"type": "input", "name": "assessment", "message": "Nom de l'évaluation",},
        {
            "type": "list",
            "name": "tribe",
            "message": "Classe concernée",
            "choices": get_tribes,
        },
        {
            "type": "input",
            "name": "date",
            "message": "Date du devoir (%y%m%d)",
            "default": datetime.today().strftime("%y%m%d"),
            "filter": lambda val: datetime.strptime(val, "%y%m%d"),
        },
        {
            "type": "list",
            "name": "term",
            "message": "Trimestre",
            "choices": ["1", "2", "3"],
        },
    ]

    eval_ans = prompt(eval_questions)

    items = []
    add_exo = True
    while add_exo:
        ex_items = new_exercice(eval_ans)
        items += ex_items
        add_exo = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_exo",
                    "message": "Ajouter un autre exercice",
                    "default": True,
                }
            ]
        )["add_exo"]
    return items


def new_exercice(answers={}):
    exercise_questions = [
        {"type": "input", "name": "exercise", "message": "Nom de l'exercice"},
    ]

    click.echo(f"Nouvel exercice")
    exercise_ans = prompt(exercise_questions, answers=answers)

    items = []

    add_item = True
    while add_item:
        try:
            item_ans = new_item(exercise_ans)
        except PromptAbortException:
            click.echo("Création de l'item annulée")
        else:
            items.append(item_ans)
        add_item = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_item",
                    "message": f"Ajouter un autre item pour l'exercice {exercise_ans['exercise']}",
                    "default": True,
                }
            ]
        )["add_item"]

    return items


def new_item(answers={}):
    item_questions = [
        {"type": "input", "name": "question", "message": "Nom de l'item",},
        {"type": "input", "name": "comment", "message": "Commentaire",},
        {
            "type": "list",
            "name": "competence",
            "message": "Competence",
            "choices": ["Cher", "Rep", "Mod", "Rai", "Cal", "Com"],
        },
        {"type": "input", "name": "theme", "message": "Domaine",},
        {
            "type": "confirm",
            "name": "is_leveled",
            "message": "Évaluation par niveau",
            "default": True,
        },
        {"type": "input", "name": "score_rate", "message": "Bareme"},
        {
            "type": "confirm",
            "name": "correct",
            "message": "Tout est correct?",
            "default": True,
        },
    ]
    click.echo(f"Nouvelle question pour l'exercice {answers['exercise']}")
    item_ans = prompt(item_questions, answers=answers)
    if item_ans["correct"]:
        return item_ans
    raise PromptAbortException("Abort item creation")
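prepare_csv() ends up writing one row per graded item and one empty column per student. A sketch of that layout, mirroring the function's own steps with a hypothetical item and student names borrowed from the removed roster:

    import numpy as np
    import pandas as pd

    from recopytex.config import NO_ST_COLUMNS

    items = [  # one hypothetical item, keyed like the PyInquirer answers
        {"term": "1", "assessment": "DS1", "date": "06/02/2020", "exercise": "Exercice 1",
         "question": "1a", "competence": "Cal", "theme": "Fractions", "comment": "",
         "score_rate": 1, "is_leveled": 1},
    ]
    rows = [[it[k] for k in NO_ST_COLUMNS.keys()] for it in items]
    items_df = pd.DataFrame.from_records(rows, columns=list(NO_ST_COLUMNS.values()))
    for student in ["Star Tice", "Umberto Dingate"]:
        items_df[student] = np.nan  # empty score column, to be filled in by hand
    print(items_df)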
Deleted file (@@ -1,233 +0,0 @@):

#!/usr/bin/env python
# encoding: utf-8


from prompt_toolkit import prompt, HTML, ANSI
from prompt_toolkit import print_formatted_text as print
from prompt_toolkit.styles import Style
from prompt_toolkit.validation import Validator
from prompt_toolkit.completion import WordCompleter
from unidecode import unidecode
from datetime import datetime
from functools import wraps
import sys

from .getconfig import config


VALIDATE = [
    "o",
    "ok",
    "OK",
    "oui",
    "OUI",
    "yes",
    "YES",
]
REFUSE = ["n", "non", "NON", "no", "NO"]
CANCEL = ["a", "annuler"]

STYLE = Style.from_dict(
    {
        "": "#93A1A1",
        "validation": "#884444",
        "appending": "#448844",
    }
)


class CancelError(Exception):
    pass


def prompt_validate(question, cancelable=False, empty_means=1, style="validation"):
    """Prompt for validation

    :param question: Text to print to ask the question.
    :param cancelable: enable cancel answer
    :param empty_means: result for no answer
    :return:
        0 -> Refuse
        1 -> Validate
        -1 -> cancel
    """
    question_ = question
    choices = VALIDATE + REFUSE

    if cancelable:
        question_ += "(a ou annuler pour sortir)"
        choices += CANCEL

    ans = prompt(
        [
            (f"class:{style}", question_),
        ],
        completer=WordCompleter(choices),
        style=STYLE,
    ).lower()

    if ans == "":
        return empty_means
    if ans in VALIDATE:
        return 1
    if cancelable and ans in CANCEL:
        return -1
    return 0


def prompt_until_validate(question="C'est ok? ", cancelable=False):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwrd):
            ans = func(*args, **kwrd)

            confirm = prompt_validate(question, cancelable)

            if confirm == -1:
                raise CancelError

            while not confirm:
                sys.stdout.flush()
                ans = func(*args, **ans, **kwrd)
                confirm = prompt_validate(question, cancelable)
                if confirm == -1:
                    raise CancelError
            return ans

        return wrapper

    return decorator


@prompt_until_validate()
def prompt_exam(**kwrd):
    """ Prompt questions to edit an exam """
    print(HTML("<b>Nouvelle évaluation</b>"))
    exam = {}
    exam["name"] = prompt("Nom de l'évaluation: ", default=kwrd.get("name", "DS"))

    tribes_name = [t["name"] for t in config["tribes"]]

    exam["tribename"] = prompt(
        "Nom de la classe: ",
        default=kwrd.get("tribename", ""),
        completer=WordCompleter(tribes_name),
        validator=Validator.from_callable(lambda x: x in tribes_name),
    )
    exam["tribe"] = [t for t in config["tribes"] if t["name"] == exam["tribename"]][0]

    exam["date"] = prompt(
        "Date de l'évaluation (%y%m%d): ",
        default=kwrd.get("date", datetime.today()).strftime("%y%m%d"),
        validator=Validator.from_callable(lambda x: (len(x) == 6) and x.isdigit()),
    )
    exam["date"] = datetime.strptime(exam["date"], "%y%m%d")

    exam["term"] = prompt(
        "Trimestre: ",
        validator=Validator.from_callable(lambda x: x.isdigit()),
        default=kwrd.get("term", "1"),
    )

    return exam


@prompt_until_validate()
def prompt_exercise(number=1, completer={}, **kwrd):
    exercise = {}
    try:
        kwrd["name"]
    except KeyError:
        print(HTML("<b>Nouvel exercice</b>"))
        exercise["name"] = prompt(
            "Nom de l'exercice: ", default=kwrd.get("name", f"Exercice {number}")
        )
    else:
        print(HTML(f"<b>Modification de l'exercice: {kwrd['name']}</b>"))
        exercise["name"] = kwrd["name"]

    exercise["questions"] = []

    try:
        kwrd["questions"][0]
    except KeyError:
        last_question_id = "1a"
    except IndexError:
        last_question_id = "1a"
    else:
        for ques in kwrd["questions"]:
            try:
                exercise["questions"].append(
                    prompt_question(completer=completer, **ques)
                )
            except CancelError:
                print("Cette question a été supprimée")
        last_question_id = exercise["questions"][-1]["id"]

    appending = prompt_validate(
        question="Ajouter un élément de notation? ", style="appending"
    )
    while appending:
        try:
            exercise["questions"].append(
                prompt_question(last_question_id, completer=completer)
            )
        except CancelError:
            print("Cette question a été supprimée")
        else:
            last_question_id = exercise["questions"][-1]["id"]
        appending = prompt_validate(
            question="Ajouter un élément de notation? ", style="appending"
        )

    return exercise


@prompt_until_validate(cancelable=True)
def prompt_question(last_question_id="1a", completer={}, **kwrd):
    try:
        kwrd["id"]
    except KeyError:
        print(HTML("<b>Nouvel élément de notation</b>"))
    else:
        print(
            HTML(f"<b>Modification de l'élément {kwrd['id']} ({kwrd['comment']})</b>")
        )

    question = {}
    question["id"] = prompt(
        "Identifiant de la question: ",
        default=kwrd.get("id", "1a"),
    )

    question["competence"] = prompt(
        "Competence: ",
        default=kwrd.get("competence", list(config["competences"].keys())[0]),
        completer=WordCompleter(config["competences"].keys()),
        validator=Validator.from_callable(lambda x: x in config["competences"].keys()),
    )

    question["theme"] = prompt(
        "Domaine: ",
        default=kwrd.get("theme", ""),
        completer=WordCompleter(completer.get("theme", [])),
    )

    question["comment"] = prompt(
        "Commentaire: ",
        default=kwrd.get("comment", ""),
    )

    question["is_leveled"] = prompt(
        "Évaluation par niveau: ",
        default=kwrd.get("is_leveled", "1"),
        # validate
    )

    question["score_rate"] = prompt(
        "Barème: ",
        default=kwrd.get("score_rate", "1"),
        # validate
    )

    return question
@@ -3,17 +3,13 @@
 
 import click
 from pathlib import Path
+import yaml
 import sys
 import papermill as pm
-import pandas as pd
 from datetime import datetime
-import yaml
 
-from .getconfig import config, CONFIGPATH
-from .prompts import prompt_exam, prompt_exercise, prompt_validate
-from ..config import NO_ST_COLUMNS
-from .exam import Exam
-from .exam_dash import app as exam_app
+from .prepare_csv import prepare_csv
+from .config import config
 
 
 @click.group()
@@ -28,79 +24,8 @@ def print_config():
     click.echo(config)
 
 
-@cli.command()
-def setup():
-    """Setup the environnement using recoconfig.yml"""
-    for tribe in config["tribes"]:
-        Path(tribe["name"]).mkdir(exist_ok=True)
-        if not Path(tribe["students"]).exists():
-            print(f"The file {tribe['students']} does not exists")
-
-
-@cli.command()
-def new_exam():
-    """ Create new exam csv file """
-    exam = Exam(**prompt_exam())
-
-    if exam.path(".yml").exists():
-        print(f"Fichier sauvegarde trouvé à {exam.path('.yml')} -- importation")
-        with open(exam.path(".yml"), "r") as f:
-            for name, questions in yaml.load(f, Loader=yaml.SafeLoader)[
-                "exercices"
-            ].items():
-                exam.add_exercise(name, questions)
-
-    print(exam.themes)
-    # print(yaml.dump(exam.to_dict()))
-
-    exam.write()
-
-    for name, questions in exam.exercices.items():
-        exam.modify_exercise(
-            **prompt_exercise(
-                name=name, completer={"theme": exam.themes}, questions=questions
-            )
-        )
-        exam.write()
-
-    new_exercise = prompt_validate("Ajouter un exercice? ")
-    while new_exercise:
-        exam.add_exercise(
-            **prompt_exercise(len(exam.exercices) + 1, completer={"theme": exam.themes})
-        )
-        exam.write()
-        new_exercise = prompt_validate("Ajouter un exercice? ")
-
-    rows = exam.to_row()
-
-    base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
-    base_df.rename(columns=NO_ST_COLUMNS, inplace=True)
-
-    students = pd.read_csv(exam.tribe_student_path)["Nom"]
-    for student in students:
-        base_df[student] = ""
-
-    exam.tribe_path.mkdir(exist_ok=True)
-
-    base_df.to_csv(exam.path(".csv"), index=False)
-    print(f"Le fichier note a été enregistré à {exam.path('.csv')}")
-
-@cli.command()
-def exam_analysis():
-    exam_app.run_server(debug=True)
-
-@cli.command()
-@click.argument("csv_file")
-def report(csv_file):
-    csv = Path(csv_file)
-    if not csv.exists():
-        click.echo(f"{csv_file} does not exists")
-        sys.exit(1)
-    if csv.suffix != ".csv":
-        click.echo(f"{csv_file} has to be a csv file")
-        sys.exit(1)
-
-    csv_file = Path(csv_file)
+def reporting(csv_file):
+    # csv_file = Path(csv_file)
     tribe_dir = csv_file.parent
     csv_filename = csv_file.name.split(".")[0]
 
@@ -109,7 +34,7 @@ def report(csv_file):
     try:
         date = datetime.strptime(date, "%y%m%d")
     except ValueError:
-        date = None
+        date = datetime.today().strptime(date, "%y%m%d")
 
     tribe = str(tribe_dir).split("/")[-1]
 
@@ -129,3 +54,49 @@ def report(csv_file):
             csv_file=str(csv_file.absolute()),
         ),
     )
+
+
+@cli.command()
+@click.argument("target", required=False)
+def report(target=""):
+    """ Make a report for the eval
+
+    :param target: csv file or a directory where csvs are
+    """
+    try:
+        if target.endswith(".csv"):
+            csv = Path(target)
+            if not csv.exists():
+                click.echo(f"{target} does not exists")
+                sys.exit(1)
+            if csv.suffix != ".csv":
+                click.echo(f"{target} has to be a csv file")
+                sys.exit(1)
+            csvs = [csv]
+        else:
+            csvs = list(Path(target).glob("**/*.csv"))
+    except AttributeError:
+        csvs = list(Path(config["source"]).glob("**/*.csv"))
+
+    for csv in csvs:
+        click.echo(f"Processing {csv}")
+        try:
+            reporting(csv)
+        except pm.exceptions.PapermillExecutionError as e:
+            click.echo(f"Error with {csv}: {e}")
+
+
+@cli.command()
+def prepare():
+    """ Prepare csv file """
+    items = prepare_csv()
+
+    click.echo(items)
+
+
+@cli.command()
+@click.argument("tribe")
+def random_pick(tribe):
+    """ Randomly pick a student """
+    pass
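After this change, report walks a target (a csv file, a directory, or, by default, everything under config["source"]) and calls reporting on each csv. Since the console-script name is not visible in this diff, a minimal sketch drives the command through click's test runner; the module path recopytex.scripts.recopytex is an assumption:

    from click.testing import CliRunner
    from recopytex.scripts.recopytex import cli  # assumed module path for the CLI group shown above

    runner = CliRunner()
    print(runner.invoke(cli, ["report"]).output)            # every csv under config["source"]
    print(runner.invoke(cli, ["report", "Tribe1"]).output)  # every csv under ./Tribe1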
@@ -1,4 +1,76 @@
-pandas
-click
-papermill
-prompt_toolkit
+ansiwrap==0.8.4
+appdirs==1.4.3
+attrs==19.1.0
+backcall==0.1.0
+black==19.10b0
+bleach==3.1.0
+certifi==2019.6.16
+chardet==3.0.4
+Click==7.0
+colorama==0.4.1
+cycler==0.10.0
+decorator==4.4.0
+defusedxml==0.6.0
+entrypoints==0.3
+future==0.17.1
+idna==2.8
+importlib-resources==1.0.2
+ipykernel==5.1.3
+ipython==7.11.1
+ipython-genutils==0.2.0
+ipywidgets==7.5.1
+jedi==0.15.2
+Jinja2==2.10.3
+jsonschema==3.2.0
+jupyter==1.0.0
+jupyter-client==5.3.4
+jupyter-console==6.1.0
+jupyter-core==4.6.1
+jupytex==0.0.3
+kiwisolver==1.1.0
+Markdown==3.1.1
+MarkupSafe==1.1.1
+matplotlib==3.1.2
+mistune==0.8.4
+nbconvert==5.6.1
+nbformat==5.0.3
+notebook==6.0.3
+numpy==1.18.1
+pandas==0.25.3
+pandocfilters==1.4.2
+papermill==1.2.1
+parso==0.5.2
+pathspec==0.7.0
+pexpect==4.8.0
+pickleshare==0.7.5
+prometheus-client==0.7.1
+prompt-toolkit==1.0.14
+ptyprocess==0.6.0
+Pygments==2.5.2
+PyInquirer==1.0.3
+pyparsing==2.4.6
+pyrsistent==0.15.7
+python-dateutil==2.8.0
+pytz==2019.3
+PyYAML==5.3
+pyzmq==18.1.1
+qtconsole==4.6.0
+-e git+git_opytex:/lafrite/recopytex.git@7e026bedb24c1ca8bef3b71b3d63f8b0d6916e81#egg=Recopytex
+regex==2020.1.8
+requests==2.22.0
+scipy==1.4.1
+Send2Trash==1.5.0
+six==1.12.0
+tenacity==6.0.0
+terminado==0.8.3
+testpath==0.4.4
+textwrap3==0.9.2
+toml==0.10.0
+tornado==6.0.3
+tqdm==4.41.1
+traitlets==4.3.2
+typed-ast==1.4.1
+urllib3==1.25.8
+wcwidth==0.1.8
+webencodings==0.5.1
+widgetsnbextension==3.5.1
Deleted file (@@ -1,69 +0,0 @@):

ansiwrap
attrs
backcall
bleach
certifi
chardet
Click
colorama
cycler
decorator
defusedxml
entrypoints
future
idna
importlib-resources
ipykernel
ipython
ipython-genutils
ipywidgets
jedi
Jinja2
jsonschema
jupyter
jupyter-client
jupyter-console
jupyter-core
jupytex
kiwisolver
MarkupSafe
matplotlib
mistune
nbconvert
nbformat
notebook
numpy
pandas
pandocfilters
papermill
parso
pexpect
pickleshare
prometheus-client
prompt-toolkit
ptyprocess
Pygments
pyparsing
pyrsistent
python-dateutil
pytz
PyYAML
pyzmq
qtconsole
-e git+git_opytex:/lafrite/recopytex.git@e9a8310f151ead60434ae944d726a2fd22b23d06#egg=Recopytex
requests
scipy
seaborn
Send2Trash
six
tenacity
terminado
testpath
textwrap3
tornado
tqdm
traitlets
urllib3
wcwidth
webencodings
widgetsnbextension
setup.py, 7 changed lines

@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='Recopytex',
-    version='0.1',
+    version='1.1.1',
     description='Assessment analysis',
     author='Benjamin Bertrand',
     author_email='',
@@ -13,6 +13,11 @@ setup(
     include_package_data=True,
     install_requires=[
         'Click',
+        'pandas',
+        'numpy',
+        'papermill',
+        'pyyaml',
+        'PyInquirer',
     ],
     entry_points='''
         [console_scripts]