Compare commits

50 Commits

SHA1 Message Date
98d9fd4026 Fix: save csv with nice column order 2021-11-27 17:17:18 +01:00
28fc41315f Feat: remove prompt commands 2021-11-27 17:16:12 +01:00
4b30f39354 Feat: remove pyInquier depedencie 2021-11-26 22:02:19 +01:00
58647a734c Fix: column order in score table 2021-11-22 16:33:59 +01:00
29f67cfa0c Fix: can't save in create exam 2021-05-11 09:02:09 +02:00
1ffdd8676b Merge ssh://git_opytex:/lafrite/recopytex into dev 2021-05-10 11:27:24 +02:00
8f2ae96338 Feat: add handling ppre 2021-04-18 09:59:12 +02:00
a0e94f52b1 Feat: formating questions 2021-04-05 08:31:05 +02:00
c84f9845b2 Feat: visualisation des Competences et des themes dans students 2021-02-27 10:31:52 +01:00
d9e95f2186 Feat: return empty fig 2021-02-27 10:03:24 +01:00
581b0f4f2f Feat: table des évaluations 2021-02-23 17:55:43 +01:00
3dbfc85447 Feat: filter dans store scores 2021-02-23 17:40:18 +01:00
b5bf1ac137 Feat: add students to paths 2021-02-23 17:07:05 +01:00
74d751a586 Feat: update student list 2021-02-23 17:06:55 +01:00
1855d4016d Feat: start student_analysis 2021-02-23 16:53:59 +01:00
ff94470fb4 Feat: Start feedback on eval 2021-02-23 16:14:05 +01:00
d322452a6e Feat: rename exam-analysis to dashboard 2021-02-23 16:10:16 +01:00
e1d3940e9d Feat: add total score_rate 2021-02-08 15:45:50 +01:00
7dba11996a Feat: formating and split in sections 2021-02-08 15:19:09 +01:00
3250a600c9 Feat: start the layout for create_exam 2021-01-27 16:17:44 +01:00
589d63ff29 Feat: not showing all columns in bigtable and fixe first columns 2021-01-27 16:16:54 +01:00
429fed6a1e Feat: default values for elements 2021-01-24 06:53:06 +01:00
1255bf4b9e Fix: remove useless print 2021-01-23 06:54:19 +01:00
1fe7665753 Merge branch 'dev' of git_opytex:/lafrite/recopytex into dev 2021-01-22 11:14:34 +01:00
e08e4a32a8 Feat: exam creation page 2021-01-22 11:13:35 +01:00
b737612adb Feat: Start display summary 2021-01-22 05:39:14 +01:00
9c19e2ac56 Feat: New page with input fields 2021-01-21 22:17:49 +01:00
eb60734c26 Fix: remove useless import 2021-01-21 22:17:33 +01:00
329bcc460c Fix: calculer -> chercher 2021-01-21 22:17:02 +01:00
95fc842c1d Feat: 2nd page to create exam 2021-01-21 15:12:24 +01:00
e0ca1a458b Fix: column id to see student and score_rate 2021-01-21 14:11:39 +01:00
eb1abbe868 Fix: get back exam graphs 2021-01-21 14:01:57 +01:00
412e624791 Merge remote-tracking branch 'origin/dev' into dev 2021-01-21 09:57:33 +01:00
e8bf0b3f0a Fix: name and bareme in final_score_table and describe rounding 2021-01-21 09:52:49 +01:00
c057fa11e7 Feat: stop rounding score at 0.5 2021-01-21 09:52:49 +01:00
e15119605f Merge branch 'dev' of git_opytex:/lafrite/recopytex into dev 2021-01-21 09:38:58 +01:00
494567cdb5 Merge branch 'dev' of git_opytex:/lafrite/recopytex into dev 2021-01-21 09:25:58 +01:00
84fcee625d Feat: split dashboard 2021-01-20 20:54:59 +01:00
f62c898162 Fix: remove unecessary import 2021-01-20 20:51:22 +01:00
a14d47b15c Feat: Clean empty fig 2021-01-15 17:49:30 +01:00
7058c79975 Feat: add PyInquirer to setup 2020-01-22 22:46:05 +01:00
d488807c57 Fix: Remake requirements 2020-01-22 22:44:51 +01:00
7e026bedb2 Feat: Save to csv works 2020-01-22 22:44:17 +01:00
33117cde71 Feat: Questionnary is ok 2020-01-22 21:38:52 +01:00
7d2cde304d Feat: Prepare prompt to creat new csv score file 2020-01-22 19:49:08 +01:00
409b80994a Feat: no half_point rounding in converting level to score 2020-01-01 15:29:30 +01:00
6fb11cb054 Feat: report a csv file, a directory or all 2019-09-24 15:41:57 +02:00
7a0bb4179d Feat: clean recopytex.py 2019-09-24 15:36:24 +02:00
fe3280b91d Feat: Process all csv if nothing is specify 2019-09-17 19:29:43 +02:00
3e85c3829d Feat: add dependencies 2019-09-17 19:03:21 +02:00
18 changed files with 1103 additions and 96 deletions

View File

@@ -4,9 +4,9 @@ output: ./
 templates: templates/
 
 competences:
-  Calculer:
-    name: Calculer
-    abrv: Cal
+  Chercher:
+    name: Chercher
+    abrv: Cher
   Représenter:
     name: Représenter
     abrv: Rep

View File

@@ -2,16 +2,16 @@
 # encoding: utf-8
 
 NO_ST_COLUMNS = {
-    "term": "Trimestre",
     "assessment": "Nom",
+    "term": "Trimestre",
     "date": "Date",
     "exercise": "Exercice",
     "question": "Question",
     "competence": "Competence",
     "theme": "Domaine",
     "comment": "Commentaire",
-    "score_rate": "Bareme",
     "is_leveled": "Est_nivele",
+    "score_rate": "Bareme",
 }
 
 COLUMNS = {

View File

@@ -0,0 +1,5 @@
import dash
app = dash.Dash(__name__, suppress_callback_exceptions=True)
# app = dash.Dash(__name__)
server = app.server
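
This new module centralises the Dash instance so every page can register its callbacks on the same app object. The suppress_callback_exceptions=True flag matters in a multi-page setup: callbacks reference components that only exist once the router swaps a page in. A self-contained sketch of that situation (component ids and the greeting are illustrative, not taken from the repository):

# Minimal illustration (assumed Dash 1.x API): the "out" component is not in the
# initial layout, so its callback would fail validation without
# suppress_callback_exceptions=True.
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

app = dash.Dash(__name__, suppress_callback_exceptions=True)
server = app.server

page = html.Div([dcc.Input(id="who", value="monde"), html.Div(id="out")])

app.layout = html.Div([dcc.Location(id="url"), html.Div(id="page-content")])


@app.callback(Output("page-content", "children"), Input("url", "pathname"))
def display_page(pathname):
    # Every path serves the same page in this sketch.
    return page


@app.callback(Output("out", "children"), Input("who", "value"))
def greet(who):
    return f"Bonjour {who}"


if __name__ == "__main__":
    app.run_server(debug=True)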

View File

@@ -19,6 +19,14 @@ main {
   margin: auto;
 }
 
+section {
+  margin-top: 20px;
+  margin-bottom: 20px;
+}
+
+/* Exam analysis */
+
 #select {
   margin-bottom: 20px;
 }
@@ -39,3 +47,20 @@ main {
   width: 45vw;
   margin: auto;
 }
+
+/* Create new exam */
+
+#new-exam {
+  display: flex;
+  flex-flow: row;
+  justify-content: space-between;
+}
+
+#new-exam label {
+  width: 20%;
+  display: flex;
+  flex-flow: column;
+  justify-content: space-between;
+}

View File

@@ -0,0 +1,355 @@
#!/usr/bin/env python
# encoding: utf-8
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table
import plotly.graph_objects as go
from datetime import date, datetime
import uuid
import pandas as pd
import yaml
from ...scripts.getconfig import config
from ...config import NO_ST_COLUMNS
from ..app import app
from ...scripts.exam import Exam
QUESTION_COLUMNS = [
{"id": "id", "name": "Question"},
{
"id": "competence",
"name": "Competence",
"presentation": "dropdown",
},
{"id": "theme", "name": "Domaine"},
{"id": "comment", "name": "Commentaire"},
{"id": "score_rate", "name": "Bareme"},
{"id": "is_leveled", "name": "Est_nivele"},
]
def get_current_year_limit():
today = date.today()
if today.month > 8:
return {
"min_date_allowed": date(today.year, 9, 1),
"max_date_allowed": date(today.year + 1, 7, 15),
"initial_visible_month": today,
}
return {
"min_date_allowed": date(today.year - 1, 9, 1),
"max_date_allowed": date(today.year, 7, 15),
"initial_visible_month": today,
}
layout = html.Div(
[
html.Header(
children=[
html.H1("Création d'une évaluation"),
html.P("Pas encore de sauvegarde", id="is-saved"),
html.Button("Enregistrer dans csv", id="save-csv"),
],
),
html.Main(
children=[
html.Section(
children=[
html.Form(
id="new-exam",
children=[
html.Label(
children=[
"Classe",
dcc.Dropdown(
id="tribe",
options=[
{"label": t["name"], "value": t["name"]}
for t in config["tribes"]
],
value=config["tribes"][0]["name"],
),
]
),
html.Label(
children=[
"Nom de l'évaluation",
dcc.Input(
id="exam_name",
type="text",
placeholder="Nom de l'évaluation",
),
]
),
html.Label(
children=[
"Date",
dcc.DatePickerSingle(
id="date",
date=date.today(),
**get_current_year_limit(),
),
]
),
html.Label(
children=[
"Trimestre",
dcc.Dropdown(
id="term",
options=[
{"label": i + 1, "value": i + 1}
for i in range(3)
],
value=1,
),
]
),
],
),
],
id="form",
),
html.Section(
children=[
html.Div(
id="exercises",
children=[],
),
html.Button(
"Ajouter un exercice",
id="add-exercise",
className="add-exercise",
),
html.Div(
id="summary",
),
],
id="exercises",
),
html.Section(
children=[
html.Div(
id="score_rate",
),
html.Div(
id="exercises-viz",
),
html.Div(
id="competences-viz",
),
html.Div(
id="themes-viz",
),
],
id="visualisation",
),
]
),
dcc.Store(id="exam_store"),
]
)
@app.callback(
dash.dependencies.Output("exercises", "children"),
dash.dependencies.Input("add-exercise", "n_clicks"),
dash.dependencies.State("exercises", "children"),
)
def add_exercise(n_clicks, children):
if n_clicks is None:
return children
element_table = pd.DataFrame(columns=[c["id"] for c in QUESTION_COLUMNS])
element_table = element_table.append(
pd.Series(
data={
"id": 1,
"competence": "Rechercher",
"theme": "",
"comment": "",
"score_rate": 1,
"is_leveled": 1,
},
name=0,
)
)
new_exercise = html.Div(
children=[
html.Div(
children=[
dcc.Input(
id={"type": "exercice", "index": str(n_clicks)},
type="text",
value=f"Exercice {len(children)+1}",
placeholder="Nom de l'exercice",
className="exercise-name",
),
html.Button(
"X",
id={"type": "rm_exercice", "index": str(n_clicks)},
className="delete-exercise",
),
],
className="exercise-head",
),
dash_table.DataTable(
id={"type": "elements", "index": str(n_clicks)},
columns=QUESTION_COLUMNS,
data=element_table.to_dict("records"),
editable=True,
row_deletable=True,
dropdown={
"competence": {
"options": [
{"label": i, "value": i} for i in config["competences"]
]
},
},
style_cell={
"whiteSpace": "normal",
"height": "auto",
},
),
html.Button(
"Ajouter un élément de notation",
id={"type": "add-element", "index": str(n_clicks)},
className="add-element",
),
],
className="exercise",
id=f"exercise-{n_clicks}",
)
children.append(new_exercise)
return children
@app.callback(
dash.dependencies.Output(
{"type": "elements", "index": dash.dependencies.MATCH}, "data"
),
dash.dependencies.Input(
{"type": "add-element", "index": dash.dependencies.MATCH}, "n_clicks"
),
[
dash.dependencies.State(
{"type": "elements", "index": dash.dependencies.MATCH}, "data"
),
],
prevent_initial_call=True,
)
def add_element(n_clicks, elements):
if n_clicks is None or n_clicks < len(elements):
return elements
df = pd.DataFrame.from_records(elements)
df = df.append(
pd.Series(
data={
"id": len(df) + 1,
"competence": "",
"theme": "",
"comment": "",
"score_rate": 1,
"is_leveled": 1,
},
name=n_clicks,
)
)
return df.to_dict("records")
def exam_generalities(tribe, exam_name, date, term, exercices=[], elements=[]):
return [
html.H1(f"{exam_name} pour les {tribe}"),
html.P(f"Fait le {date} (Trimestre {term})"),
]
def exercise_summary(identifier, name, elements=[]):
df = pd.DataFrame.from_records(elements)
return html.Div(
[
html.H2(name),
dash_table.DataTable(
columns=[{"id": c, "name": c} for c in df], data=elements
),
]
)
@app.callback(
dash.dependencies.Output("exam_store", "data"),
[
dash.dependencies.Input("tribe", "value"),
dash.dependencies.Input("exam_name", "value"),
dash.dependencies.Input("date", "date"),
dash.dependencies.Input("term", "value"),
dash.dependencies.Input(
{"type": "exercice", "index": dash.dependencies.ALL}, "value"
),
dash.dependencies.Input(
{"type": "elements", "index": dash.dependencies.ALL}, "data"
),
],
dash.dependencies.State({"type": "elements", "index": dash.dependencies.ALL}, "id"),
)
def store_exam(tribe, exam_name, date, term, exercices, elements, elements_id):
exam = Exam(exam_name, tribe, date, term)
for (i, name) in enumerate(exercices):
ex_elements_id = [el for el in elements_id if el["index"] == str(i + 1)][0]
index = elements_id.index(ex_elements_id)
ex_elements = elements[index]
exam.add_exercise(name, ex_elements)
return exam.to_dict()
@app.callback(
dash.dependencies.Output("score_rate", "children"),
dash.dependencies.Input("exam_store", "data"),
prevent_initial_call=True,
)
def score_rate(data):
exam = Exam(**data)
return [html.P(f"Barème /{exam.score_rate}")]
@app.callback(
dash.dependencies.Output("competences-viz", "figure"),
dash.dependencies.Input("exam_store", "data"),
prevent_initial_call=True,
)
def competences_viz(data):
exam = Exam(**data)
return [html.P(str(exam.competences_rate))]
@app.callback(
dash.dependencies.Output("themes-viz", "children"),
dash.dependencies.Input("exam_store", "data"),
prevent_initial_call=True,
)
def themes_viz(data):
exam = Exam(**data)
themes_rate = exam.themes_rate
fig = go.Figure()
if themes_rate:
fig.add_trace(go.Pie(labels=list(themes_rate.keys()), values=list(themes_rate.values())))
return [dcc.Graph(figure=fig)]
return []
@app.callback(
dash.dependencies.Output("is-saved", "children"),
dash.dependencies.Input("save-csv", "n_clicks"),
dash.dependencies.State("exam_store", "data"),
prevent_initial_call=True,
)
def save_to_csv(n_clicks, data):
exam = Exam(**data)
csv = exam.path(".csv")
exam.write_csv()
return [f"Dernière sauvegarde {datetime.today()} dans {csv}"]
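
The exercise editor above is built on Dash pattern-matching callbacks: components get dictionary ids, and the MATCH/ALL wildcards bind one callback to a whole family of them. A stripped-down sketch of the same mechanism, assuming Dash 1.11 or later; the ids and labels below are illustrative, not the ones used in the file above:

# Sketch of pattern-matching callbacks: one button adds blocks, and a single
# MATCH callback serves every block that will ever be created.
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State, MATCH

app = dash.Dash(__name__, suppress_callback_exceptions=True)

app.layout = html.Div(
    [
        html.Button("Ajouter un exercice", id="add"),
        html.Div(id="container", children=[]),
    ]
)


@app.callback(
    Output("container", "children"),
    Input("add", "n_clicks"),
    State("container", "children"),
)
def add_block(n_clicks, children):
    if n_clicks is None:
        return children
    # Dictionary ids let the MATCH callback below bind to each new block.
    children.append(
        html.Div(
            [
                dcc.Input(id={"type": "name", "index": n_clicks}, value=f"Exercice {n_clicks}"),
                html.P(id={"type": "echo", "index": n_clicks}),
            ]
        )
    )
    return children


@app.callback(
    Output({"type": "echo", "index": MATCH}, "children"),
    Input({"type": "name", "index": MATCH}, "value"),
)
def echo(value):
    return value


if __name__ == "__main__":
    app.run_server(debug=True)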

View File

@@ -11,12 +11,12 @@ from pathlib import Path
 from datetime import datetime
 
 import pandas as pd
 import numpy as np
-import dash_bootstrap_components as dbc
 
-from .. import flat_df_students, pp_q_scores
-from ..config import NO_ST_COLUMNS
-from ..scripts.getconfig import config, CONFIGPATH
+from ... import flat_df_students, pp_q_scores
+from ...config import NO_ST_COLUMNS
+from ...scripts.getconfig import config
+from ..app import app
 
 COLORS = {
     ".": "black",
@@ -26,11 +26,7 @@ COLORS = {
     3: "#68D42F",
 }
 
-# external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
-# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
-app = dash.Dash(__name__)
-
-app.layout = html.Div(
+layout = html.Div(
     children=[
         html.Header(
             children=[
@@ -82,9 +78,9 @@ app.layout = html.Div(
         dash_table.DataTable(
             id="final_score_table",
             columns=[
-                {"id": "Élève", "name": "Élève"},
+                {"id": "Eleve", "name": "Élève"},
                 {"id": "Note", "name": "Note"},
-                {"id": "Barème", "name": "Barème"},
+                {"id": "Bareme", "name": "Barème"},
             ],
             data=[],
             style_data_conditional=[
@@ -131,12 +127,22 @@ app.layout = html.Div(
         dash_table.DataTable(
             id="scores_table",
             columns=[
-                {"id": c, "name": c} for c in NO_ST_COLUMNS.values()
+                {"id": "id", "name": "Question"},
+                {
+                    "id": "competence",
+                    "name": "Competence",
+                },
+                {"id": "theme", "name": "Domaine"},
+                {"id": "comment", "name": "Commentaire"},
+                {"id": "score_rate", "name": "Bareme"},
+                {"id": "is_leveled", "name": "Est_nivele"},
             ],
             style_cell={
                 "whiteSpace": "normal",
                 "height": "auto",
             },
+            fixed_columns={"headers": True, "data": 7},
+            style_table={"minWidth": "100%"},
             style_data_conditional=[],
             editable=True,
         ),
@@ -186,7 +192,7 @@ def update_final_scores(data):
     scores = pd.DataFrame.from_records(data)
 
     try:
-        if scores.iloc[0]["Commentaire"] == "commentaire":
+        if scores.iloc[0]["Commentaire"] == "commentaire" or scores.iloc[0].str.contains("PPRE").any():
             scores.drop([0], inplace=True)
     except KeyError:
         pass
@@ -220,7 +226,7 @@ def update_final_scores_descr(data):
     scores = pd.DataFrame.from_records(data)
     if scores.empty:
         return [[{}]]
 
-    desc = scores["Note"].describe().T
+    desc = scores["Note"].describe().T.round(2)
 
     return [[desc.to_dict()]]
@@ -234,7 +240,7 @@ def update_final_scores_hist(data):
     assessment_scores = pd.DataFrame.from_records(data)
 
     if assessment_scores.empty:
-        return [{}]
+        return [go.Figure(data=[go.Scatter(x=[], y=[])])]
 
     ranges = np.linspace(
         -0.5,
@@ -274,14 +280,14 @@ def update_final_scores_hist(data):
 def update_competence_fig(data):
     scores = pd.DataFrame.from_records(data)
     try:
-        if scores.iloc[0]["Commentaire"] == "commentaire":
+        if scores.iloc[0]["Commentaire"] == "commentaire" or scores.iloc[0].str.contains("PPRE").any():
             scores.drop([0], inplace=True)
     except KeyError:
         pass
 
     scores = flat_df_students(scores).dropna(subset=["Score"])
     if scores.empty:
-        return [{}]
+        return [go.Figure(data=[go.Scatter(x=[], y=[])])]
 
     scores = pp_q_scores(scores)
     pt = pd.pivot_table(
@@ -305,7 +311,7 @@ def update_competence_fig(data):
     fig = go.Figure()
     bars = [
         {"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
-        {"score": 0, "name": "Faut", "color": COLORS[0]},
+        {"score": 0, "name": "Faux", "color": COLORS[0]},
         {"score": 1, "name": "Peu juste", "color": COLORS[1]},
         {"score": 2, "name": "Presque juste", "color": COLORS[2]},
         {"score": 3, "name": "Juste", "color": COLORS[3]},
@@ -337,6 +343,7 @@ def update_competence_fig(data):
 def save_scores(data, csv):
     try:
         scores = pd.DataFrame.from_records(data)
+        scores = scores_table_column_order(scores)
         scores.to_csv(csv, index=False)
     except:
         return [f"Soucis pour sauvegarder à {datetime.today()} dans {csv}"]
@@ -359,6 +366,11 @@ def highlight_value(df):
     ]
     return hight
 
 
+def scores_table_column_order(df):
+    df_student_columns = [c for c in df.columns if c not in NO_ST_COLUMNS.values()]
+    order = list(NO_ST_COLUMNS.values())+df_student_columns
+    return df.loc[:, order]
+
+
 @app.callback(
     [
@@ -382,8 +394,13 @@ def update_scores_table(csv, add_element, data):
             [{k: stack.iloc[-1][k] for k in NO_ST_COLUMNS.values()}]
         )
         stack = stack.append(infos)
+        stack = scores_table_column_order(stack)
 
         return (
-            [{"id": c, "name": c} for c in stack.columns],
+            [
+                {"id": c, "name": c}
+                for c in stack.columns
+                if c not in ["Trimestre", "Nom", "Date"]
+            ],
             stack.to_dict("records"),
             highlight_value(stack),
         )
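
The scores_table_column_order helper added in this file drives both the save and the table refresh: the fixed metadata columns (the NO_ST_COLUMNS values) come first, followed by whatever student columns the CSV contains. A toy demonstration with a reduced stand-in mapping (the real module uses the full NO_ST_COLUMNS dict):

# Toy check of the column re-ordering; the three-entry mapping is a stand-in.
import pandas as pd

NO_ST_COLUMNS = {
    "assessment": "Nom",
    "question": "Question",
    "score_rate": "Bareme",
}


def scores_table_column_order(df):
    df_student_columns = [c for c in df.columns if c not in NO_ST_COLUMNS.values()]
    order = list(NO_ST_COLUMNS.values()) + df_student_columns
    return df.loc[:, order]


df = pd.DataFrame(
    [["1", 2, "DS1", 3, 1]],
    columns=["Question", "Alice", "Nom", "Bareme", "Bob"],
)
print(list(scores_table_column_order(df).columns))
# ['Nom', 'Question', 'Bareme', 'Alice', 'Bob']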

View File

@@ -0,0 +1,29 @@
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from .app import app
from .exam_analysis import app as exam_analysis
from .create_exam import app as create_exam
from .student_analysis import app as student_analysis
app.layout = html.Div(
[dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
)
@app.callback(Output("page-content", "children"), Input("url", "pathname"))
def display_page(pathname):
if pathname == "/":
return exam_analysis.layout
elif pathname == "/create-exam":
return create_exam.layout
elif pathname == "/students":
return student_analysis.layout
else:
return "404"
if __name__ == "__main__":
app.run_server(debug=True)

View File

@@ -0,0 +1,300 @@
#!/usr/bin/env python
# encoding: utf-8
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table
import plotly.graph_objects as go
from datetime import date, datetime
import uuid
import pandas as pd
import yaml
from pathlib import Path
from ...scripts.getconfig import config
from ... import flat_df_students, pp_q_scores
from ...config import NO_ST_COLUMNS
from ..app import app
from ...scripts.exam import Exam
def get_students(csv):
return list(pd.read_csv(csv).T.to_dict().values())
COLORS = {
".": "black",
0: "#E7472B",
1: "#FF712B",
2: "#F2EC4C",
3: "#68D42F",
}
QUESTION_COLUMNS = [
{"id": "id", "name": "Question"},
{
"id": "competence",
"name": "Competence",
"presentation": "dropdown",
},
{"id": "theme", "name": "Domaine"},
{"id": "comment", "name": "Commentaire"},
{"id": "score_rate", "name": "Bareme"},
{"id": "is_leveled", "name": "Est_nivele"},
]
layout = html.Div(
[
html.Header(
children=[
html.H1("Bilan des élèves"),
],
),
html.Main(
children=[
html.Section(
children=[
html.Form(
id="select-student",
children=[
html.Label(
children=[
"Classe",
dcc.Dropdown(
id="tribe",
options=[
{"label": t["name"], "value": t["name"]}
for t in config["tribes"]
],
value=config["tribes"][0]["name"],
),
]
),
html.Label(
children=[
"Élève",
dcc.Dropdown(
id="student",
options=[
{"label": t["Nom"], "value": t["Nom"]}
for t in get_students(config["tribes"][0]["students"])
],
value=get_students(config["tribes"][0]["students"])[0]["Nom"],
),
]
),
html.Label(
children=[
"Trimestre",
dcc.Dropdown(
id="term",
options=[
{"label": i + 1, "value": i + 1}
for i in range(3)
],
value=1,
),
]
),
],
),
],
id="form",
),
html.Section(
children=[
html.H2("Évaluations"),
html.Div(
dash_table.DataTable(
id="exam_scores",
columns=[
{"id": "Nom", "name": "Évaluations"},
{"id": "Note", "name": "Note"},
{"id": "Bareme", "name": "Barème"},
],
data=[],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": "rgb(248, 248, 248)",
}
],
style_data={
"width": "100px",
"maxWidth": "100px",
"minWidth": "100px",
},
),
id="eval-table",
),
],
id="Évaluations",
),
html.Section(
children=[
html.Div(
id="competences-viz",
),
html.Div(
id="themes-vizz",
),
],
id="visualisation",
),
]
),
dcc.Store(id="student-scores"),
]
)
@app.callback(
[
dash.dependencies.Output("student", "options"),
dash.dependencies.Output("student", "value"),
],
[
dash.dependencies.Input("tribe", "value")
],)
def update_students_list(tribe):
tribe_config = [t for t in config["tribes"] if t["name"] == tribe][0]
students = get_students(tribe_config["students"])
options = [
{"label": t["Nom"], "value": t["Nom"]}
for t in students
]
value = students[0]["Nom"]
return options, value
@app.callback(
[
dash.dependencies.Output("student-scores", "data"),
],
[
dash.dependencies.Input("tribe", "value"),
dash.dependencies.Input("student", "value"),
dash.dependencies.Input("term", "value"),
],
)
def update_student_scores(tribe, student, term):
tribe_config = [t for t in config["tribes"] if t["name"] == tribe][0]
p = Path(tribe_config["name"])
csvs = list(p.glob("*.csv"))
dfs = []
for csv in csvs:
try:
scores = pd.read_csv(csv)
except pd.errors.ParserError:
pass
else:
if scores.iloc[0]["Commentaire"] == "commentaire" or scores.iloc[0].str.contains("PPRE").any():
scores.drop([0], inplace=True)
scores = flat_df_students(scores).dropna(subset=["Score"])
scores = scores[scores["Eleve"] == student]
scores = scores[scores["Trimestre"] == term]
dfs.append(scores)
df = pd.concat(dfs)
return [df.to_dict("records")]
@app.callback(
[
dash.dependencies.Output("exam_scores", "data"),
],
[
dash.dependencies.Input("student-scores", "data"),
],
)
def update_exam_scores(data):
scores = pd.DataFrame.from_records(data)
scores = pp_q_scores(scores)
assessment_scores = scores.groupby(["Nom"]).agg({"Note": "sum", "Bareme": "sum"})
return [assessment_scores.reset_index().to_dict("records")]
@app.callback(
[
dash.dependencies.Output("competences-viz", "children"),
],
[
dash.dependencies.Input("student-scores", "data"),
],
)
def update_competences_viz(data):
scores = pd.DataFrame.from_records(data)
scores = pp_q_scores(scores)
pt = pd.pivot_table(
scores,
index=["Competence"],
columns="Score",
aggfunc="size",
fill_value=0,
)
fig = go.Figure()
bars = [
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
{"score": 0, "name": "Faux", "color": COLORS[0]},
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
{"score": 3, "name": "Juste", "color": COLORS[3]},
]
for b in bars:
try:
fig.add_bar(
x=list(config["competences"].keys()), y=pt[b["score"]], name=b["name"], marker_color=b["color"]
)
except KeyError:
pass
fig.update_layout(barmode="relative")
fig.update_layout(
height=500,
margin=dict(l=5, r=5, b=5, t=5),
)
return [dcc.Graph(figure=fig)]
@app.callback(
[
dash.dependencies.Output("themes-vizz", "children"),
],
[
dash.dependencies.Input("student-scores", "data"),
],
)
def update_themes_viz(data):
scores = pd.DataFrame.from_records(data)
scores = pp_q_scores(scores)
pt = pd.pivot_table(
scores,
index=["Domaine"],
columns="Score",
aggfunc="size",
fill_value=0,
)
fig = go.Figure()
bars = [
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
{"score": 0, "name": "Faux", "color": COLORS[0]},
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
{"score": 3, "name": "Juste", "color": COLORS[3]},
]
for b in bars:
try:
fig.add_bar(
x=list(pt.index), y=pt[b["score"]], name=b["name"], marker_color=b["color"]
)
except KeyError:
pass
fig.update_layout(barmode="relative")
fig.update_layout(
height=500,
margin=dict(l=5, r=5, b=5, t=5),
)
return [dcc.Graph(figure=fig)]

View File

@@ -4,7 +4,7 @@
 import pandas as pd
 import numpy as np
 from math import ceil, floor
-from .config import COLUMNS, VALIDSCORE
+from .config import COLUMNS
 
 """
 Functions for manipulate score dataframes
@@ -49,7 +49,7 @@ def score_to_mark(x):
             raise ValueError(
                 f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}"
             )
-        return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)
+        return round(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3, 2)
 
     if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
         raise ValueError(
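
This change drops the half-point rounding of marks in favour of keeping two decimals. A quick illustration of the difference; round_half_point is reimplemented below only to show the assumed old behaviour:

# Comparison of the old and new rounding on a level of 2 for a 5-point question.
def round_half_point(value):
    # Assumed behaviour of the removed helper: round to the nearest 0.5.
    return round(2 * value) / 2


def mark_old(score, score_rate):
    return round_half_point(score * score_rate / 3)


def mark_new(score, score_rate):
    return round(score * score_rate / 3, 2)


print(mark_old(2, 5))  # 3.5
print(mark_new(2, 5))  # 3.33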

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python
# encoding: utf-8
import yaml
CONFIGPATH = "recoconfig.yml"
with open(CONFIGPATH, "r") as configfile:
config = yaml.load(configfile, Loader=yaml.FullLoader)
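
This loader expects a recoconfig.yml in the working directory; the file itself is not part of the comparison. The sketch below only illustrates the structure implied by how config is used elsewhere in the diff (source, tribes with name/students, competences with name/abrv); all values are invented:

# Illustrative only: a config shaped like the accesses made elsewhere in the diff.
import yaml

SAMPLE = """
source: sheets/
output: ./
templates: templates/
tribes:
  - name: "308"
    students: 308/eleves.csv
competences:
  Chercher:
    name: Chercher
    abrv: Cher
  Représenter:
    name: Représenter
    abrv: Rep
"""

config = yaml.load(SAMPLE, Loader=yaml.FullLoader)
print(config["tribes"][0]["name"])  # 308
print(list(config["competences"]))  # ['Chercher', 'Représenter']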

View File

@@ -3,23 +3,43 @@
 from datetime import datetime
 from pathlib import Path
 
-from prompt_toolkit import HTML
+# from prompt_toolkit import HTML
+from ..config import NO_ST_COLUMNS
+import pandas as pd
 import yaml
 
 from .getconfig import config
 
 
+def try_parsing_date(text, formats=["%Y-%m-%d", "%Y.%m.%d", "%Y/%m/%d"]):
+    for fmt in formats:
+        try:
+            return datetime.strptime(text[:10], fmt)
+        except ValueError:
+            pass
+
+    raise ValueError("no valid date format found")
+
+
+def format_question(question):
+    question["score_rate"] = float(question["score_rate"])
+    return question
+
+
 class Exam:
     def __init__(self, name, tribename, date, term, **kwrds):
         self._name = name
         self._tribename = tribename
-        self._date = datetime.strptime(date, "%y%m%d")
+        try:
+            self._date = try_parsing_date(date)
+        except:
+            self._date = date
         self._term = term
 
-        self._exercises = {}
+        try:
+            kwrds["exercices"]
+        except KeyError:
+            self._exercises = {}
+        else:
+            self._exercises = kwrds["exercices"]
 
     @property
     def name(self):
@@ -38,11 +58,13 @@ class Exam:
         return self._term
 
     def add_exercise(self, name, questions):
-        """ Add key with questions in ._exercises """
+        """Add key with questions in ._exercises"""
         try:
             self._exercises[name]
         except KeyError:
-            self._exercises[name] = questions
+            self._exercises[name] = [
+                format_question(question) for question in questions
+            ]
         else:
             raise KeyError("The exercise already exsists. Use modify_exercise")
@@ -58,9 +80,9 @@ class Exam:
             raise KeyError("The exercise already exsists. Use modify_exercise")
         else:
             if append:
-                self._exercises[name] += questions
+                self._exercises[name] += format_question(questions)
             else:
-                self._exercises[name] = questions
+                self._exercises[name] = format_question(questions)
 
     @property
     def exercices(self):
@@ -81,7 +103,7 @@ class Exam:
     @property
     def long_name(self):
-        """ Get exam name with date inside """
+        """Get exam name with date inside"""
         return f"{self.date.strftime('%y%m%d')}_{self.name}"
 
     def path(self, extention=""):
@@ -125,8 +147,65 @@ class Exam:
     def display(self, name):
         pass
 
-    def write(self):
+    def write_yaml(self):
         print(f"Sauvegarde temporaire dans {self.path('.yml')}")
         self.tribe_path.mkdir(exist_ok=True)
         with open(self.path(".yml"), "w") as f:
             f.write(yaml.dump(self.to_dict()))
 
+    def write_csv(self):
+        rows = self.to_row()
+        print(rows)
+        base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
+        base_df.rename(columns=NO_ST_COLUMNS, inplace=True)
+
+        students = pd.read_csv(self.tribe_student_path)["Nom"]
+        for student in students:
+            base_df[student] = ""
+
+        self.tribe_path.mkdir(exist_ok=True)
+        base_df.to_csv(self.path(".csv"), index=False)
+
+    @property
+    def score_rate(self):
+        total = 0
+        for ex, questions in self._exercises.items():
+            total += sum([q["score_rate"] for q in questions])
+
+        return total
+
+    @property
+    def competences_rate(self):
+        """Dictionnary with competences as key and total rate as value"""
+        rates = {}
+        for ex, questions in self._exercises.items():
+            for q in questions:
+                try:
+                    q["competence"]
+                except KeyError:
+                    pass
+                else:
+                    try:
+                        rates[q["competence"]] += q["score_rate"]
+                    except KeyError:
+                        rates[q["competence"]] = q["score_rate"]
+
+        return rates
+
+    @property
+    def themes_rate(self):
+        """Dictionnary with themes as key and total rate as value"""
+        rates = {}
+        for ex, questions in self._exercises.items():
+            for q in questions:
+                try:
+                    q["theme"]
+                except KeyError:
+                    pass
+                else:
+                    if q["theme"]:
+                        try:
+                            rates[q["theme"]] += q["score_rate"]
+                        except KeyError:
+                            rates[q["theme"]] = q["score_rate"]
+
+        return rates
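
A usage sketch of the new aggregate properties on Exam, with hand-built questions following the keys used by the create_exam page. It assumes the package layout recopytex/scripts/exam.py and a recoconfig.yml present in the working directory, since the module reads it at import time; all values are illustrative:

# Illustrative only: how the new score_rate / competences_rate / themes_rate
# properties aggregate the per-question "score_rate" values.
from recopytex.scripts.exam import Exam  # assumed module path

exam = Exam("DS1", "308", "2021-01-22", 2)
exam.add_exercise(
    "Exercice 1",
    [
        {"id": 1, "competence": "Chercher", "theme": "Fractions",
         "comment": "", "score_rate": 2, "is_leveled": 1},
        {"id": 2, "competence": "Représenter", "theme": "Fractions",
         "comment": "", "score_rate": 3, "is_leveled": 1},
    ],
)

print(exam.score_rate)        # 5.0
print(exam.competences_rate)  # {'Chercher': 2.0, 'Représenter': 3.0}
print(exam.themes_rate)       # {'Fractions': 5.0}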

View File

@@ -0,0 +1,160 @@
#!/usr/bin/env python
# encoding: utf-8
import click
from pathlib import Path
from datetime import datetime
from PyInquirer import prompt, print_json
import pandas as pd
import numpy as np
from .config import config
from ..config import NO_ST_COLUMNS
class PromptAbortException(EOFError):
def __init__(self, message, errors=None):
# Call the base class constructor with the parameters it needs
super(PromptAbortException, self).__init__("Abort questionnary", errors)
def get_tribes(answers):
""" List tribes based on subdirectory of config["source"] which have an "eleves.csv" file inside """
return [
p.name for p in Path(config["source"]).iterdir() if (p / "eleves.csv").exists()
]
def prepare_csv():
items = new_eval()
item = items[0]
# item = {"tribe": "308", "date": datetime.today(), "assessment": "plop"}
csv_output = (
Path(config["source"])
/ item["tribe"]
/ f"{item['date']:%y%m%d}_{item['assessment']}.csv"
)
students = pd.read_csv(Path(config["source"]) / item["tribe"] / "eleves.csv")["Nom"]
columns = list(NO_ST_COLUMNS.keys())
items = [[it[c] for c in columns] for it in items]
columns = list(NO_ST_COLUMNS.values())
items_df = pd.DataFrame.from_records(items, columns=columns)
for s in students:
items_df[s] = np.nan
items_df.to_csv(csv_output, index=False, date_format="%d/%m/%Y")
click.echo(f"Saving csv file to {csv_output}")
def new_eval(answers={}):
click.echo(f"Préparation d'un nouveau devoir")
eval_questions = [
{"type": "input", "name": "assessment", "message": "Nom de l'évaluation",},
{
"type": "list",
"name": "tribe",
"message": "Classe concernée",
"choices": get_tribes,
},
{
"type": "input",
"name": "date",
"message": "Date du devoir (%y%m%d)",
"default": datetime.today().strftime("%y%m%d"),
"filter": lambda val: datetime.strptime(val, "%y%m%d"),
},
{
"type": "list",
"name": "term",
"message": "Trimestre",
"choices": ["1", "2", "3"],
},
]
eval_ans = prompt(eval_questions)
items = []
add_exo = True
while add_exo:
ex_items = new_exercice(eval_ans)
items += ex_items
add_exo = prompt(
[
{
"type": "confirm",
"name": "add_exo",
"message": "Ajouter un autre exercice",
"default": True,
}
]
)["add_exo"]
return items
def new_exercice(answers={}):
exercise_questions = [
{"type": "input", "name": "exercise", "message": "Nom de l'exercice"},
]
click.echo(f"Nouvel exercice")
exercise_ans = prompt(exercise_questions, answers=answers)
items = []
add_item = True
while add_item:
try:
item_ans = new_item(exercise_ans)
except PromptAbortException:
click.echo("Création de l'item annulée")
else:
items.append(item_ans)
add_item = prompt(
[
{
"type": "confirm",
"name": "add_item",
"message": f"Ajouter un autre item pour l'exercice {exercise_ans['exercise']}",
"default": True,
}
]
)["add_item"]
return items
def new_item(answers={}):
item_questions = [
{"type": "input", "name": "question", "message": "Nom de l'item",},
{"type": "input", "name": "comment", "message": "Commentaire",},
{
"type": "list",
"name": "competence",
"message": "Competence",
"choices": ["Cher", "Rep", "Mod", "Rai", "Cal", "Com"],
},
{"type": "input", "name": "theme", "message": "Domaine",},
{
"type": "confirm",
"name": "is_leveled",
"message": "Évaluation par niveau",
"default": True,
},
{"type": "input", "name": "score_rate", "message": "Bareme"},
{
"type": "confirm",
"name": "correct",
"message": "Tout est correct?",
"default": True,
},
]
click.echo(f"Nouvelle question pour l'exercice {answers['exercise']}")
item_ans = prompt(item_questions, answers=answers)
if item_ans["correct"]:
return item_ans
raise PromptAbortException("Abort item creation")

View File

@@ -10,10 +10,9 @@ from datetime import datetime
 import yaml
 
 from .getconfig import config, CONFIGPATH
-from .prompts import prompt_exam, prompt_exercise, prompt_validate
 from ..config import NO_ST_COLUMNS
 from .exam import Exam
-from ..dashboard.exam import app as exam_app
+from ..dashboard.index import app as dash
 
 
 @click.group()
@@ -37,59 +36,10 @@ def setup():
         print(f"The file {tribe['students']} does not exists")
 
 
-@cli.command()
-def new_exam():
-    """ Create new exam csv file """
-    exam = Exam(**prompt_exam())
-
-    if exam.path(".yml").exists():
-        print(f"Fichier sauvegarde trouvé à {exam.path('.yml')} -- importation")
-        with open(exam.path(".yml"), "r") as f:
-            for name, questions in yaml.load(f, Loader=yaml.SafeLoader)[
-                "exercices"
-            ].items():
-                exam.add_exercise(name, questions)
-
-    print(exam.themes)
-
-    # print(yaml.dump(exam.to_dict()))
-    exam.write()
-
-    for name, questions in exam.exercices.items():
-        exam.modify_exercise(
-            **prompt_exercise(
-                name=name, completer={"theme": exam.themes}, questions=questions
-            )
-        )
-        exam.write()
-
-    new_exercise = prompt_validate("Ajouter un exercice? ")
-    while new_exercise:
-        exam.add_exercise(
-            **prompt_exercise(len(exam.exercices) + 1, completer={"theme": exam.themes})
-        )
-        exam.write()
-        new_exercise = prompt_validate("Ajouter un exercice? ")
-
-    rows = exam.to_row()
-    base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
-    base_df.rename(columns=NO_ST_COLUMNS, inplace=True)
-
-    students = pd.read_csv(exam.tribe_student_path)["Nom"]
-    for student in students:
-        base_df[student] = ""
-
-    exam.tribe_path.mkdir(exist_ok=True)
-    base_df.to_csv(exam.path(".csv"), index=False)
-    print(f"Le fichier note a été enregistré à {exam.path('.csv')}")
-
-
 @cli.command()
 @click.option("--debug", default=0, help="Debug mode for dash")
-def exam_analysis(debug):
-    exam_app.run_server(debug=bool(debug))
+def dashboard(debug):
+    dash.run_server(debug=bool(debug))
 
 
 @cli.command()
@@ -112,7 +62,7 @@ def report(csv_file):
     try:
         date = datetime.strptime(date, "%y%m%d")
     except ValueError:
-        date = None
+        date = datetime.today().strptime(date, "%y%m%d")
 
     tribe = str(tribe_dir).split("/")[-1]

View File

@@ -1,4 +1,77 @@
-pandas
-click
-papermill
 prompt_toolkit
ansiwrap==0.8.4
appdirs==1.4.3
attrs==19.1.0
backcall==0.1.0
black==19.10b0
bleach==3.1.0
certifi==2019.6.16
chardet==3.0.4
Click==7.0
colorama==0.4.1
cycler==0.10.0
decorator==4.4.0
defusedxml==0.6.0
entrypoints==0.3
future==0.17.1
idna==2.8
importlib-resources==1.0.2
ipykernel==5.1.3
ipython==7.11.1
ipython-genutils==0.2.0
ipywidgets==7.5.1
jedi==0.15.2
Jinja2==2.10.3
jsonschema==3.2.0
jupyter==1.0.0
jupyter-client==5.3.4
jupyter-console==6.1.0
jupyter-core==4.6.1
jupytex==0.0.3
kiwisolver==1.1.0
Markdown==3.1.1
MarkupSafe==1.1.1
matplotlib==3.1.2
mistune==0.8.4
nbconvert==5.6.1
nbformat==5.0.3
notebook==6.0.3
numpy==1.18.1
pandas==0.25.3
pandocfilters==1.4.2
papermill==1.2.1
parso==0.5.2
pathspec==0.7.0
pexpect==4.8.0
pickleshare==0.7.5
prometheus-client==0.7.1
prompt-toolkit==1.0.14
ptyprocess==0.6.0
Pygments==2.5.2
PyInquirer==1.0.3
pyparsing==2.4.6
pyrsistent==0.15.7
python-dateutil==2.8.0
pytz==2019.3
PyYAML==5.3
pyzmq==18.1.1
qtconsole==4.6.0
-e git+git_opytex:/lafrite/recopytex.git@7e026bedb24c1ca8bef3b71b3d63f8b0d6916e81#egg=Recopytex
regex==2020.1.8
requests==2.22.0
scipy==1.4.1
Send2Trash==1.5.0
six==1.12.0
tenacity==6.0.0
terminado==0.8.3
testpath==0.4.4
textwrap3==0.9.2
toml==0.10.0
tornado==6.0.3
tqdm==4.41.1
traitlets==4.3.2
typed-ast==1.4.1
urllib3==1.25.8
wcwidth==0.1.8
webencodings==0.5.1
widgetsnbextension==3.5.1

View File

@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 setup(
     name='Recopytex',
-    version='0.1',
+    version='1.1.1',
     description='Assessment analysis',
     author='Benjamin Bertrand',
     author_email='',
@@ -13,6 +13,10 @@ setup(
     include_package_data=True,
     install_requires=[
         'Click',
+        'pandas',
+        'numpy',
+        'papermill',
+        'pyyaml',
     ],
     entry_points='''
         [console_scripts]