Feat: Start filesystem loader

parent a0e94f52b1
commit c1fd060707

example/Tribe1/210122_DS6.csv (new file, 5 lines)
@ -0,0 +1,5 @@
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS6,22/01/2021,Exercice 1,Sait pas,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,Ha,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 2,grr,,,,,,,,,,,,,,,,,,,,,,,,,
@ -1,5 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

from .csv_extraction import flat_df_students, flat_df_for
from .df_marks_manip import pp_q_scores
@ -1,30 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

NO_ST_COLUMNS = {
    "term": "Trimestre",
    "assessment": "Nom",
    "date": "Date",
    "exercise": "Exercice",
    "question": "Question",
    "competence": "Competence",
    "theme": "Domaine",
    "comment": "Commentaire",
    "score_rate": "Bareme",
    "is_leveled": "Est_nivele",
}

COLUMNS = {
    **NO_ST_COLUMNS,
    "student": "Eleve",
    "score": "Score",
    "mark": "Note",
    "level": "Niveau",
    "normalized": "Normalise",
}

VALIDSCORE = {
    "NOTFILLED": "",  # The item is not scored yet
    "NOANSWER": ".",  # The student gave no answer (this score impacts the final mark)
    "ABS": "a",  # The student was absent (this score does not impact the final mark)
}
@ -1,119 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

""" Extracting data from xlsx files """

import pandas as pd
from .config import NO_ST_COLUMNS, COLUMNS, VALIDSCORE

pd.set_option("Precision", 2)


def try_replace(x, old, new):
    try:
        return str(x).replace(old, new)
    except ValueError:
        return x


def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
    """Extract the list of students from df

    :param df: the dataframe
    :param no_student_columns: columns that are not students
    :return: list of students
    """
    students = df.columns.difference(no_student_columns)
    return students


def flat_df_students(
    df, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """Flatten the dataframe, returning a dataframe with one row per student and question

    :param df: the dataframe (one row per question)
    :param no_student_columns: columns that are not students
    :return: dataframe with one row per question and student

    Columns of the csv files:

    - NO_ST_COLUMNS: metadata on the questions
    - one column per student

    This function melts the student columns into "student" and "score"
    """
    students = extract_students(df, no_student_columns)
    scores = []
    for st in students:
        scores.append(
            pd.melt(
                df,
                id_vars=no_student_columns,
                value_vars=st,
                var_name=COLUMNS["student"],
                value_name=COLUMNS["score"],
            ).dropna(subset=[COLUMNS["score"]])
        )
    if postprocessing:
        return postprocess(pd.concat(scores))
    return pd.concat(scores)


def flat_df_for(
    df, student, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """Extract the data for a single student

    :param df: the dataframe (one row per question)
    :param student: the student name
    :param no_student_columns: columns that are not students
    :return: dataframe with one row per question for that student

    Columns of the csv files:

    - NO_ST_COLUMNS: metadata on the questions
    - one column per student
    """
    students = extract_students(df, no_student_columns)
    if student not in students:
        raise KeyError("This student is not in the table")
    st_df = df[list(no_student_columns) + [student]]
    st_df = st_df.rename(columns={student: COLUMNS["score"]}).dropna(
        subset=[COLUMNS["score"]]
    )
    if postprocessing:
        return postprocess(st_df)
    return st_df


def postprocess(df):
    """Postprocess the score dataframe

    - Replace NaN with an empty string
    - Replace the "NOANSWER" symbol with -1
    - Turn comma decimal numbers into dot decimal numbers
    """

    df[COLUMNS["question"]].fillna("", inplace=True)
    df[COLUMNS["exercise"]].fillna("", inplace=True)
    df[COLUMNS["comment"]].fillna("", inplace=True)
    df[COLUMNS["competence"]].fillna("", inplace=True)

    df[COLUMNS["score"]] = pd.to_numeric(
        df[COLUMNS["score"]]
        .replace(VALIDSCORE["NOANSWER"], -1)
        .apply(lambda x: try_replace(x, ",", "."))
    )
    df[COLUMNS["score_rate"]] = pd.to_numeric(
        df[COLUMNS["score_rate"]].apply(lambda x: try_replace(x, ",", ".")),
        errors="coerce",
    )

    return df


# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
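
Below is a minimal usage sketch of the removed flat_df_students helper (illustrative only, not part of this diff; the student columns "Alice" and "Bob" are made up):

# Sketch: melt a wide scores table (one column per student) into long format.
# The student names are hypothetical; the other columns follow NO_ST_COLUMNS.
import pandas as pd
from recopytex.csv_extraction import flat_df_students

wide = pd.DataFrame(
    {
        "Trimestre": [1, 1],
        "Nom": ["DS6", "DS6"],
        "Date": ["22/01/2021", "22/01/2021"],
        "Exercice": ["Exercice 1", "Exercice 1"],
        "Question": ["1a", "1b"],
        "Competence": ["", ""],
        "Domaine": ["", ""],
        "Commentaire": ["", ""],
        "Bareme": [1, 2],
        "Est_nivele": [1, 1],
        "Alice": [3, 2],
        "Bob": [".", 1],  # "." (no answer) is mapped to -1 by postprocess
    }
)
long = flat_df_students(wide)  # one row per question and student, with "Eleve" and "Score" columns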
@ -1,5 +0,0 @@
import dash

app = dash.Dash(__name__, suppress_callback_exceptions=True)
# app = dash.Dash(__name__)
server = app.server
@ -1,66 +0,0 @@
body {
    margin: 0px;
    font-family: 'Source Sans Pro','Roboto','Open Sans','Liberation Sans','DejaVu Sans','Verdana','Helvetica','Arial',sans-serif;
}

header {
    margin: 0px 0px 20px 0px;
    background-color: #333333;
    color: #ffffff;
    padding: 20px;
}

header > h1 {
    margin: 0px;
}

main {
    width: 95vw;
    margin: auto;
}

section {
    margin-top: 20px;
    margin-bottom: 20px;
}

/* Exam analysis */

#select {
    margin-bottom: 20px;
}

#select > div {
    width: 40vw;
    margin: auto;
}

#analysis {
    display: flex;
    flex-flow: row wrap;
}

#analysis > * {
    display: flex;
    flex-flow: column;
    width: 45vw;
    margin: auto;
}

/* Create new exam */

#new-exam {
    display: flex;
    flex-flow: row;
    justify-content: space-between;
}

#new-exam label {
    width: 20%;
    display: flex;
    flex-flow: column;
    justify-content: space-between;
}
@ -1,355 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
import dash_table
|
||||
import plotly.graph_objects as go
|
||||
from datetime import date, datetime
|
||||
import uuid
|
||||
import pandas as pd
|
||||
import yaml
|
||||
|
||||
from ...scripts.getconfig import config
|
||||
from ...config import NO_ST_COLUMNS
|
||||
from ..app import app
|
||||
from ...scripts.exam import Exam
|
||||
|
||||
QUESTION_COLUMNS = [
|
||||
{"id": "id", "name": "Question"},
|
||||
{
|
||||
"id": "competence",
|
||||
"name": "Competence",
|
||||
"presentation": "dropdown",
|
||||
},
|
||||
{"id": "theme", "name": "Domaine"},
|
||||
{"id": "comment", "name": "Commentaire"},
|
||||
{"id": "score_rate", "name": "Bareme"},
|
||||
{"id": "is_leveled", "name": "Est_nivele"},
|
||||
]
|
||||
|
||||
|
||||
def get_current_year_limit():
|
||||
today = date.today()
|
||||
if today.month > 8:
|
||||
return {
|
||||
"min_date_allowed": date(today.year, 9, 1),
|
||||
"max_date_allowed": date(today.year + 1, 7, 15),
|
||||
"initial_visible_month": today,
|
||||
}
|
||||
|
||||
return {
|
||||
"min_date_allowed": date(today.year - 1, 9, 1),
|
||||
"max_date_allowed": date(today.year, 7, 15),
|
||||
"initial_visible_month": today,
|
||||
}
|
||||
|
||||
|
||||
layout = html.Div(
|
||||
[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Création d'une évaluation"),
|
||||
html.P("Pas encore de sauvegarde", id="is-saved"),
|
||||
html.Button("Enregistrer dans csv", id="save-csv"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
children=[
|
||||
html.Section(
|
||||
children=[
|
||||
html.Form(
|
||||
id="new-exam",
|
||||
children=[
|
||||
html.Label(
|
||||
children=[
|
||||
"Classe",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in config["tribes"]
|
||||
],
|
||||
value=config["tribes"][0]["name"],
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Nom de l'évaluation",
|
||||
dcc.Input(
|
||||
id="exam_name",
|
||||
type="text",
|
||||
placeholder="Nom de l'évaluation",
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Date",
|
||||
dcc.DatePickerSingle(
|
||||
id="date",
|
||||
date=date.today(),
|
||||
**get_current_year_limit(),
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Trimestre",
|
||||
dcc.Dropdown(
|
||||
id="term",
|
||||
options=[
|
||||
{"label": i + 1, "value": i + 1}
|
||||
for i in range(3)
|
||||
],
|
||||
value=1,
|
||||
),
|
||||
]
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
id="form",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
id="exercises",
|
||||
children=[],
|
||||
),
|
||||
html.Button(
|
||||
"Ajouter un exercice",
|
||||
id="add-exercise",
|
||||
className="add-exercise",
|
||||
),
|
||||
html.Div(
|
||||
id="summary",
|
||||
),
|
||||
],
|
||||
id="exercises",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
id="score_rate",
|
||||
),
|
||||
html.Div(
|
||||
id="exercises-viz",
|
||||
),
|
||||
html.Div(
|
||||
id="competences-viz",
|
||||
),
|
||||
html.Div(
|
||||
id="themes-viz",
|
||||
),
|
||||
],
|
||||
id="visualisation",
|
||||
),
|
||||
]
|
||||
),
|
||||
dcc.Store(id="exam_store"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("exercises", "children"),
|
||||
dash.dependencies.Input("add-exercise", "n_clicks"),
|
||||
dash.dependencies.State("exercises", "children"),
|
||||
)
|
||||
def add_exercise(n_clicks, children):
|
||||
if n_clicks is None:
|
||||
return children
|
||||
element_table = pd.DataFrame(columns=[c["id"] for c in QUESTION_COLUMNS])
|
||||
element_table = element_table.append(
|
||||
pd.Series(
|
||||
data={
|
||||
"id": 1,
|
||||
"competence": "Rechercher",
|
||||
"theme": "",
|
||||
"comment": "",
|
||||
"score_rate": 1,
|
||||
"is_leveled": 1,
|
||||
},
|
||||
name=0,
|
||||
)
|
||||
)
|
||||
new_exercise = html.Div(
|
||||
children=[
|
||||
html.Div(
|
||||
children=[
|
||||
dcc.Input(
|
||||
id={"type": "exercice", "index": str(n_clicks)},
|
||||
type="text",
|
||||
value=f"Exercice {len(children)+1}",
|
||||
placeholder="Nom de l'exercice",
|
||||
className="exercise-name",
|
||||
),
|
||||
html.Button(
|
||||
"X",
|
||||
id={"type": "rm_exercice", "index": str(n_clicks)},
|
||||
className="delete-exercise",
|
||||
),
|
||||
],
|
||||
className="exercise-head",
|
||||
),
|
||||
dash_table.DataTable(
|
||||
id={"type": "elements", "index": str(n_clicks)},
|
||||
columns=QUESTION_COLUMNS,
|
||||
data=element_table.to_dict("records"),
|
||||
editable=True,
|
||||
row_deletable=True,
|
||||
dropdown={
|
||||
"competence": {
|
||||
"options": [
|
||||
{"label": i, "value": i} for i in config["competences"]
|
||||
]
|
||||
},
|
||||
},
|
||||
style_cell={
|
||||
"whiteSpace": "normal",
|
||||
"height": "auto",
|
||||
},
|
||||
),
|
||||
html.Button(
|
||||
"Ajouter un élément de notation",
|
||||
id={"type": "add-element", "index": str(n_clicks)},
|
||||
className="add-element",
|
||||
),
|
||||
],
|
||||
className="exercise",
|
||||
id=f"exercise-{n_clicks}",
|
||||
)
|
||||
children.append(new_exercise)
|
||||
return children
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output(
|
||||
{"type": "elements", "index": dash.dependencies.MATCH}, "data"
|
||||
),
|
||||
dash.dependencies.Input(
|
||||
{"type": "add-element", "index": dash.dependencies.MATCH}, "n_clicks"
|
||||
),
|
||||
[
|
||||
dash.dependencies.State(
|
||||
{"type": "elements", "index": dash.dependencies.MATCH}, "data"
|
||||
),
|
||||
],
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def add_element(n_clicks, elements):
|
||||
if n_clicks is None or n_clicks < len(elements):
|
||||
return elements
|
||||
|
||||
df = pd.DataFrame.from_records(elements)
|
||||
df = df.append(
|
||||
pd.Series(
|
||||
data={
|
||||
"id": len(df) + 1,
|
||||
"competence": "",
|
||||
"theme": "",
|
||||
"comment": "",
|
||||
"score_rate": 1,
|
||||
"is_leveled": 1,
|
||||
},
|
||||
name=n_clicks,
|
||||
)
|
||||
)
|
||||
return df.to_dict("records")
|
||||
|
||||
|
||||
def exam_generalities(tribe, exam_name, date, term, exercices=[], elements=[]):
|
||||
return [
|
||||
html.H1(f"{exam_name} pour les {tribe}"),
|
||||
html.P(f"Fait le {date} (Trimestre {term})"),
|
||||
]
|
||||
|
||||
|
||||
def exercise_summary(identifier, name, elements=[]):
|
||||
df = pd.DataFrame.from_records(elements)
|
||||
return html.Div(
|
||||
[
|
||||
html.H2(name),
|
||||
dash_table.DataTable(
|
||||
columns=[{"id": c, "name": c} for c in df], data=elements
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("exam_store", "data"),
|
||||
[
|
||||
dash.dependencies.Input("tribe", "value"),
|
||||
dash.dependencies.Input("exam_name", "value"),
|
||||
dash.dependencies.Input("date", "date"),
|
||||
dash.dependencies.Input("term", "value"),
|
||||
dash.dependencies.Input(
|
||||
{"type": "exercice", "index": dash.dependencies.ALL}, "value"
|
||||
),
|
||||
dash.dependencies.Input(
|
||||
{"type": "elements", "index": dash.dependencies.ALL}, "data"
|
||||
),
|
||||
],
|
||||
dash.dependencies.State({"type": "elements", "index": dash.dependencies.ALL}, "id"),
|
||||
)
|
||||
def store_exam(tribe, exam_name, date, term, exercices, elements, elements_id):
|
||||
exam = Exam(exam_name, tribe, date, term)
|
||||
for (i, name) in enumerate(exercices):
|
||||
ex_elements_id = [el for el in elements_id if el["index"] == str(i + 1)][0]
|
||||
index = elements_id.index(ex_elements_id)
|
||||
ex_elements = elements[index]
|
||||
exam.add_exercise(name, ex_elements)
|
||||
|
||||
return exam.to_dict()
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("score_rate", "children"),
|
||||
dash.dependencies.Input("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def score_rate(data):
|
||||
exam = Exam(**data)
|
||||
return [html.P(f"Barème /{exam.score_rate}")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("competences-viz", "figure"),
|
||||
dash.dependencies.Input("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def competences_viz(data):
|
||||
exam = Exam(**data)
|
||||
return [html.P(str(exam.competences_rate))]
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("themes-viz", "children"),
|
||||
dash.dependencies.Input("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def themes_viz(data):
|
||||
exam = Exam(**data)
|
||||
themes_rate = exam.themes_rate
|
||||
fig = go.Figure()
|
||||
if themes_rate:
|
||||
fig.add_trace(go.Pie(labels=list(themes_rate.keys()), values=list(themes_rate.values())))
|
||||
return [dcc.Graph(figure=fig)]
|
||||
return []
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("is-saved", "children"),
|
||||
dash.dependencies.Input("save-csv", "n_clicks"),
|
||||
dash.dependencies.State("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def save_to_csv(n_clicks, data):
|
||||
exam = Exam(**data)
|
||||
csv = exam.path(".csv")
|
||||
exam.write_csv()
|
||||
return [f"Dernière sauvegarde {datetime.today()} dans {csv}"]
|
@ -1,399 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
import dash_table
|
||||
from dash.exceptions import PreventUpdate
|
||||
import plotly.graph_objects as go
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
|
||||
from ... import flat_df_students, pp_q_scores
|
||||
from ...config import NO_ST_COLUMNS
|
||||
from ...scripts.getconfig import config
|
||||
from ..app import app
|
||||
|
||||
COLORS = {
|
||||
".": "black",
|
||||
0: "#E7472B",
|
||||
1: "#FF712B",
|
||||
2: "#F2EC4C",
|
||||
3: "#68D42F",
|
||||
}
|
||||
|
||||
layout = html.Div(
|
||||
children=[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Analyse des notes"),
|
||||
html.P("Dernière sauvegarde", id="lastsave"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
[
|
||||
html.Section(
|
||||
[
|
||||
html.Div(
|
||||
[
|
||||
"Classe: ",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in config["tribes"]
|
||||
],
|
||||
value=config["tribes"][0]["name"],
|
||||
),
|
||||
],
|
||||
style={
|
||||
"display": "flex",
|
||||
"flex-flow": "column",
|
||||
},
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
"Evaluation: ",
|
||||
dcc.Dropdown(id="csv"),
|
||||
],
|
||||
style={
|
||||
"display": "flex",
|
||||
"flex-flow": "column",
|
||||
},
|
||||
),
|
||||
],
|
||||
id="select",
|
||||
style={
|
||||
"display": "flex",
|
||||
"flex-flow": "row wrap",
|
||||
},
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
html.Div(
|
||||
dash_table.DataTable(
|
||||
id="final_score_table",
|
||||
columns=[
|
||||
{"id": "Eleve", "name": "Élève"},
|
||||
{"id": "Note", "name": "Note"},
|
||||
{"id": "Bareme", "name": "Barème"},
|
||||
],
|
||||
data=[],
|
||||
style_data_conditional=[
|
||||
{
|
||||
"if": {"row_index": "odd"},
|
||||
"backgroundColor": "rgb(248, 248, 248)",
|
||||
}
|
||||
],
|
||||
style_data={
|
||||
"width": "100px",
|
||||
"maxWidth": "100px",
|
||||
"minWidth": "100px",
|
||||
},
|
||||
),
|
||||
id="final_score_table_container",
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
dash_table.DataTable(
|
||||
id="final_score_describe",
|
||||
columns=[
|
||||
{"id": "count", "name": "count"},
|
||||
{"id": "mean", "name": "mean"},
|
||||
{"id": "std", "name": "std"},
|
||||
{"id": "min", "name": "min"},
|
||||
{"id": "25%", "name": "25%"},
|
||||
{"id": "50%", "name": "50%"},
|
||||
{"id": "75%", "name": "75%"},
|
||||
{"id": "max", "name": "max"},
|
||||
],
|
||||
),
|
||||
dcc.Graph(
|
||||
id="fig_assessment_hist",
|
||||
),
|
||||
dcc.Graph(id="fig_competences"),
|
||||
],
|
||||
id="desc_plots",
|
||||
),
|
||||
],
|
||||
id="analysis",
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
dash_table.DataTable(
|
||||
id="scores_table",
|
||||
columns=[
|
||||
{"id": "id", "name": "Question"},
|
||||
{
|
||||
"id": "competence",
|
||||
"name": "Competence",
|
||||
},
|
||||
{"id": "theme", "name": "Domaine"},
|
||||
{"id": "comment", "name": "Commentaire"},
|
||||
{"id": "score_rate", "name": "Bareme"},
|
||||
{"id": "is_leveled", "name": "Est_nivele"},
|
||||
],
|
||||
style_cell={
|
||||
"whiteSpace": "normal",
|
||||
"height": "auto",
|
||||
},
|
||||
fixed_columns={"headers": True, "data": 7},
|
||||
style_table={"minWidth": "100%"},
|
||||
style_data_conditional=[],
|
||||
editable=True,
|
||||
),
|
||||
html.Button("Ajouter un élément", id="btn_add_element"),
|
||||
],
|
||||
id="big_table",
|
||||
),
|
||||
dcc.Store(id="final_score"),
|
||||
],
|
||||
className="content",
|
||||
style={
|
||||
"width": "95vw",
|
||||
"margin": "auto",
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("csv", "options"),
|
||||
dash.dependencies.Output("csv", "value"),
|
||||
],
|
||||
[dash.dependencies.Input("tribe", "value")],
|
||||
)
|
||||
def update_csvs(value):
|
||||
if not value:
|
||||
raise PreventUpdate
|
||||
p = Path(value)
|
||||
csvs = list(p.glob("*.csv"))
|
||||
try:
|
||||
return [{"label": str(c), "value": str(c)} for c in csvs], str(csvs[0])
|
||||
except IndexError:
|
||||
return []
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("final_score", "data"),
|
||||
],
|
||||
[dash.dependencies.Input("scores_table", "data")],
|
||||
)
|
||||
def update_final_scores(data):
|
||||
if not data:
|
||||
raise PreventUpdate
|
||||
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
try:
|
||||
if scores.iloc[0]["Commentaire"] == "commentaire":
|
||||
scores.drop([0], inplace=True)
|
||||
except KeyError:
|
||||
pass
|
||||
scores = flat_df_students(scores).dropna(subset=["Score"])
|
||||
if scores.empty:
|
||||
return [{}]
|
||||
|
||||
scores = pp_q_scores(scores)
|
||||
assessment_scores = scores.groupby(["Eleve"]).agg({"Note": "sum", "Bareme": "sum"})
|
||||
return [assessment_scores.reset_index().to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("final_score_table", "data"),
|
||||
],
|
||||
[dash.dependencies.Input("final_score", "data")],
|
||||
)
|
||||
def update_final_scores_table(data):
|
||||
assessment_scores = pd.DataFrame.from_records(data)
|
||||
return [assessment_scores.to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("final_score_describe", "data"),
|
||||
],
|
||||
[dash.dependencies.Input("final_score", "data")],
|
||||
)
|
||||
def update_final_scores_descr(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
if scores.empty:
|
||||
return [[{}]]
|
||||
desc = scores["Note"].describe().T.round(2)
|
||||
return [[desc.to_dict()]]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("fig_assessment_hist", "figure"),
|
||||
],
|
||||
[dash.dependencies.Input("final_score", "data")],
|
||||
)
|
||||
def update_final_scores_hist(data):
|
||||
assessment_scores = pd.DataFrame.from_records(data)
|
||||
|
||||
if assessment_scores.empty:
|
||||
return [go.Figure(data=[go.Scatter(x=[], y=[])])]
|
||||
|
||||
ranges = np.linspace(
|
||||
-0.5,
|
||||
assessment_scores.Bareme.max(),
|
||||
int(assessment_scores.Bareme.max() * 2 + 2),
|
||||
)
|
||||
bins = pd.cut(assessment_scores["Note"], ranges)
|
||||
assessment_scores["Bin"] = bins
|
||||
assessment_grouped = (
|
||||
assessment_scores.reset_index()
|
||||
.groupby("Bin")
|
||||
.agg({"Bareme": "count", "Eleve": lambda x: "\n".join(x)})
|
||||
)
|
||||
assessment_grouped.index = assessment_grouped.index.map(lambda i: i.right)
|
||||
fig = go.Figure()
|
||||
fig.add_bar(
|
||||
x=assessment_grouped.index,
|
||||
y=assessment_grouped.Bareme,
|
||||
text=assessment_grouped.Eleve,
|
||||
textposition="auto",
|
||||
hovertemplate="",
|
||||
marker_color="#4E89DE",
|
||||
)
|
||||
fig.update_layout(
|
||||
height=300,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [fig]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("fig_competences", "figure"),
|
||||
],
|
||||
[dash.dependencies.Input("scores_table", "data")],
|
||||
)
|
||||
def update_competence_fig(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
try:
|
||||
if scores.iloc[0]["Commentaire"] == "commentaire":
|
||||
scores.drop([0], inplace=True)
|
||||
except KeyError:
|
||||
pass
|
||||
scores = flat_df_students(scores).dropna(subset=["Score"])
|
||||
|
||||
if scores.empty:
|
||||
return [go.Figure(data=[go.Scatter(x=[], y=[])])]
|
||||
|
||||
scores = pp_q_scores(scores)
|
||||
pt = pd.pivot_table(
|
||||
scores,
|
||||
index=["Exercice", "Question", "Commentaire"],
|
||||
columns="Score",
|
||||
aggfunc="size",
|
||||
fill_value=0,
|
||||
)
|
||||
for i in {i for i in pt.index.get_level_values(0)}:
|
||||
pt.loc[(str(i), "", ""), :] = ""
|
||||
pt.sort_index(inplace=True)
|
||||
index = (
|
||||
pt.index.get_level_values(0).map(str)
|
||||
+ ":"
|
||||
+ pt.index.get_level_values(1).map(str)
|
||||
+ " "
|
||||
+ pt.index.get_level_values(2).map(str)
|
||||
)
|
||||
|
||||
fig = go.Figure()
|
||||
bars = [
|
||||
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
|
||||
{"score": 0, "name": "Faux", "color": COLORS[0]},
|
||||
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
|
||||
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
|
||||
{"score": 3, "name": "Juste", "color": COLORS[3]},
|
||||
]
|
||||
for b in bars:
|
||||
try:
|
||||
fig.add_bar(
|
||||
x=index, y=pt[b["score"]], name=b["name"], marker_color=b["color"]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
fig.update_layout(barmode="relative")
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [fig]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("lastsave", "children"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("scores_table", "data"),
|
||||
dash.dependencies.State("csv", "value"),
|
||||
],
|
||||
)
|
||||
def save_scores(data, csv):
|
||||
try:
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores.to_csv(csv, index=False)
|
||||
except:
|
||||
return [f"Soucis pour sauvegarder à {datetime.today()} dans {csv}"]
|
||||
else:
|
||||
return [f"Dernière sauvegarde {datetime.today()} dans {csv}"]
|
||||
|
||||
|
||||
def highlight_value(df):
|
||||
""" Cells style """
|
||||
hight = []
|
||||
for v, color in COLORS.items():
|
||||
hight += [
|
||||
{
|
||||
"if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
|
||||
"backgroundColor": color,
|
||||
"color": "white",
|
||||
}
|
||||
for col in df.columns
|
||||
if col not in NO_ST_COLUMNS.values()
|
||||
]
|
||||
return hight
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("scores_table", "columns"),
|
||||
dash.dependencies.Output("scores_table", "data"),
|
||||
dash.dependencies.Output("scores_table", "style_data_conditional"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("csv", "value"),
|
||||
dash.dependencies.Input("btn_add_element", "n_clicks"),
|
||||
dash.dependencies.State("scores_table", "data"),
|
||||
],
|
||||
)
|
||||
def update_scores_table(csv, add_element, data):
|
||||
ctx = dash.callback_context
|
||||
if ctx.triggered[0]["prop_id"] == "csv.value":
|
||||
stack = pd.read_csv(csv, encoding="UTF8")
|
||||
elif ctx.triggered[0]["prop_id"] == "btn_add_element.n_clicks":
|
||||
stack = pd.DataFrame.from_records(data)
|
||||
infos = pd.DataFrame.from_records(
|
||||
[{k: stack.iloc[-1][k] for k in NO_ST_COLUMNS.values()}]
|
||||
)
|
||||
stack = stack.append(infos)
|
||||
return (
|
||||
[
|
||||
{"id": c, "name": c}
|
||||
for c in stack.columns
|
||||
if c not in ["Trimestre", "Nom", "Date"]
|
||||
],
|
||||
stack.to_dict("records"),
|
||||
highlight_value(stack),
|
||||
)
|
@ -1,29 +0,0 @@
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

from .app import app
from .exam_analysis import app as exam_analysis
from .create_exam import app as create_exam
from .student_analysis import app as student_analysis


app.layout = html.Div(
    [dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
)


@app.callback(Output("page-content", "children"), Input("url", "pathname"))
def display_page(pathname):
    if pathname == "/":
        return exam_analysis.layout
    elif pathname == "/create-exam":
        return create_exam.layout
    elif pathname == "/students":
        return student_analysis.layout
    else:
        return "404"


if __name__ == "__main__":
    app.run_server(debug=True)
@ -1,303 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
import dash_table
|
||||
import plotly.graph_objects as go
|
||||
from datetime import date, datetime
|
||||
import uuid
|
||||
import pandas as pd
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
|
||||
from ...scripts.getconfig import config
|
||||
from ... import flat_df_students, pp_q_scores
|
||||
from ...config import NO_ST_COLUMNS
|
||||
from ..app import app
|
||||
from ...scripts.exam import Exam
|
||||
|
||||
|
||||
def get_students(csv):
|
||||
return list(pd.read_csv(csv).T.to_dict().values())
|
||||
|
||||
|
||||
COLORS = {
|
||||
".": "black",
|
||||
0: "#E7472B",
|
||||
1: "#FF712B",
|
||||
2: "#F2EC4C",
|
||||
3: "#68D42F",
|
||||
}
|
||||
|
||||
QUESTION_COLUMNS = [
|
||||
{"id": "id", "name": "Question"},
|
||||
{
|
||||
"id": "competence",
|
||||
"name": "Competence",
|
||||
"presentation": "dropdown",
|
||||
},
|
||||
{"id": "theme", "name": "Domaine"},
|
||||
{"id": "comment", "name": "Commentaire"},
|
||||
{"id": "score_rate", "name": "Bareme"},
|
||||
{"id": "is_leveled", "name": "Est_nivele"},
|
||||
]
|
||||
|
||||
layout = html.Div(
|
||||
[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Bilan des élèves"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
children=[
|
||||
html.Section(
|
||||
children=[
|
||||
html.Form(
|
||||
id="select-student",
|
||||
children=[
|
||||
html.Label(
|
||||
children=[
|
||||
"Classe",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in config["tribes"]
|
||||
],
|
||||
value=config["tribes"][0]["name"],
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Élève",
|
||||
dcc.Dropdown(
|
||||
id="student",
|
||||
options=[
|
||||
{"label": t["Nom"], "value": t["Nom"]}
|
||||
for t in get_students(config["tribes"][0]["students"])
|
||||
],
|
||||
value=get_students(config["tribes"][0]["students"])[0]["Nom"],
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Trimestre",
|
||||
dcc.Dropdown(
|
||||
id="term",
|
||||
options=[
|
||||
{"label": i + 1, "value": i + 1}
|
||||
for i in range(3)
|
||||
],
|
||||
value=1,
|
||||
),
|
||||
]
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
id="form",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.H2("Évaluations"),
|
||||
html.Div(
|
||||
dash_table.DataTable(
|
||||
id="exam_scores",
|
||||
columns=[
|
||||
{"id": "Nom", "name": "Évaluations"},
|
||||
{"id": "Note", "name": "Note"},
|
||||
{"id": "Bareme", "name": "Barème"},
|
||||
],
|
||||
data=[],
|
||||
style_data_conditional=[
|
||||
{
|
||||
"if": {"row_index": "odd"},
|
||||
"backgroundColor": "rgb(248, 248, 248)",
|
||||
}
|
||||
],
|
||||
style_data={
|
||||
"width": "100px",
|
||||
"maxWidth": "100px",
|
||||
"minWidth": "100px",
|
||||
},
|
||||
),
|
||||
id="eval-table",
|
||||
),
|
||||
],
|
||||
id="Évaluations",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
id="competences-viz",
|
||||
),
|
||||
html.Div(
|
||||
id="themes-vizz",
|
||||
),
|
||||
],
|
||||
id="visualisation",
|
||||
),
|
||||
]
|
||||
),
|
||||
dcc.Store(id="student-scores"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("student", "options"),
|
||||
dash.dependencies.Output("student", "value"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("tribe", "value")
|
||||
],)
|
||||
def update_students_list(tribe):
|
||||
tribe_config = [t for t in config["tribes"] if t["name"] == tribe][0]
|
||||
students = get_students(tribe_config["students"])
|
||||
options = [
|
||||
{"label": t["Nom"], "value": t["Nom"]}
|
||||
for t in students
|
||||
]
|
||||
value = students[0]["Nom"]
|
||||
return options, value
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("student-scores", "data"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("tribe", "value"),
|
||||
dash.dependencies.Input("student", "value"),
|
||||
dash.dependencies.Input("term", "value"),
|
||||
],
|
||||
)
|
||||
def update_student_scores(tribe, student, term):
|
||||
tribe_config = [t for t in config["tribes"] if t["name"] == tribe][0]
|
||||
|
||||
p = Path(tribe_config["name"])
|
||||
csvs = list(p.glob("*.csv"))
|
||||
|
||||
dfs = []
|
||||
for csv in csvs:
|
||||
try:
|
||||
scores = pd.read_csv(csv)
|
||||
except pd.errors.ParserError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
if scores.iloc[0]["Commentaire"] == "commentaire":
|
||||
scores.drop([0], inplace=True)
|
||||
except KeyError:
|
||||
pass
|
||||
scores = flat_df_students(scores).dropna(subset=["Score"])
|
||||
scores = scores[scores["Eleve"] == student]
|
||||
scores = scores[scores["Trimestre"] == term]
|
||||
dfs.append(scores)
|
||||
|
||||
df = pd.concat(dfs)
|
||||
|
||||
return [df.to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("exam_scores", "data"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("student-scores", "data"),
|
||||
],
|
||||
)
|
||||
def update_exam_scores(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores = pp_q_scores(scores)
|
||||
assessment_scores = scores.groupby(["Nom"]).agg({"Note": "sum", "Bareme": "sum"})
|
||||
return [assessment_scores.reset_index().to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("competences-viz", "children"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("student-scores", "data"),
|
||||
],
|
||||
)
|
||||
def update_competences_viz(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores = pp_q_scores(scores)
|
||||
pt = pd.pivot_table(
|
||||
scores,
|
||||
index=["Competence"],
|
||||
columns="Score",
|
||||
aggfunc="size",
|
||||
fill_value=0,
|
||||
)
|
||||
fig = go.Figure()
|
||||
bars = [
|
||||
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
|
||||
{"score": 0, "name": "Faux", "color": COLORS[0]},
|
||||
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
|
||||
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
|
||||
{"score": 3, "name": "Juste", "color": COLORS[3]},
|
||||
]
|
||||
for b in bars:
|
||||
try:
|
||||
fig.add_bar(
|
||||
x=list(config["competences"].keys()), y=pt[b["score"]], name=b["name"], marker_color=b["color"]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
fig.update_layout(barmode="relative")
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [dcc.Graph(figure=fig)]
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("themes-vizz", "children"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("student-scores", "data"),
|
||||
],
|
||||
)
|
||||
def update_themes_viz(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores = pp_q_scores(scores)
|
||||
pt = pd.pivot_table(
|
||||
scores,
|
||||
index=["Domaine"],
|
||||
columns="Score",
|
||||
aggfunc="size",
|
||||
fill_value=0,
|
||||
)
|
||||
fig = go.Figure()
|
||||
bars = [
|
||||
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
|
||||
{"score": 0, "name": "Faux", "color": COLORS[0]},
|
||||
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
|
||||
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
|
||||
{"score": 3, "name": "Juste", "color": COLORS[3]},
|
||||
]
|
||||
for b in bars:
|
||||
try:
|
||||
fig.add_bar(
|
||||
x=list(pt.index), y=pt[b["score"]], name=b["name"], marker_color=b["color"]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
fig.update_layout(barmode="relative")
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [dcc.Graph(figure=fig)]
|
||||
|
@ -1,220 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

import pandas as pd
import numpy as np
from math import ceil, floor
from .config import COLUMNS

"""
Functions to manipulate score dataframes
"""


def round_half_point(val):
    try:
        return 0.5 * ceil(2.0 * val)
    except ValueError:
        return val
    except TypeError:
        return val


def score_to_mark(x):
    """Compute the mark

    If the item is leveled, the score is scaled by the score_rate;
    otherwise the score is copied as is.

    :param x: dictionary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
    :return: the mark

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> score_to_mark(df.loc[0])
    1.0
    >>> score_to_mark(df.loc[10])
    1.3333333333333333
    """
    # -1 means no answer
    if x[COLUMNS["score"]] == -1:
        return 0

    if x[COLUMNS["is_leveled"]]:
        if x[COLUMNS["score"]] not in [0, 1, 2, 3]:
            raise ValueError(
                f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}"
            )
        return round(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3, 2)
        # return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)

    if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
        raise ValueError(
            f"The score ({x['score']}) is greater than the rating scale ({x[COLUMNS['score_rate']]}) at {x}"
        )
    return x[COLUMNS["score"]]


def score_to_level(x):
    """Compute the level (".", 0, 1, 2, 3).

    :param x: dictionary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
    :return: the level

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, np.nan, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> score_to_level(df.loc[0])
    3
    >>> score_to_level(df.loc[1])
    1
    >>> score_to_level(df.loc[2])
    'na'
    >>> score_to_level(df.loc[3])
    3
    >>> score_to_level(df.loc[5])
    3
    >>> score_to_level(df.loc[10])
    2
    """
    # negatives mean no answer or negative points
    if x[COLUMNS["score"]] <= -1:
        return np.nan

    if x[COLUMNS["is_leveled"]]:
        return int(x[COLUMNS["score"]])

    return int(ceil(x[COLUMNS["score"]] / x[COLUMNS["score_rate"]] * 3))


# DataFrame columns manipulations


def compute_mark(df):
    """Compute the mark for the dataframe

    Applies score_to_mark to each row

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> compute_mark(df)
    0     1.00
    1     0.33
    2     2.00
    3     1.50
    4     0.67
    5     2.00
    6     0.67
    7     1.00
    8     1.50
    9     1.00
    10    1.33
    11    2.00
    dtype: float64
    """
    return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
        score_to_mark, axis=1
    )


def compute_level(df):
    """Compute the level for the dataframe

    Applies score_to_level to each row

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
    :return: column with the levels

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[np.nan, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> compute_level(df)
    0     na
    1      1
    2      3
    3      3
    4      1
    5      3
    6      2
    7      3
    8      3
    9      2
    10     2
    11     3
    dtype: object
    """
    return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
        score_to_level, axis=1
    )


def compute_normalized(df):
    """Compute the normalized mark (mark / score_rate)

    :param df: DataFrame with COLUMNS["mark"] and COLUMNS["score_rate"] columns
    :return: column with the normalized mark

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> df[COLUMNS["mark"]] = compute_mark(df)
    >>> compute_normalized(df)
    0     1.00
    1     0.33
    2     1.00
    3     0.75
    4     0.33
    5     1.00
    6     0.67
    7     1.00
    8     0.75
    9     0.50
    10    0.67
    11    1.00
    dtype: float64
    """
    return df[COLUMNS["mark"]] / df[COLUMNS["score_rate"]]


# Postprocessing question scores


def pp_q_scores(df):
    """Postprocess a questions-scores dataframe

    Add 3 columns: mark, level and normalized

    :param df: questions-scores dataframe
    :return: the same dataframe with mark, level and normalized columns
    """
    assign = {
        COLUMNS["mark"]: compute_mark,
        COLUMNS["level"]: compute_level,
        COLUMNS["normalized"]: compute_normalized,
    }
    return df.assign(**assign)


# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
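
For reference, a short sketch of how the removed pp_q_scores is applied to a long-format scores dataframe (the values below are made up):

# Hypothetical example: pp_q_scores adds the "Note", "Niveau" and "Normalise" columns.
import pandas as pd
from recopytex.df_marks_manip import pp_q_scores

scores = pd.DataFrame(
    {
        "Eleve": ["Alice", "Alice"],
        "Score": [3, 1.5],
        "Bareme": [2, 2],
        "Est_nivele": [1, 0],
    }
)
scored = pp_q_scores(scores)
# scored["Note"]      -> 2.00 and 1.50 (leveled scores are scaled by Bareme / 3)
# scored["Niveau"]    -> 3 and 3
# scored["Normalise"] -> 1.00 and 0.75 (Note / Bareme)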
@ -1,207 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

from datetime import datetime
from pathlib import Path
from prompt_toolkit import HTML
from ..config import NO_ST_COLUMNS
import pandas as pd
import yaml
from .getconfig import config


def try_parsing_date(text, formats=["%Y-%m-%d", "%Y.%m.%d", "%Y/%m/%d"]):
    for fmt in formats:
        try:
            return datetime.strptime(text[:10], fmt)
        except ValueError:
            pass
    raise ValueError("no valid date format found")


def format_question(question):
    question["score_rate"] = float(question["score_rate"])
    return question


class Exam:
    def __init__(self, name, tribename, date, term, **kwrds):
        self._name = name
        self._tribename = tribename

        self._date = try_parsing_date(date)

        self._term = term

        try:
            kwrds["exercices"]
        except KeyError:
            self._exercises = {}
        else:
            self._exercises = kwrds["exercices"]

    @property
    def name(self):
        return self._name

    @property
    def tribename(self):
        return self._tribename

    @property
    def date(self):
        return self._date

    @property
    def term(self):
        return self._term

    def add_exercise(self, name, questions):
        """ Add an exercise with its questions to ._exercises """
        try:
            self._exercises[name]
        except KeyError:
            self._exercises[name] = [format_question(q) for q in questions]
        else:
            raise KeyError("The exercise already exists. Use modify_exercise")

    def modify_exercise(self, name, questions, append=False):
        """Modify the questions of an exercise

        If append is True, add the questions to the existing ones.
        """
        try:
            self._exercises[name]
        except KeyError:
            raise KeyError("The exercise does not exist. Use add_exercise")
        else:
            if append:
                self._exercises[name] += [format_question(q) for q in questions]
            else:
                self._exercises[name] = [format_question(q) for q in questions]

    @property
    def exercices(self):
        return self._exercises

    @property
    def tribe_path(self):
        return Path(config["source"]) / self.tribename

    @property
    def tribe_student_path(self):
        return (
            Path(config["source"])
            / [t["students"] for t in config["tribes"] if t["name"] == self.tribename][
                0
            ]
        )

    @property
    def long_name(self):
        """ Get the exam name with the date inside """
        return f"{self.date.strftime('%y%m%d')}_{self.name}"

    def path(self, extension=""):
        return self.tribe_path / (self.long_name + extension)

    def to_dict(self):
        return {
            "name": self.name,
            "tribename": self.tribename,
            "date": self.date,
            "term": self.term,
            "exercices": self.exercices,
        }

    def to_row(self):
        rows = []
        for ex, questions in self.exercices.items():
            for q in questions:
                rows.append(
                    {
                        "term": self.term,
                        "assessment": self.name,
                        "date": self.date.strftime("%d/%m/%Y"),
                        "exercise": ex,
                        "question": q["id"],
                        **q,
                    }
                )
        return rows

    @property
    def themes(self):
        themes = set()
        for questions in self._exercises.values():
            themes.update([q["theme"] for q in questions])
        return themes

    def display_exercise(self, name):
        pass

    def display(self, name):
        pass

    def write_yaml(self):
        print(f"Sauvegarde temporaire dans {self.path('.yml')}")
        self.tribe_path.mkdir(exist_ok=True)
        with open(self.path(".yml"), "w") as f:
            f.write(yaml.dump(self.to_dict()))

    def write_csv(self):
        rows = self.to_row()

        base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
        base_df.rename(columns=NO_ST_COLUMNS, inplace=True)

        students = pd.read_csv(self.tribe_student_path)["Nom"]
        for student in students:
            base_df[student] = ""

        self.tribe_path.mkdir(exist_ok=True)
        base_df.to_csv(self.path(".csv"), index=False)

    @property
    def score_rate(self):
        total = 0
        for ex, questions in self._exercises.items():
            total += sum([q["score_rate"] for q in questions])

        return total

    @property
    def competences_rate(self):
        """ Dictionary with competences as keys and total rates as values """
        rates = {}
        for ex, questions in self._exercises.items():
            for q in questions:
                try:
                    q["competence"]
                except KeyError:
                    pass
                else:
                    try:
                        rates[q["competence"]] += q["score_rate"]
                    except KeyError:
                        rates[q["competence"]] = q["score_rate"]
        return rates

    @property
    def themes_rate(self):
        """ Dictionary with themes as keys and total rates as values """
        rates = {}
        for ex, questions in self._exercises.items():
            for q in questions:
                try:
                    q["theme"]
                except KeyError:
                    pass
                else:
                    if q["theme"]:
                        try:
                            rates[q["theme"]] += q["score_rate"]
                        except KeyError:
                            rates[q["theme"]] = q["score_rate"]
        return rates
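
A brief sketch of how the removed Exam class is driven (the tribe name, date and question values are made up; a recoconfig.yml must exist in the working directory since .getconfig reads it at import time):

# Hypothetical example: build an exam, attach one exercise, inspect it.
from recopytex.scripts.exam import Exam

exam = Exam(name="DS1", tribename="Tribe1", date="2021-01-22", term="1")
exam.add_exercise(
    "Exercice 1",
    [
        {
            "id": "1a",
            "competence": "Rechercher",
            "theme": "",
            "comment": "",
            "score_rate": 1,
            "is_leveled": 1,
        },
    ],
)
print(exam.score_rate)  # 1.0, the sum of the exercise score rates
rows = exam.to_row()  # one dict per question, ready for the csv skeleton
# exam.write_yaml() and exam.write_csv() would then save under config["source"]/Tribe1/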
@ -1,9 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import yaml

CONFIGPATH = "recoconfig.yml"

with open(CONFIGPATH, "r") as config:
    config = yaml.load(config, Loader=yaml.FullLoader)
@ -1,233 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
|
||||
from prompt_toolkit import prompt, HTML, ANSI
|
||||
from prompt_toolkit import print_formatted_text as print
|
||||
from prompt_toolkit.styles import Style
|
||||
from prompt_toolkit.validation import Validator
|
||||
from prompt_toolkit.completion import WordCompleter
|
||||
from unidecode import unidecode
|
||||
from datetime import datetime
|
||||
from functools import wraps
|
||||
import sys
|
||||
|
||||
from .getconfig import config
|
||||
|
||||
|
||||
VALIDATE = [
|
||||
"o",
|
||||
"ok",
|
||||
"OK",
|
||||
"oui",
|
||||
"OUI",
|
||||
"yes",
|
||||
"YES",
|
||||
]
|
||||
REFUSE = ["n", "non", "NON", "no", "NO"]
|
||||
CANCEL = ["a", "annuler"]
|
||||
|
||||
STYLE = Style.from_dict(
|
||||
{
|
||||
"": "#93A1A1",
|
||||
"validation": "#884444",
|
||||
"appending": "#448844",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class CancelError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def prompt_validate(question, cancelable=False, empty_means=1, style="validation"):
|
||||
"""Prompt for validation
|
||||
|
||||
:param question: Text to print to ask the question.
|
||||
:param cancelable: enable cancel answer
|
||||
:param empty_means: result for no answer
|
||||
:return:
|
||||
0 -> Refuse
|
||||
1 -> Validate
|
||||
-1 -> cancel
|
||||
"""
|
||||
question_ = question
|
||||
choices = VALIDATE + REFUSE
|
||||
|
||||
if cancelable:
|
||||
question_ += "(a ou annuler pour sortir)"
|
||||
choices += CANCEL
|
||||
|
||||
ans = prompt(
|
||||
[
|
||||
(f"class:{style}", question_),
|
||||
],
|
||||
completer=WordCompleter(choices),
|
||||
style=STYLE,
|
||||
).lower()
|
||||
|
||||
if ans == "":
|
||||
return empty_means
|
||||
if ans in VALIDATE:
|
||||
return 1
|
||||
if cancelable and ans in CANCEL:
|
||||
return -1
|
||||
return 0
|
||||
|
||||
|
||||
def prompt_until_validate(question="C'est ok? ", cancelable=False):
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwrd):
|
||||
ans = func(*args, **kwrd)
|
||||
|
||||
confirm = prompt_validate(question, cancelable)
|
||||
|
||||
if confirm == -1:
|
||||
raise CancelError
|
||||
|
||||
while not confirm:
|
||||
sys.stdout.flush()
|
||||
ans = func(*args, **ans, **kwrd)
|
||||
confirm = prompt_validate(question, cancelable)
|
||||
if confirm == -1:
|
||||
raise CancelError
|
||||
return ans
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
@prompt_until_validate()
|
||||
def prompt_exam(**kwrd):
|
||||
""" Prompt questions to edit an exam """
|
||||
print(HTML("<b>Nouvelle évaluation</b>"))
|
||||
exam = {}
|
||||
exam["name"] = prompt("Nom de l'évaluation: ", default=kwrd.get("name", "DS"))
|
||||
|
||||
tribes_name = [t["name"] for t in config["tribes"]]
|
||||
|
||||
exam["tribename"] = prompt(
|
||||
"Nom de la classe: ",
|
||||
default=kwrd.get("tribename", ""),
|
||||
completer=WordCompleter(tribes_name),
|
||||
validator=Validator.from_callable(lambda x: x in tribes_name),
|
||||
)
|
||||
exam["tribe"] = [t for t in config["tribes"] if t["name"] == exam["tribename"]][0]
|
||||
|
||||
exam["date"] = prompt(
|
||||
"Date de l'évaluation (%y%m%d): ",
|
||||
default=kwrd.get("date", datetime.today()).strftime("%y%m%d"),
|
||||
validator=Validator.from_callable(lambda x: (len(x) == 6) and x.isdigit()),
|
||||
)
|
||||
exam["date"] = datetime.strptime(exam["date"], "%y%m%d")
|
||||
|
||||
exam["term"] = prompt(
|
||||
"Trimestre: ",
|
||||
validator=Validator.from_callable(lambda x: x.isdigit()),
|
||||
default=kwrd.get("term", "1"),
|
||||
)
|
||||
|
||||
return exam
|
||||
|
||||
|
||||
@prompt_until_validate()
|
||||
def prompt_exercise(number=1, completer={}, **kwrd):
|
||||
exercise = {}
|
||||
try:
|
||||
kwrd["name"]
|
||||
except KeyError:
|
||||
print(HTML("<b>Nouvel exercice</b>"))
|
||||
exercise["name"] = prompt(
|
||||
"Nom de l'exercice: ", default=kwrd.get("name", f"Exercice {number}")
|
||||
)
|
||||
else:
|
||||
print(HTML(f"<b>Modification de l'exercice: {kwrd['name']}</b>"))
|
||||
exercise["name"] = kwrd["name"]
|
||||
|
||||
exercise["questions"] = []
|
||||
|
||||
try:
|
||||
kwrd["questions"][0]
|
||||
except KeyError:
|
||||
last_question_id = "1a"
|
||||
except IndexError:
|
||||
last_question_id = "1a"
|
||||
else:
|
||||
for ques in kwrd["questions"]:
|
||||
try:
|
||||
exercise["questions"].append(
|
||||
prompt_question(completer=completer, **ques)
|
||||
)
|
||||
except CancelError:
|
||||
print("Cette question a été supprimée")
|
||||
last_question_id = exercise["questions"][-1]["id"]
|
||||
|
||||
appending = prompt_validate(
|
||||
question="Ajouter un élément de notation? ", style="appending"
|
||||
)
|
||||
while appending:
|
||||
try:
|
||||
exercise["questions"].append(
|
||||
prompt_question(last_question_id, completer=completer)
|
||||
)
|
||||
except CancelError:
|
||||
print("Cette question a été supprimée")
|
||||
else:
|
||||
last_question_id = exercise["questions"][-1]["id"]
|
||||
appending = prompt_validate(
|
||||
question="Ajouter un élément de notation? ", style="appending"
|
||||
)
|
||||
|
||||
return exercise
|
||||
|
||||
|
||||
@prompt_until_validate(cancelable=True)
|
||||
def prompt_question(last_question_id="1a", completer={}, **kwrd):
|
||||
try:
|
||||
kwrd["id"]
|
||||
except KeyError:
|
||||
print(HTML("<b>Nouvel élément de notation</b>"))
|
||||
else:
|
||||
print(
|
||||
HTML(f"<b>Modification de l'élément {kwrd['id']} ({kwrd['comment']})</b>")
|
||||
)
|
||||
|
||||
question = {}
|
||||
question["id"] = prompt(
|
||||
"Identifiant de la question: ",
|
||||
default=kwrd.get("id", "1a"),
|
||||
)
|
||||
|
||||
question["competence"] = prompt(
|
||||
"Competence: ",
|
||||
default=kwrd.get("competence", list(config["competences"].keys())[0]),
|
||||
completer=WordCompleter(config["competences"].keys()),
|
||||
validator=Validator.from_callable(lambda x: x in config["competences"].keys()),
|
||||
)
|
||||
|
||||
question["theme"] = prompt(
|
||||
"Domaine: ",
|
||||
default=kwrd.get("theme", ""),
|
||||
completer=WordCompleter(completer.get("theme", [])),
|
||||
)
|
||||
|
||||
question["comment"] = prompt(
|
||||
"Commentaire: ",
|
||||
default=kwrd.get("comment", ""),
|
||||
)
|
||||
|
||||
question["is_leveled"] = prompt(
|
||||
"Évaluation par niveau: ",
|
||||
default=kwrd.get("is_leveled", "1"),
|
||||
# validate
|
||||
)
|
||||
|
||||
question["score_rate"] = prompt(
|
||||
"Barème: ",
|
||||
default=kwrd.get("score_rate", "1"),
|
||||
# validate
|
||||
)
|
||||
|
||||
return question
|
@ -1,134 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import click
|
||||
from pathlib import Path
|
||||
import sys
|
||||
import papermill as pm
|
||||
import pandas as pd
|
||||
from datetime import datetime
|
||||
import yaml
|
||||
|
||||
from .getconfig import config, CONFIGPATH
|
||||
from .prompts import prompt_exam, prompt_exercise, prompt_validate
|
||||
from ..config import NO_ST_COLUMNS
|
||||
from .exam import Exam
|
||||
from ..dashboard.index import app as dash
|
||||
|
||||
|
||||
@click.group()
|
||||
def cli():
|
||||
pass
|
||||
|
||||
|
||||
@cli.command()
|
||||
def print_config():
|
||||
click.echo(f"Config file is {CONFIGPATH}")
|
||||
click.echo("It contains")
|
||||
click.echo(config)
|
||||
|
||||
|
||||
@cli.command()
|
||||
def setup():
|
||||
"""Setup the environnement using recoconfig.yml"""
|
||||
for tribe in config["tribes"]:
|
||||
Path(tribe["name"]).mkdir(exist_ok=True)
|
||||
if not Path(tribe["students"]).exists():
|
||||
print(f"The file {tribe['students']} does not exists")
|
||||
|
||||
|
||||
@cli.command()
def new_exam():
    """ Create new exam csv file """
    exam = Exam(**prompt_exam())

    if exam.path(".yml").exists():
        print(f"Fichier sauvegarde trouvé à {exam.path('.yml')} -- importation")
        with open(exam.path(".yml"), "r") as f:
            for name, questions in yaml.load(f, Loader=yaml.SafeLoader)[
                "exercices"
            ].items():
                exam.add_exercise(name, questions)

    print(exam.themes)
    # print(yaml.dump(exam.to_dict()))

    exam.write()

    for name, questions in exam.exercices.items():
        exam.modify_exercise(
            **prompt_exercise(
                name=name, completer={"theme": exam.themes}, questions=questions
            )
        )
        exam.write()

    new_exercise = prompt_validate("Ajouter un exercice? ")
    while new_exercise:
        exam.add_exercise(
            **prompt_exercise(len(exam.exercices) + 1, completer={"theme": exam.themes})
        )
        exam.write()
        new_exercise = prompt_validate("Ajouter un exercice? ")

    rows = exam.to_row()

    base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
    base_df.rename(columns=NO_ST_COLUMNS, inplace=True)

    students = pd.read_csv(exam.tribe_student_path)["Nom"]
    for student in students:
        base_df[student] = ""

    exam.tribe_path.mkdir(exist_ok=True)

    base_df.to_csv(exam.path(".csv"), index=False)
    print(f"Le fichier note a été enregistré à {exam.path('.csv')}")

@cli.command()
@click.option("--debug", default=0, help="Debug mode for dash")
def dashboard(debug):
    dash.run_server(debug=bool(debug))

@cli.command()
@click.argument("csv_file")
def report(csv_file):
    csv = Path(csv_file)
    if not csv.exists():
        click.echo(f"{csv_file} does not exist")
        sys.exit(1)
    if csv.suffix != ".csv":
        click.echo(f"{csv_file} has to be a csv file")
        sys.exit(1)

    csv_file = Path(csv_file)
    tribe_dir = csv_file.parent
    csv_filename = csv_file.name.split(".")[0]

    assessment = str(csv_filename).split("_")[-1].capitalize()
    date = str(csv_filename).split("_")[0]
    try:
        date = datetime.strptime(date, "%y%m%d")
    except ValueError:
        date = None

    tribe = str(tribe_dir).split("/")[-1]

    template = Path(config["templates"]) / "tpl_evaluation.ipynb"

    dest = Path(config["output"]) / tribe / csv_filename
    dest.mkdir(parents=True, exist_ok=True)

    click.echo(f"Building {assessment} ({date:%d/%m/%y}) report")
    pm.execute_notebook(
        str(template),
        str(dest / f"{assessment}.ipynb"),
        parameters=dict(
            tribe=tribe,
            assessment=assessment,
            date=f"{date:%d/%m/%y}",
            csv_file=str(csv_file.absolute()),
        ),
    )
82
recopytex/store/__init__.py
Normal file
@ -0,0 +1,82 @@
#!/usr/bin/env python
# encoding: utf-8

from abc import ABC, abstractmethod
import yaml

"""

Adapter to pull data from the filesystem

# Loader

# Writer
"""


class Loader(ABC):

    """Load data from source"""

    def __init__(self, configfile="recoconfig.yml"):
        """Init loader

        :param configfile: yaml file with information on the data source

        """
        with open(configfile, "r") as config:
            self._config = yaml.load(config, Loader=yaml.FullLoader)

    @abstractmethod
    def get_tribes(self):
        """ Get tribes list """
        pass

    @abstractmethod
    def get_exams(self, tribes=[]):
        """Get exams list

        :param tribes: get only exams for those tribes
        """
        pass

    @abstractmethod
    def get_students(self, tribes=[]):
        """Get student list

        :param tribes: get only students from those tribes
        """
        pass

    @abstractmethod
    def get_exam_questions(self, exams=[]):
        """Get questions for the exam

        :param exams: questions for those exams only
        """
        pass

    @abstractmethod
    def get_questions_scores(self, questions=[]):
        """Get scores of those questions

        :param questions: score for those questions
        """
        pass

    @abstractmethod
    def get_student_scores(self, student):
        """Get scores of the student

        :param student:
        """
        pass


class Writer(ABC):

    """ Write data to the source """

    @abstractmethod
    def __init__(self):
        pass
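Both classes are abstract: Python refuses to instantiate them until every @abstractmethod is overridden, which is what the filesystem CSVLoader further below does. A quick, hypothetical illustration of that contract (not part of the commit):

# Hypothetical snippet showing the abstract-class contract of Loader.
from recopytex.store import Loader

try:
    Loader()
except TypeError as error:
    # e.g. "Can't instantiate abstract class Loader with abstract methods ..."
    print(error)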
15
recopytex/store/filesystem/__init__.py
Normal file
@ -0,0 +1,15 @@
#!/usr/bin/env python
# encoding: utf-8

"""
Store data using filesystem for organisation, csv for scores

## Organisation

- tribe1.csv  # list of students for the tribe
- tribe1/
    - exam1.csv  # questions and scores for exam1
    - exam1.yml  # Extra information about exam1
    - exam2.csv  # questions and scores for exam2
"""
33
recopytex/store/filesystem/lib.py
Normal file
@ -0,0 +1,33 @@
#!/usr/bin/env python
# encoding: utf-8

import pandas as pd
from pathlib import Path


__all__ = ["list_csvs", "extract_fields"]


def list_csvs(path):
    """ list csv files in path """
    return list(Path(path).glob("*.csv"))


def extract_fields(csv_filename, fields=[], remove_duplicates=True):
    """Extract fields in csv

    :param csv_filename: csv filename (with header)
    :param fields: list of fields to extract (all fields if empty list - default)
    :param remove_duplicates: keep unique rows only (default True)

    :example:
    >>> extract_fields("./example/Tribe1/210122_DS6.csv", ["Trimestre", "Nom", "Date"])
       Trimestre  Nom        Date
    0          1  DS6  22/01/2021
    """
    df = pd.read_csv(csv_filename)
    if fields:
        df = df[fields]
    if remove_duplicates:
        return df.drop_duplicates()
    return df
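A small usage sketch combining the two helpers above on the example data shipped with this repository (the path and column names come from example/Tribe1/210122_DS6.csv; the snippet itself is illustrative, not part of the commit):

# List every exam csv of a tribe directory and print its metadata fields.
from recopytex.store.filesystem.lib import list_csvs, extract_fields

for csv in list_csvs("./example/Tribe1/"):
    print(extract_fields(csv, ["Nom", "Date", "Trimestre"]))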
126
recopytex/store/filesystem/loader.py
Normal file
@ -0,0 +1,126 @@
#!/usr/bin/env python
# encoding: utf-8

from pathlib import Path

import pandas as pd
import yaml

from .. import Loader
from .lib import extract_fields


def list_csvs(path):
    """ list csv files in path """
    return list(Path(path).glob("*.csv"))


class CSVLoader(Loader):

    """Loader when scores and metadata are stored in csv files

    ## configfile (`recoconfig.yml` by default)

    source: ./  # basepath where to start (default value)
    templates:  # directory where templates are stored

    tribes:  # All the tribes (required)
        Tribe1:  # Tribe name
            directory: tribe1  # tribe directory
            type: type1  # Type of tribe (2nd, 1, T...)
            students: tribe1.csv  # csv with infos on students

    competences:  # Competences (default values)
        Chercher:
            name: Chercher
            abrv: Cher
        Représenter:
            name: Représenter
            abrv: Rep
        Modéliser:
            name: Modéliser
            abrv: Mod
        Raisonner:
            name: Raisonner
            abrv: Rai
        Calculer:
            name: Calculer
            abrv: Cal
        Communiquer:
            name: Communiquer
            abrv: Com

    valid_scores:  # (default values)
        BAD: 0  # Everything is bad
        FEW: 1  # Few good things
        NEARLY: 2  # Nearly good but things are missing
        GOOD: 3  # Everything is good
        NOTFILLED: ""  # The item is not scored yet
        NOANSWER: "."  # Student gives no answer (counts as 0)
        ABS: "a"  # Student was absent (this score won't impact the final mark)

    csv_fields:  # dataframe_field: csv_field (default values)
        term: Trimestre
        exam: Nom
        date: Date
        exercise: Exercice
        question: Question
        competence: Competence
        theme: Domaine
        comment: Commentaire
        score_rate: Bareme
        is_leveled: Est_nivele
    """

    def get_config(self):
        """ Get config """
        return self._config

    def get_tribes(self, only_names=False):
        """ Get tribes list """
        if only_names:
            return list(self._config["tribes"].keys())
        return self._config["tribes"]

    def get_exams(self, tribes=[]):
        """Get exams list

        :param tribes: get only exams for those tribes
        :return: dataframe of exams (columns: `["name", "tribe", "date", "term"]`)
        """
        exams = []
        for tribe in tribes:
            directory = self._config["tribes"][tribe]["directory"]
            csvs = list_csvs(directory)
            for csv in csvs:
                fields = [
                    self._config["csv_fields"][k] for k in ["exam", "date", "term"]
                ]
                exam = extract_fields(csv, fields)
                exam = exam.rename(
                    columns={v: k for k, v in self._config["csv_fields"].items()}
                ).rename(columns={"exam": "name"})
                exam["tribe"] = tribe
                exams.append(exam)

        return pd.concat(exams)

    def get_students(self, tribes=[]):
        """Get student list

        :param tribes: get only students from those tribes
        """
        pass

    def get_exam_questions(self, exams=[]):
        """Get questions for the exam

        :param exams: questions for those exams only
        """
        pass

    def get_questions_scores(self, questions=[]):
        """Get scores of those questions

        :param questions: score for those questions
        """
        pass

    def get_student_scores(self, student):
        """Get scores of the student

        :param student:
        """
        pass
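A hedged usage sketch of the loader as it stands after this commit (it assumes a recoconfig.yml shaped like the docstring example above; only get_tribes and get_exams are implemented so far):

# Hypothetical driver code, not part of the commit.
from recopytex.store.filesystem.loader import CSVLoader

loader = CSVLoader("recoconfig.yml")
print(loader.get_tribes(only_names=True))   # e.g. ["Tribe1"]
print(loader.get_exams(tribes=["Tribe1"]))  # dataframe with name, tribe, date, term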
7
recopytex/store/filesystem/writer.py
Normal file
@ -0,0 +1,7 @@
#!/usr/bin/env python
# encoding: utf-8

"""

"""