Compare commits: 894ebc4ec8...refact (82 commits)

Commits (SHA1):
83eb9c327b, ff1ecfef25, 921292a0d2, 2d08671247, a16211cbd4, 876f583d51, 97b97af2de, d8d84690c6,
18f855ab83, 36425e587e, 8cdeecfc53, 1a7c97d869, ab5de2711e, 235019102b, 8ec24a24b3, 2e86b3a0a2,
7e6b24aaea, 6889ddd97c, 10b9954c05, 7553628306, effc049578, 411f910ce6, 00d81d694a, a8b2ac455d,
9e0ea14d05, 2031ade1ab, 6ed55c07d4, 1d234ea5fc, 646314ad88, 0739cfdae7, a50901556e, abc5513268,
598086ddb0, 94f8080acd, 6331573940, a292fe23e0, 3005d49a1d, 9fce390304, 94f94dae84, 596a52b1d0,
37c95d75de, bd91bf51d6, c1fd060707, a0e94f52b1, c84f9845b2, d9e95f2186, 581b0f4f2f, 3dbfc85447,
b5bf1ac137, 74d751a586, 1855d4016d, ff94470fb4, d322452a6e, e1d3940e9d, 7dba11996a, 3250a600c9,
589d63ff29, 429fed6a1e, 1255bf4b9e, 1fe7665753, e08e4a32a8, b737612adb, 9c19e2ac56, eb60734c26,
329bcc460c, 95fc842c1d, e0ca1a458b, eb1abbe868, 412e624791, e8bf0b3f0a, c057fa11e7, e15119605f,
494567cdb5, 84fcee625d, f62c898162, 7955b989b4, 4f14e3518c, 4bf8f4003e, a14d47b15c, 09ac9f01f8,
0a5a931d01, 21397272c9
example/Tribe1/210112_DS.csv (new file, 5 lines)

Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS,12/01/2021,Exercice 1,1,Calculer,Plop,Coucou,1,1,,,1,0,1,2,3,0,3,3,2,,1,,,,,,,
1,DS,12/01/2021,Exercice 1,2,Calculer,C'est trop chouette!,Coucou,1,1,,,1,2,,,3,3,,,,,2,,,,,,,
1,DS,12/01/2021,Exercice 1,3,Calculer,Null,Coucou,1,1,,,,3,2,,,,,,,,3,,,,,,,
1,DS,12/01/2021,Exercice 1,3,Calculer,Nié,DChic,1,1,,,,2,.,,,,,,,,,,,,,,,
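For orientation, a minimal sketch (assuming pandas and the repository layout above) of how this wide CSV reads: the first ten columns are question metadata, and every remaining column is one student.

```python
import pandas as pd

# Read the example score sheet shown above (path assumes the repo layout).
df = pd.read_csv("example/Tribe1/210112_DS.csv")

# The ten metadata columns; everything else is a student column.
meta = ["Trimestre", "Nom", "Date", "Exercice", "Question",
        "Competence", "Domaine", "Commentaire", "Bareme", "Est_nivele"]
students = [c for c in df.columns if c not in meta]
print(len(students))  # 20 students, one score per question row
```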
example/Tribe1/210122_DS6.csv (new file, 5 lines)

Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS6,22/01/2021,Exercice 1,Sait pas,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,Ha,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 2,grr,,,,,,,,,,,,,,,,,,,,,,,,,
Modified config file

@@ -3,30 +3,11 @@ source: ./
 output: ./
 templates: templates/
-
-competences:
-  Calculer:
-    name: Calculer
-    abrv: Cal
-  Représenter:
-    name: Représenter
-    abrv: Rep
-  Modéliser:
-    name: Modéliser
-    abrv: Mod
-  Raisonner:
-    name: Raisonner
-    abrv: Rai
-  Calculer:
-    name: Calculer
-    abrv: Cal
-  Communiquer:
-    name: Communiquer
-    abrv: Com
-
 tribes:
-  - name: Tribe1
-    type: Type1
-    students: tribe1.csv
-  - name: Tribe2
-    students: tribe2.csv
+  Tribe1:
+    name: Tribe1
+    type: Type1
+    students: tribe1.csv
+  Tribe2:
+    name: Tribe2
+    students: tribe2.csv
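The tribes section changes from a list of mappings to a dict keyed by tribe name. A minimal sketch (hypothetical snippet, assuming PyYAML) of why the new shape is convenient downstream:

```python
import yaml

# Hypothetical excerpt mirroring the new shape shown in the diff above.
config = yaml.safe_load("""
tribes:
  Tribe1:
    name: Tribe1
    type: Type1
    students: tribe1.csv
  Tribe2:
    name: Tribe2
    students: tribe2.csv
""")

# With a dict, a tribe is looked up directly by name instead of scanning a list.
tribe = config["tribes"]["Tribe1"]
print(tribe["students"])  # tribe1.csv
```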
Deleted file (4 lines)

---
source: sheets/
output: reports/
templates: templates/
Deleted file (5 lines)

#!/usr/bin/env python
# encoding: utf-8

from .csv_extraction import flat_df_students, flat_df_for
from .df_marks_manip import pp_q_scores
Deleted file (30 lines)

#!/usr/bin/env python
# encoding: utf-8

NO_ST_COLUMNS = {
    "term": "Trimestre",
    "assessment": "Nom",
    "date": "Date",
    "exercise": "Exercice",
    "question": "Question",
    "competence": "Competence",
    "theme": "Domaine",
    "comment": "Commentaire",
    "score_rate": "Bareme",
    "is_leveled": "Est_nivele",
}

COLUMNS = {
    **NO_ST_COLUMNS,
    "student": "Eleve",
    "score": "Score",
    "mark": "Note",
    "level": "Niveau",
    "normalized": "Normalise",
}

VALIDSCORE = {
    "NOTFILLED": "",  # The item is not scored yet
    "NOANSWER": ".",  # Student gives no answer (this score will impact the final mark)
    "ABS": "a",  # Student was absent (this score won't impact the final mark)
}
Deleted file (119 lines)

#!/usr/bin/env python
# encoding: utf-8

""" Extracting data from xlsx files """

import pandas as pd
from .config import NO_ST_COLUMNS, COLUMNS, VALIDSCORE

pd.set_option("Precision", 2)


def try_replace(x, old, new):
    try:
        return str(x).replace(old, new)
    except ValueError:
        return x


def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
    """Extract the list of students from df

    :param df: the dataframe
    :param no_student_columns: columns that are not students
    :return: list of students
    """
    students = df.columns.difference(no_student_columns)
    return students


def flat_df_students(
    df, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """Flatten the dataframe, returning a dataframe with one student per line

    :param df: the dataframe (one row per question)
    :param no_student_columns: columns that are not students
    :return: dataframe with one row per question and student

    Columns of csv files:

    - NO_ST_COLUMNS: metadata on questions
    - one column for each student

    This function melts the student columns into "student" and "score"
    """
    students = extract_students(df, no_student_columns)
    scores = []
    for st in students:
        scores.append(
            pd.melt(
                df,
                id_vars=no_student_columns,
                value_vars=st,
                var_name=COLUMNS["student"],
                value_name=COLUMNS["score"],
            ).dropna(subset=[COLUMNS["score"]])
        )
    if postprocessing:
        return postprocess(pd.concat(scores))
    return pd.concat(scores)


def flat_df_for(
    df, student, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """Extract the data for a single student

    :param df: the dataframe (one row per question)
    :param no_student_columns: columns that are not students
    :return: dataframe with one row per question and student

    Columns of csv files:

    - NO_ST_COLUMNS: metadata on questions
    - one column for each student

    """
    students = extract_students(df, no_student_columns)
    if student not in students:
        raise KeyError("This student is not in the table")
    st_df = df[list(no_student_columns) + [student]]
    st_df = st_df.rename(columns={student: COLUMNS["score"]}).dropna(
        subset=[COLUMNS["score"]]
    )
    if postprocessing:
        return postprocess(st_df)
    return st_df


def postprocess(df):
    """Postprocess the score dataframe

    - Replace NaN with an empty string
    - Replace "NOANSWER" with -1
    - Turn comma decimal separators into dots
    """

    df[COLUMNS["question"]].fillna("", inplace=True)
    df[COLUMNS["exercise"]].fillna("", inplace=True)
    df[COLUMNS["comment"]].fillna("", inplace=True)
    df[COLUMNS["competence"]].fillna("", inplace=True)

    df[COLUMNS["score"]] = pd.to_numeric(
        df[COLUMNS["score"]]
        .replace(VALIDSCORE["NOANSWER"], -1)
        .apply(lambda x: try_replace(x, ",", "."))
    )
    df[COLUMNS["score_rate"]] = pd.to_numeric(
        df[COLUMNS["score_rate"]].apply(lambda x: try_replace(x, ",", ".")),
        errors="coerce",
    )

    return df


# -----------------------------
# Settings for 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
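The refactor removes this module, but the wide-to-long melt it performed lives on in the new loader and datalib code. A minimal sketch of the same idea on a toy frame (hypothetical data, assuming pandas):

```python
import pandas as pd

# Toy wide frame: one row per question, one column per student.
df = pd.DataFrame({
    "Question": ["1", "2"],
    "Bareme": [1, 1],
    "Alice": [3, 2],
    "Bob": [1, None],
})

# Melt the student columns into (student, score) pairs, as flat_df_students did.
long = pd.melt(
    df,
    id_vars=["Question", "Bareme"],
    value_vars=["Alice", "Bob"],
    var_name="Eleve",
    value_name="Score",
).dropna(subset=["Score"])
print(long)  # 3 rows: Bob has no score for question 2
```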
recopytex/dashboard/app.py (new file, 20 lines)

#!/usr/bin/env python
# encoding: utf-8

import dash
import flask

from .layout.layout import layout

server = flask.Flask(__name__)
app = dash.Dash(
    __name__,
    server=server,
    suppress_callback_exceptions=True,
    meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)


app.layout = layout
server = app.server
recopytex/dashboard/common/formating.py (new file, 23 lines)

#!/usr/bin/env python
# encoding: utf-8


def highlight_scores(highlight_columns, score_color):
    """Cell styles in a datatable for scores

    :param highlight_columns: columns to highlight
    :param score_color: dictionary {"score": "color"}

    """
    hight = []
    for v, color in score_color.items():
        if v:
            hight += [
                {
                    "if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
                    "backgroundColor": color,
                    "color": "white",
                }
                for col in highlight_columns
            ]
    return hight
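A usage sketch (hypothetical colors, roughly matching default_config.yml) showing the conditional-style entries the function returns; note that the falsy score 0 is skipped by the `if v:` guard above.

```python
from recopytex.dashboard.common.formating import highlight_scores

# Hypothetical score-to-color mapping.
score_color = {0: "#E7472B", 1: "#FF712B", 2: "#F2EC4C", 3: "#68D42F"}

styles = highlight_scores(["Alice", "Bob"], score_color)
# Each rule targets one (score value, student column) pair, e.g.
# {"if": {"filter_query": "{Alice} = 1", "column_id": "Alice"},
#  "backgroundColor": "#FF712B", "color": "white"}
print(len(styles))  # 6 rules: 3 non-zero scores x 2 columns (0 is falsy, so skipped)
```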
recopytex/dashboard/index.py (new file, 8 lines)

#!/usr/bin/env python
# encoding: utf-8

from .app import app, server
from .routes import render_page_content

if __name__ == "__main__":
    app.run_server(debug=True)
recopytex/dashboard/layout/layout.py (new file, 9 lines)

#!/usr/bin/env python
# encoding: utf-8

import dash_html_components as html
import dash_core_components as dcc

content = html.Div(id="page-content")

layout = html.Div([dcc.Location(id="url"), content])
recopytex/dashboard/pages/create_exam/__init__.py (new empty file)
recopytex/dashboard/pages/create_exam/app.py (new empty file)
recopytex/dashboard/pages/create_exam/callbacks.py (new empty file)
recopytex/dashboard/pages/create_exam/models.py (new empty file)
recopytex/dashboard/pages/exams_scores/__init__.py (new empty file)
recopytex/dashboard/pages/exams_scores/app.py (new file, 112 lines)

#!/usr/bin/env python
# encoding: utf-8

import dash_html_components as html
import dash_core_components as dcc
from .models import get_tribes, get_exams
from .callbacks import *

layout = html.Div(
    children=[
        html.Header(
            children=[
                html.H1("Analyse des notes"),
                html.P("Dernière sauvegarde", id="lastsave"),
            ],
        ),
        html.Main(
            children=[
                html.Section(
                    children=[
                        html.Div(
                            children=[
                                "Classe: ",
                                dcc.Dropdown(
                                    id="tribe",
                                    options=[
                                        {"label": t["name"], "value": t["name"]}
                                        for t in get_tribes().values()
                                    ],
                                    value=next(iter(get_tribes().values()))["name"],
                                ),
                            ],
                        ),
                        html.Div(
                            children=[
                                "Evaluation: ",
                                dcc.Dropdown(id="exam_select"),
                            ],
                        ),
                    ],
                    id="selects",
                ),
                html.Section(
                    children=[
                        html.Div(
                            children=[
                                dash_table.DataTable(
                                    id="final_score_table",
                                    columns=[
                                        {"name": "Étudiant", "id": "student_name"},
                                        {"name": "Note", "id": "mark"},
                                        {"name": "Barème", "id": "score_rate"},
                                    ],
                                )
                            ],
                            id="final_score_table_container",
                        ),
                        html.Div(
                            children=[
                                dash_table.DataTable(
                                    id="score_statistics_table",
                                    columns=[],
                                )
                            ],
                            id="score_statistics_table_container",
                        ),
                        html.Div(
                            children=[
                                dcc.Graph(
                                    id="fig_exam_histo",
                                    config={"displayModeBar": False},
                                )
                            ],
                            id="fig_exam_histo_container",
                        ),
                        html.Div(
                            children=[
                                dcc.Graph(
                                    id="fig_questions_bar",
                                    config={"displayModeBar": False},
                                )
                            ],
                            id="fig_questions_bar_container",
                        ),
                    ],
                    id="analysis",
                ),
                html.Section(
                    children=[
                        dash_table.DataTable(
                            id="scores_table",
                            columns=[],
                            style_data_conditional=[],
                            fixed_columns={},
                            editable=True,
                            style_table={"minWidth": "100%"},
                            style_cell={
                                "minWidth": "100px",
                                "width": "100px",
                                "maxWidth": "100px",
                                "overflow": "hidden",
                                "textOverflow": "ellipsis",
                            },
                        )
                    ],
                    id="edit",
                ),
            ],
        ),
        dcc.Store(id="scores"),
    ],
)
recopytex/dashboard/pages/exams_scores/callbacks.py (new file, 216 lines)

#!/usr/bin/env python
# encoding: utf-8

from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
import dash_table
import json
import pandas as pd
import numpy as np

from recopytex.dashboard.app import app
from recopytex.dashboard.common.formating import highlight_scores

from .models import (
    get_tribes,
    get_exams,
    get_unstack_scores,
    get_students_from_exam,
    get_score_colors,
    get_level_color_bar,
    score_to_final_mark,
    stack_scores,
    pivot_score_on,
)


@app.callback(
    [
        Output("exam_select", "options"),
        Output("exam_select", "value"),
    ],
    [Input("tribe", "value")],
)
def update_exams_choices(tribe):
    if not tribe:
        raise PreventUpdate
    exams = get_exams(tribe)
    exams.reset_index(inplace=True)
    if not exams.empty:
        return [
            {"label": e["name"], "value": e.to_json()} for i, e in exams.iterrows()
        ], exams.loc[0].to_json()
    return [], None


@app.callback(
    [
        Output("scores_table", "columns"),
        Output("scores_table", "data"),
        Output("scores_table", "style_data_conditional"),
        Output("scores_table", "fixed_columns"),
    ],
    [
        Input("exam_select", "value"),
    ],
)
def update_scores_store(exam):
    if not exam:
        return [[], [], [], {}]
    exam = pd.DataFrame.from_dict([json.loads(exam)])
    scores = get_unstack_scores(exam)
    fixed_columns = [
        "exercise",
        "question",
        "competence",
        "theme",
        "comment",
        "score_rate",
        "is_leveled",
    ]

    students = list(get_students_from_exam(exam))
    columns = fixed_columns + students

    score_color = get_score_colors()

    return [
        [{"id": c, "name": c} for c in columns],
        scores.to_dict("records"),
        highlight_scores(students, score_color),
        {"headers": True, "data": len(fixed_columns)},
    ]


@app.callback(
    [
        Output("final_score_table", "data"),
    ],
    [
        Input("scores_table", "data"),
    ],
)
def update_finale_score_table(scores):
    scores_df = pd.DataFrame.from_records(scores)
    stacked_scores = stack_scores(scores_df)
    return score_to_final_mark(stacked_scores)


@app.callback(
    [
        Output("score_statistics_table", "columns"),
        Output("score_statistics_table", "data"),
    ],
    [
        Input("final_score_table", "data"),
    ],
)
def update_statictics_table(finale_score):
    df = pd.DataFrame.from_records(finale_score)
    statistics = df["mark"].describe().to_frame().T
    return [
        [{"id": c, "name": c} for c in statistics.columns],
        statistics.to_dict("records"),
    ]


@app.callback(
    [
        Output("fig_exam_histo", "figure"),
    ],
    [
        Input("final_score_table", "data"),
    ],
)
def update_exam_histo(finale_scores):
    scores = pd.DataFrame.from_records(finale_scores)

    if scores.empty:
        return [go.Figure(data=[go.Scatter(x=[], y=[])])]

    ranges = np.linspace(
        -0.5,
        scores["score_rate"].max(),
        int(scores["score_rate"].max() * 2 + 2),
    )

    bins = pd.cut(scores["mark"], ranges)
    scores["Bin"] = bins
    grouped = (
        scores.reset_index()
        .groupby("Bin")
        .agg({"score_rate": "count", "student_name": lambda x: "\n".join(x)})
    )
    grouped.index = grouped.index.map(lambda i: i.right)
    fig = go.Figure()
    fig.add_bar(
        x=grouped.index,
        y=grouped["score_rate"],
        text=grouped["student_name"],
        textposition="auto",
        hovertemplate="",
        marker_color="#4E89DE",
    )
    fig.update_layout(
        height=300,
        margin=dict(l=5, r=5, b=5, t=5),
    )
    return [fig]


@app.callback(
    [
        Output("fig_questions_bar", "figure"),
    ],
    [
        Input("scores_table", "data"),
    ],
)
def update_questions_bar(finale_scores):
    scores = pd.DataFrame.from_records(finale_scores)
    scores = stack_scores(scores)

    if scores.empty:
        return [go.Figure(data=[go.Scatter(x=[], y=[])])]

    pt = pivot_score_on(scores, ["exercise", "question", "comment"], "score")

    # separation between exercises
    for i in {i for i in pt.index.get_level_values(0)}:
        pt.loc[(str(i), "", ""), :] = ""
    pt.sort_index(inplace=True)

    # Bar label
    index = (
        pt.index.get_level_values(0).map(str)
        + ":"
        + pt.index.get_level_values(1).map(str)
        + " "
        + pt.index.get_level_values(2).map(str)
    )

    fig = go.Figure()

    bars = get_level_color_bar()

    for b in bars:
        try:
            fig.add_bar(
                x=index, y=pt[b["score"]], name=b["name"], marker_color=b["color"]
            )
        except KeyError:
            pass
    fig.update_layout(barmode="relative")
    fig.update_layout(
        height=500,
        margin=dict(l=5, r=5, b=5, t=5),
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="right",
            x=1
        )
    )
    return [fig]
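The half-point binning used by update_exam_histo is easy to check in isolation. A sketch with made-up marks (assuming pandas and numpy):

```python
import numpy as np
import pandas as pd

# Hypothetical final marks for an exam scored out of 10.
scores = pd.DataFrame({
    "student_name": ["Alice", "Bob", "Carol"],
    "mark": [7.5, 8.0, 4.5],
    "score_rate": [10, 10, 10],
})

# Same construction as the callback: bin edges every half point, from -0.5 up to the total.
ranges = np.linspace(-0.5, scores["score_rate"].max(),
                     int(scores["score_rate"].max() * 2 + 2))
scores["Bin"] = pd.cut(scores["mark"], ranges)
grouped = scores.groupby("Bin").agg(
    {"score_rate": "count", "student_name": lambda x: "\n".join(x)}
)
print(grouped[grouped["score_rate"] > 0])  # one populated bin per half point
```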
recopytex/dashboard/pages/exams_scores/models.py (new file, 128 lines)

#!/usr/bin/env python
# encoding: utf-8

from recopytex.database.filesystem.loader import CSVLoader
from recopytex.datalib.dataframe import column_values_to_column
import recopytex.datalib.on_score_column as on_column
import pandas as pd

LOADER = CSVLoader("./test_config.yml")
SCORES_CONFIG = LOADER.get_config()["scores"]


def unstack_scores(scores):
    """Put student_name values into columns

    :param scores: Score dataframe with one line per score
    :returns: Score dataframe with student_name in columns

    """
    kept_columns = [col for col in LOADER.score_columns if col != "score"]
    return column_values_to_column("student_name", "score", kept_columns, scores)


def stack_scores(scores):
    """Melt student columns back into rows with a student_name column

    :param scores: Score dataframe with student_name in columns
    :returns: Score dataframe with one line per score

    """
    kept_columns = [
        c for c in LOADER.score_columns if c not in ["score", "student_name"]
    ]
    student_names = [c for c in scores.columns if c not in kept_columns]
    return pd.melt(
        scores,
        id_vars=kept_columns,
        value_vars=student_names,
        var_name="student_name",
        value_name="score",
    )


def get_tribes():
    return LOADER.get_tribes()


def get_exams(tribe):
    return LOADER.get_exams([tribe])


def get_record_scores(exam):
    return LOADER.get_exam_scores(exam)


def get_unstack_scores(exam):
    flat_scores = LOADER.get_exam_scores(exam)
    return unstack_scores(flat_scores)


def get_students_from_exam(exam):
    flat_scores = LOADER.get_exam_scores(exam)
    return flat_scores["student_name"].unique()


def get_score_colors():
    score_color = {}
    for key, score in SCORES_CONFIG.items():
        score_color[score["value"]] = score["color"]
    return score_color


def get_level_color_bar():
    return [
        {"score": str(s["value"]), "name": s["comment"], "color": s["color"]}
        for s in SCORES_CONFIG.values()
    ]


is_none_score = lambda x: on_column.is_none_score(x, SCORES_CONFIG)
format_score = lambda x: on_column.format_score(x, SCORES_CONFIG)
score_to_numeric_score = lambda x: on_column.score_to_numeric_score(x, SCORES_CONFIG)
score_to_mark = lambda x: on_column.score_to_mark(
    x, max([v["value"] for v in SCORES_CONFIG.values() if isinstance(v["value"], int)])
)


def filter_clean_score(scores):
    filtered_scores = scores[~scores.apply(is_none_score, axis=1)]
    filtered_scores = filtered_scores.assign(
        score=filtered_scores.apply(format_score, axis=1)
    )
    return filtered_scores


def score_to_final_mark(scores):
    """ Compute marks then reduce to one final mark per student """

    filtered_scores = filter_clean_score(scores)
    filtered_scores = filtered_scores.assign(
        score=filtered_scores.apply(score_to_numeric_score, axis=1)
    )
    filtered_scores = filtered_scores.assign(
        mark=filtered_scores.apply(score_to_mark, axis=1)
    )
    final_score = filtered_scores.groupby(["student_name"])[
        ["mark", "score_rate"]
    ].sum()
    return [final_score.reset_index().to_dict("records")]


def pivot_score_on(scores, index, columns, aggfunc="size"):
    """Pivot scores on index, columns with aggfunc

    It assumes that scores are levels

    """
    filtered_scores = filter_clean_score(scores)
    filtered_scores["score"] = filtered_scores["score"].astype(str)
    pt = pd.pivot_table(
        filtered_scores,
        index=index,
        columns=columns,
        aggfunc=aggfunc,
        fill_value=0,
    )
    return pt
recopytex/dashboard/pages/home/__init__.py (new empty file)
recopytex/dashboard/pages/home/app.py (new file, 50 lines)

#!/usr/bin/env python
# encoding: utf-8

import dash_html_components as html
from recopytex.database.filesystem.loader import CSVLoader
from .models import get_tribes, get_exams, get_students

loader = CSVLoader("./test_config.yml")


def listing(elements, formating=lambda x: x):

    return html.Ul(
        children=[html.Li(children=formating(element)) for element in elements]
    )


def format_tribe(tribe):
    children = [html.H3(tribe["name"])]
    exams = loader.get_exams([tribe["name"]])
    if exams.empty:
        children.append(html.P("Pas d'évaluation"))
    else:
        exams_html = listing([exam for id, exam in exams.iterrows()], format_exam)
        children.append(exams_html)
    return children


def format_exam(exam):
    children = [html.P(exam["name"])]
    return children


layout = html.Div(
    children=[
        html.H1("Recopytex"),
        html.H2("Tribes"),
        html.Div(
            children=[listing(loader.get_tribes().values(), format_tribe)],
            id="tribes",
        ),
        html.H2("Config"),
        html.Div(
            children=[
                html.P(str(loader.get_config())),
            ],
            id="config",
        ),
    ]
)
recopytex/dashboard/pages/home/callbacks.py (new file, 6 lines)

#!/usr/bin/env python
# encoding: utf-8

from dash.dependencies import Input, Output
from recopytex.dashboard.app import app
recopytex/dashboard/pages/home/models.py (new file, 14 lines)

#!/usr/bin/env python
# encoding: utf-8


def get_tribes(loader):
    return loader.get_tribes()


def get_exams(loader, tribe):
    return loader.get_exams([tribe])


def get_students(loader, tribe):
    return loader.get_students([tribe])
recopytex/dashboard/routes.py (new file, 27 lines)

#!/usr/bin/env python
# encoding: utf-8

from dash.dependencies import Input, Output

from .app import app
from .pages.home import app as home
from .pages.exams_scores import app as exams_scores
import dash_html_components as html


@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def render_page_content(pathname):
    if pathname == "/":
        return home.layout
    elif pathname == "/exams/scores/":
        return exams_scores.layout
    # elif pathname == iris_page_location:
    #     return iris.layout
    # If the user tries to reach a different page, return a 404 message
    return html.Div(
        [
            html.H1("404: Not found", className="text-danger"),
            html.Hr(),
            html.P(f"The pathname {pathname} was not recognised..."),
        ]
    )
recopytex/database/__init__.py (new file, 88 lines)

#!/usr/bin/env python
# encoding: utf-8

from abc import ABC, abstractmethod
import yaml

"""
Adapter to pull data from the filesystem

# Loader

# Writer
"""


class Loader(ABC):

    """Load data from a source"""

    CONFIG = {}

    def __init__(self, configfile=""):
        """Init loader

        :param configfile: yaml file with information on the data source
        """
        self._config = self.CONFIG
        if configfile.endswith(".yml"):
            with open(configfile, "r") as config:
                self._config.update(yaml.load(config, Loader=yaml.FullLoader))

    def get_config(self):
        """ Get config """
        return self._config

    @abstractmethod
    def get_tribes(self):
        """ Get tribes list """
        pass

    @abstractmethod
    def get_exams(self, tribes=[]):
        """Get exams list

        :param tribes: get only exams for those tribes
        """
        pass

    @abstractmethod
    def get_students(self, tribes=[]):
        """Get student list

        :param tribes: concerned tribes
        """
        pass

    @abstractmethod
    def get_exam_questions(self, exams=[]):
        """Get questions for the exam

        :param exams: questions for those exams only
        """
        pass

    @abstractmethod
    def get_questions_scores(self, questions=[]):
        """Get scores of those questions

        :param questions: scores for those questions
        """
        pass

    # @abstractmethod
    # def get_student_scores(self, student):
    #     """Get scores of the student

    #     :param student:
    #     """
    #     pass


class Writer(ABC):

    """ Write data to the source """

    def __init__(self):
        pass
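To make the contract concrete, a minimal sketch of a subclass backed by an in-memory dict (purely illustrative; the project's real implementation is the CSVLoader further down):

```python
from recopytex.database import Loader


class DictLoader(Loader):
    """Hypothetical loader that reads everything from a plain dict."""

    def __init__(self, data, configfile=""):
        super().__init__(configfile)
        self._data = data

    def get_tribes(self):
        return self._data.get("tribes", {})

    def get_exams(self, tribes=[]):
        return [e for e in self._data.get("exams", []) if e["tribe"] in tribes]

    def get_students(self, tribes=[]):
        return [s for s in self._data.get("students", []) if s["tribe"] in tribes]

    def get_exam_questions(self, exams=[]):
        return [q for q in self._data.get("questions", []) if q["exam_id"] in exams]

    def get_questions_scores(self, questions=[]):
        return [s for s in self._data.get("scores", []) if s["question_id"] in questions]
```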
recopytex/database/filesystem/__init__.py (new file, 15 lines)

#!/usr/bin/env python
# encoding: utf-8

"""
Store data using the filesystem for organisation and csv files for scores

## Organisation

- tribe1.csv  # list of students for the tribe
- tribe1/
    - exam1.csv  # questions and scores for exam1
    - exam1.yml  # extra information about exam1
    - exam2.csv  # questions and scores for exam2
"""
recopytex/database/filesystem/default_config.yml (new file, 75 lines)

---
source: ./  # basepath where to start

competences:  # Competences
  Chercher:
    name: Chercher
    abrv: Cher
  Représenter:
    name: Représenter
    abrv: Rep
  Modéliser:
    name: Modéliser
    abrv: Mod
  Raisonner:
    name: Raisonner
    abrv: Rai
  Calculer:
    name: Calculer
    abrv: Cal
  Communiquer:
    name: Communiquer
    abrv: Com

scores:
  BAD:  # Everything is bad
    value: 0
    numeric_value: 0
    color: "#E7472B"
    comment: Faux
  FEW:  # Few good things
    value: 1
    numeric_value: 1
    color: "#FF712B"
    comment: Peu juste
  NEARLY:  # Nearly good but things are missing
    value: 2
    numeric_value: 2
    color: "#F2EC4C"
    comment: Presque juste
  GOOD:  # Everything is good
    value: 3
    numeric_value: 3
    color: "#68D42F"
    comment: Juste
  NOTFILLED:  # The item is not scored yet
    value: ""
    numeric_value: None
    color: white
    comment: En attente
  NOANSWER:  # Student gives no answer (counts as 0)
    value: "."
    numeric_value: 0
    color: black
    comment: Pas de réponse
  ABS:  # Student was absent (this score won't impact the final mark)
    value: a
    numeric_value: None
    color: lightgray
    comment: Non noté

csv_fields:  # dataframe_field: csv_field
  term: Trimestre
  exam: Nom
  date: Date
  exercise: Exercice
  question: Question
  competence: Competence
  theme: Domaine
  comment: Commentaire
  score_rate: Bareme
  is_leveled: Est_nivele

id_templates:
  exam: "{name}_{tribe}"
  question: "{exam_id}_{exercise}_{question}_{comment}"
recopytex/database/filesystem/lib.py (new file, 52 lines)

#!/usr/bin/env python
# encoding: utf-8

import pandas as pd
from pathlib import Path
from unidecode import unidecode


__all__ = ["list_csvs", "extract_fields"]


def list_csvs(path):
    """List csv files in path

    :example:
    >>> list_csvs("./example/Tribe1/")
    [PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
    >>> list_csvs("./example/Tribe1")
    [PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
    """
    return list(Path(path).glob("*.csv"))


def extract_fields(csv_filename, fields=[], remove_duplicates=True):
    """Extract fields from a csv

    :param csv_filename: csv filename (with header)
    :param fields: list of fields to extract (all fields if the list is empty - default)
    :param remove_duplicates: keep unique rows only (default True)

    :example:
    >>> extract_fields("./example/Tribe1/210122_DS6.csv", ["Trimestre", "Nom", "Date"])
       Trimestre  Nom        Date
    0          1  DS6  22/01/2021
    """
    df = pd.read_csv(csv_filename)
    if fields:
        df = df[fields]
    if remove_duplicates:
        return df.drop_duplicates()
    return df


def build_id(template, element):
    """Build an id by applying the template to the element

    :example:
    >>> element = {"name": "pléà", "place": "here", "foo": "bar"}
    >>> build_id("{name} {place}", element)
    'plea_here'
    """
    return unidecode(template.format(**element)).replace(" ", "_")
recopytex/database/filesystem/loader.py (new file, 298 lines)

#!/usr/bin/env python
# encoding: utf-8

import yaml
import os
import uuid
from pathlib import Path
import pandas as pd
from .. import Loader
from .lib import list_csvs, extract_fields, build_id


DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "default_config.yml")
with open(DEFAULT_CONFIG_FILE, "r") as config:
    DEFAULT_CONFIG = yaml.load(config, Loader=yaml.FullLoader)


def maybe_dataframe(datas):
    try:
        return [e[1] for e in datas.iterrows()]
    except AttributeError:
        return datas


class CSVLoader(Loader):

    """Loader for scores and metadata stored in csv files

    :config:

    :example:
    >>> loader = CSVLoader()
    >>> loader.get_config()
    {'source': './', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'scores': {'BAD': {'value': 0, 'numeric_value': 0, 'color': '#E7472B', 'comment': 'Faux'}, 'FEW': {'value': 1, 'numeric_value': 1, 'color': '#FF712B', 'comment': 'Peu juste'}, 'NEARLY': {'value': 2, 'numeric_value': 2, 'color': '#F2EC4C', 'comment': 'Presque juste'}, 'GOOD': {'value': 3, 'numeric_value': 3, 'color': '#68D42F', 'comment': 'Juste'}, 'NOTFILLED': {'value': '', 'numeric_value': 'None', 'color': 'white', 'comment': 'En attente'}, 'NOANSWER': {'value': '.', 'numeric_value': 0, 'color': 'black', 'comment': 'Pas de réponse'}, 'ABS': {'value': 'a', 'numeric_value': 'None', 'color': 'lightgray', 'comment': 'Non noté'}}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}}

    >>> loader = CSVLoader("./test_config.yml")
    >>> loader.get_config()
    {'source': './example', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'scores': {'BAD': {'value': 0, 'numeric_value': 0, 'color': '#E7472B', 'comment': 'Faux'}, 'FEW': {'value': 1, 'numeric_value': 1, 'color': '#FF712B', 'comment': 'Peu juste'}, 'NEARLY': {'value': 2, 'numeric_value': 2, 'color': '#F2EC4C', 'comment': 'Presque juste'}, 'GOOD': {'value': 3, 'numeric_value': 3, 'color': '#68D42F', 'comment': 'Juste'}, 'NOTFILLED': {'value': '', 'numeric_value': 'None', 'color': 'white', 'comment': 'En attente'}, 'NOANSWER': {'value': '.', 'numeric_value': 0, 'color': 'black', 'comment': 'Pas de réponse'}, 'ABS': {'value': 'a', 'numeric_value': 'None', 'color': 'lightgray', 'comment': 'Non noté'}}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}, 'output': './output', 'templates': 'templates/', 'tribes': {'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}}
    """

    CONFIG = DEFAULT_CONFIG

    def get_config(self):
        """ Get config """
        return self._config

    @property
    def exam_columns(self):
        return pd.Index(["name", "date", "term", "origin", "tribe", "id"])

    @property
    def question_columns(self):
        return pd.Index(
            [
                "exercise",
                "question",
                "competence",
                "theme",
                "comment",
                "score_rate",
                "is_leveled",
                "origin",
                "exam_id",
                "id",
            ]
        )

    @property
    def score_columns(self):
        return pd.Index(
            [
                "term",
                "exam",
                "date",
                "exercise",
                "question",
                "competence",
                "theme",
                "comment",
                "score_rate",
                "is_leveled",
                "origin",
                "exam_id",
                "question_id",
                "student_name",
                "score",
            ]
        )

    def rename_columns(self, dataframe):
        """Rename dataframe columns to match `csv_fields`

        :param dataframe: the dataframe

        :example:
        >>> loader = CSVLoader()
        >>>

        """
        return dataframe.rename(
            columns={v: k for k, v in self._config["csv_fields"].items()}
        )

    def reverse_csv_field(self, keys):
        """ Reverse csv field from keys """
        return [self._config["csv_fields"][k] for k in keys]

    def get_tribes(self, only_names=False):
        """Get tribes list

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> loader.get_tribes()
        {'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}
        >>> loader.get_tribes(only_names=True)
        ['Tribe1', 'Tribe2']
        """
        if only_names:
            return list(self._config["tribes"].keys())
        return self._config["tribes"]

    def get_exams(self, tribes=[]):
        """Get exams list

        :param tribes: get only exams for those tribes
        :return: dataframe of exams

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> all(exams.columns == loader.exam_columns)
        True
        >>> exams
          name        date term                         origin   tribe          id
        0   DS  12/01/2021    1   example/Tribe1/210112_DS.csv  Tribe1   DS_Tribe1
        0  DS6  22/01/2021    1  example/Tribe1/210122_DS6.csv  Tribe1  DS6_Tribe1
        """
        exams = []
        for tribe in tribes:
            tribe_path = Path(self._config["source"]) / tribe
            csvs = list_csvs(tribe_path)
            for csv in csvs:
                fields = self.reverse_csv_field(["exam", "date", "term"])
                exam = extract_fields(csv, fields)
                exam = self.rename_columns(exam)
                exam = exam.rename(columns={"exam": "name"})
                exam["origin"] = str(csv)
                exam["tribe"] = tribe
                exam["id"] = build_id(
                    self._config["id_templates"]["exam"], exam.iloc[0]
                )
                exams.append(exam)
        if exams:
            return pd.concat(exams)
        return pd.DataFrame(columns=["name", "date", "term", "origin", "tribe", "id"])

    def get_exam_questions(self, exams=[]):
        """Get questions for exams stored in score_files

        :param exams: list or dataframe of exam metadata (needs the origin field to find the csv)

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> all(loader.get_exam_questions([exams.iloc[0]]).columns == loader.question_columns)
        True
        >>> questions = loader.get_exam_questions(exams)
        >>> questions.iloc[0]
        exercise                         Exercice 1
        question                                  1
        competence                         Calculer
        theme                                  Plop
        comment                              Coucou
        score_rate                              1.0
        is_leveled                              1.0
        origin         example/Tribe1/210112_DS.csv
        exam_id                           DS_Tribe1
        id            DS_Tribe1_Exercice_1_1_Coucou
        Name: 0, dtype: object
        """
        _exams = maybe_dataframe(exams)

        questions = []
        for exam in _exams:
            fields = self.reverse_csv_field(
                [
                    "exercise",
                    "question",
                    "competence",
                    "theme",
                    "comment",
                    "score_rate",
                    "is_leveled",
                ]
            )
            question = extract_fields(exam["origin"], fields)
            question = self.rename_columns(question)
            question["origin"] = exam["origin"]
            question["exam_id"] = exam["id"]
            question["id"] = build_id(
                self._config["id_templates"]["question"], question.iloc[0]
            )
            questions.append(question)

        return pd.concat(questions)

    def get_questions_scores(self, questions=[]):
        """Get scores of those questions

        :param questions: list or dataframe of question metadata (needs the origin field to find the csv)

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> questions = loader.get_exam_questions(exams)
        >>> scores = loader.get_questions_scores(questions)
        >>> all(scores.columns == loader.score_columns)
        True
        >>> scores["student_name"].unique()
        array(['Star Tice', 'Umberto Dingate', 'Starlin Crangle',
               'Humbert Bourcq', 'Gabriella Handyside', 'Stewart Eaves',
               'Erick Going', 'Ase Praton', 'Rollins Planks', 'Dunstan Sarjant',
               'Stacy Guiton', 'Ange Stanes', 'Amabelle Elleton',
               'Darn Broomhall', 'Dyan Chatto', 'Keane Rennebach', 'Nari Paulton',
               'Brandy Wase', 'Jaclyn Firidolfi', 'Violette Lockney'],
              dtype=object)
        """
        scores = []
        group_questions = questions.groupby("origin")
        for origin, questions_df in group_questions:
            scores_df = pd.read_csv(origin)
            scores_df = self.rename_columns(scores_df)
            student_names = [
                c
                for c in scores_df.columns
                if c not in self._config["csv_fields"].keys()
            ]

            common_columns = [c for c in questions_df.columns if c in scores_df.columns]
            scores_df = pd.merge(scores_df, questions_df, on=common_columns)

            kept_columns = [c for c in scores_df if c not in student_names]
            scores_df = pd.melt(
                scores_df,
                id_vars=kept_columns,
                value_vars=student_names,
                var_name="student_name",
                value_name="score",
            )

            scores_df = scores_df.rename(columns={"id": "question_id"})
            scores.append(scores_df)

        return pd.concat(scores)

    def get_exam_scores(self, exams=[]):
        """Get scores for all questions of the exam

        :param exams: list or dataframe of exam metadata (needs the origin field to find the csv)

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> scores = loader.get_exam_scores(exams)
        >>> scores.columns
        Index(['term', 'exam', 'date', 'exercise', 'question', 'competence', 'theme',
               'comment', 'score_rate', 'is_leveled', 'origin', 'exam_id',
               'question_id', 'student_name', 'score'],
              dtype='object')
        """
        questions = self.get_exam_questions(exams)
        return self.get_questions_scores(questions)

    def get_students(self, tribes=[]):
        """Get student list

        :param tribes: concerned tribes

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> tribes = loader.get_tribes()
        >>> students = loader.get_students([tribes["Tribe1"]])
        >>> students.columns
        Index(['Nom', 'email', 'origin', 'tribe'], dtype='object')
        """
        students = []
        for tribe in tribes:
            students_csv = Path(self._config["source"]) / tribe["students"]
            students_df = pd.read_csv(students_csv)
            students_df["origin"] = students_csv
            students_df["tribe"] = tribe["name"]
            students.append(students_df)

        return pd.concat(students)

    def get_student_scores(self, student=[]):
        """Get all scores for students"""
        pass
recopytex/database/filesystem/writer.py (new file, 7 lines)

#!/usr/bin/env python
# encoding: utf-8

"""

"""
recopytex/datalib/__init__.py (new empty file)
recopytex/datalib/dataframe.py (new file, 21 lines)

#!/usr/bin/env python
# encoding: utf-8


def column_values_to_column(pivot_column, value_column, kept_columns, df):
    """Turn pivot_column's values into columns filled with value_column, keeping kept_columns

    :param pivot_column: column whose values become the new columns
    :param value_column: column whose values fill the new columns
    :param kept_columns: unchanged columns
    :param df: DataFrame to work with

    :return: the pivoted dataframe

    """
    if pivot_column in kept_columns:
        pivot_columns = kept_columns
    else:
        pivot_columns = kept_columns + [pivot_column]

    return df.set_index(pivot_columns).unstack(pivot_column)[value_column].reset_index()
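A usage sketch with a toy long-format frame (hypothetical data, assuming pandas), showing the long-to-wide pivot this helper performs:

```python
import pandas as pd
from recopytex.datalib.dataframe import column_values_to_column

# Hypothetical long-format scores: one row per (question, student) pair.
scores = pd.DataFrame({
    "question": ["1", "1", "2", "2"],
    "student_name": ["Alice", "Bob", "Alice", "Bob"],
    "score": [3, 1, 2, 0],
})

wide = column_values_to_column("student_name", "score", ["question"], scores)
print(wide)  # columns: question, Alice, Bob; one row per question
```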
recopytex/datalib/on_score_column.py (new file, 257 lines)

#!/usr/bin/env python
# encoding: utf-8

from math import ceil
import pandas as pd


def is_none_score(x, score_config):
    """Does the score correspond to a None numeric_value (a score to ignore)

    >>> import pandas as pd
    >>> d = {"Eleve":["E1"]*7,
    ...     "score_rate": [1]*7,
    ...     "is_leveled":[0]+[1]*6,
    ...     "score":[0.33, "", ".", "a", 1, 2, 3],
    ...     }
    >>> score_config = {
    ...     'BAD': {'value': 0, 'numeric_value': 0},
    ...     'FEW': {'value': 1, 'numeric_value': 1},
    ...     'NEARLY': {'value': 2, 'numeric_value': 2},
    ...     'GOOD': {'value': 3, 'numeric_value': 3},
    ...     'NOTFILLED': {'value': '', 'numeric_value': 'None'},
    ...     'NOANSWER': {'value': '.', 'numeric_value': 0},
    ...     'ABS': {'value': 'a', 'numeric_value': 'None'}
    ... }
    >>> df = pd.DataFrame(d)
    >>> df.apply(lambda x:is_none_score(x, score_config), axis=1)
    0    False
    1     True
    2    False
    3     True
    4    False
    5    False
    6    False
    dtype: bool

    """
    none_values = [
        v["value"]
        for v in score_config.values()
        if str(v["numeric_value"]).lower() == "none"
    ]
    return x["score"] in none_values or pd.isnull(x["score"])


def format_score(x, score_config):
    """Make sure that scores have the appropriate format

    >>> import pandas as pd
    >>> d = {"Eleve":["E1"]*6,
    ...     "score_rate": [1]*6,
    ...     "is_leveled":[0]+[1]*5,
    ...     "score":[0.33, ".", "a", 1, 2, 3],
    ...     }
    >>> score_config = {
    ...     'BAD': {'value': 0, 'numeric_value': 0},
    ...     'FEW': {'value': 1, 'numeric_value': 1},
    ...     'NEARLY': {'value': 2, 'numeric_value': 2},
    ...     'GOOD': {'value': 3, 'numeric_value': 3},
    ...     'NOTFILLED': {'value': '', 'numeric_value': 'None'},
    ...     'NOANSWER': {'value': '.', 'numeric_value': 0},
    ...     'ABS': {'value': 'a', 'numeric_value': 'None'}
    ... }
    >>> df = pd.DataFrame(d)
    >>> df.apply(lambda x:format_score(x, score_config), axis=1)
    0    0.33
    1       .
    2       a
    3       1
    4       2
    5       3
    dtype: object
    >>> format_score({"score": "1.0", "is_leveled": 1}, score_config)
    1
    >>> format_score({"score": "3.0", "is_leveled": 1}, score_config)
    3
    >>> format_score({"score": 4, "is_leveled": 1}, score_config)
    Traceback (most recent call last):
    ...
    ValueError: 4 (<class 'int'>) can't be a score

    """
    if not x["is_leveled"]:
        return float(x["score"])

    try:
        score = int(float(x["score"]))
    except ValueError:
        score = str(x["score"])

    if score in [v["value"] for v in score_config.values()]:
        return score

    raise ValueError(f"{x['score']} ({type(x['score'])}) can't be a score")


def score_to_numeric_score(x, score_config):
    """Convert a score to the corresponding numeric value

    >>> import pandas as pd
    >>> d = {"Eleve":["E1"]*7,
    ...     "score_rate": [1]*7,
    ...     "is_leveled":[0]+[1]*6,
    ...     "score":[0.33, "", ".", "a", 1, 2, 3],
    ...     }
    >>> score_config = {
    ...     'BAD': {'value': 0, 'numeric_value': 0},
    ...     'FEW': {'value': 1, 'numeric_value': 1},
    ...     'NEARLY': {'value': 2, 'numeric_value': 2},
    ...     'GOOD': {'value': 3, 'numeric_value': 3},
    ...     'NOTFILLED': {'value': '', 'numeric_value': 'None'},
    ...     'NOANSWER': {'value': '.', 'numeric_value': 0},
    ...     'ABS': {'value': 'a', 'numeric_value': 'None'}
    ... }
    >>> df = pd.DataFrame(d)
    >>> df.apply(lambda x:score_to_numeric_score(x, score_config), axis=1)
    0    0.33
    1    None
    2       0
    3    None
    4       1
    5       2
    6       3
    dtype: object

    """
    if x["is_leveled"]:
        replacements = {v["value"]: v["numeric_value"] for v in score_config.values()}
        return replacements[x["score"]]

    return x["score"]


def score_to_mark(x, score_max, rounding=lambda x: round(x, 2)):
    """Compute the mark from "score", which has to be filtered and in numeric form

    If the item is leveled, the score is multiplied by the score_rate;
    otherwise the score is copied as-is.

    :param x: dictionary with "is_leveled", "score" (needs to be a number) and "score_rate" keys
    :param score_max:
    :param rounding: rounding mark function
    :return: the mark

    >>> import pandas as pd
    >>> d = {"Eleve":["E1"]*7,
    ...     "score_rate": [1]*7,
    ...     "is_leveled":[0]+[1]*6,
    ...     "score":[0.33, "", ".", "a", 1, 2, 3],
    ...     }
    >>> score_config = {
    ...     'BAD': {'value': 0, 'numeric_value': 0},
    ...     'FEW': {'value': 1, 'numeric_value': 1},
    ...     'NEARLY': {'value': 2, 'numeric_value': 2},
    ...     'GOOD': {'value': 3, 'numeric_value': 3},
    ...     'NOTFILLED': {'value': '', 'numeric_value': 'None'},
    ...     'NOANSWER': {'value': '.', 'numeric_value': 0},
    ...     'ABS': {'value': 'a', 'numeric_value': 'None'}
    ... }
    >>> df = pd.DataFrame(d)
    >>> df = df[~df.apply(lambda x:is_none_score(x, score_config), axis=1)]
    >>> df["score"] = df.apply(lambda x:score_to_numeric_score(x, score_config), axis=1)
    >>> df.apply(lambda x:score_to_mark(x, 3), axis=1)
    0    0.33
    2    0.00
    4    0.33
    5    0.67
    6    1.00
    dtype: float64
    >>> from .on_value import round_half_point
    >>> df.apply(lambda x:score_to_mark(x, 3, round_half_point), axis=1)
    0    0.5
    2    0.0
    4    0.5
    5    0.5
    6    1.0
    dtype: float64
    """
    if x["is_leveled"]:
        if x["score"] not in list(range(score_max + 1)):
            raise ValueError(f"The evaluation is out of range: {x['score']} at {x}")
        return rounding(x["score"] * x["score_rate"] / score_max)

    return rounding(x["score"])


def score_to_level(x, level_max=3):
    """Compute the level (".",0,1,2,3).

    :param x: dictionary with "is_leveled", "score" and "score_rate" keys
    :return: the level

    >>> import pandas as pd
    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...     "score_rate":[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...     "is_leveled":[0]*4+[1]*2 + [0]*4+[1]*2,
    ...     "score":[1, 0.33, 0, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...     }
    >>> df = pd.DataFrame(d)
    >>> df
       Eleve  score_rate  is_leveled  score
    0     E1           1           0  1.000
    1     E1           1           0  0.330
    2     E1           2           0  0.000
    3     E1           2           0  1.500
|
||||||
|
4 E1 2 1 1.000
|
||||||
|
5 E1 2 1 3.000
|
||||||
|
6 E2 1 0 0.666
|
||||||
|
7 E2 1 0 1.000
|
||||||
|
8 E2 2 0 1.500
|
||||||
|
9 E2 2 0 1.000
|
||||||
|
10 E2 2 1 2.000
|
||||||
|
11 E2 2 1 3.000
|
||||||
|
>>> df.apply(score_to_level, axis=1)
|
||||||
|
0 3
|
||||||
|
1 1
|
||||||
|
2 0
|
||||||
|
3 3
|
||||||
|
4 1
|
||||||
|
5 3
|
||||||
|
6 2
|
||||||
|
7 3
|
||||||
|
8 3
|
||||||
|
9 2
|
||||||
|
10 2
|
||||||
|
11 3
|
||||||
|
dtype: int64
|
||||||
|
>>> df.apply(lambda x: score_to_level(x, 5), axis=1)
|
||||||
|
0 5
|
||||||
|
1 2
|
||||||
|
2 0
|
||||||
|
3 4
|
||||||
|
4 1
|
||||||
|
5 3
|
||||||
|
6 4
|
||||||
|
7 5
|
||||||
|
8 4
|
||||||
|
9 3
|
||||||
|
10 2
|
||||||
|
11 3
|
||||||
|
dtype: int64
|
||||||
|
"""
|
||||||
|
if x["is_leveled"]:
|
||||||
|
return int(x["score"])
|
||||||
|
|
||||||
|
if x["score"] > x["score_rate"]:
|
||||||
|
raise ValueError(
|
||||||
|
f"score is higher than score_rate ({x['score']} > {x['score_rate']}) for {x}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return int(ceil(x["score"] / x["score_rate"] * level_max))
|
||||||
|
|
||||||
|
|
||||||
|
# -----------------------------
|
||||||
|
# Reglages pour 'vim'
|
||||||
|
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
||||||
|
# cursor: 16 del
|
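Taken together, the helpers above form a small pipeline: drop rows whose score maps to a "None" numeric value, convert the remaining scores to numbers, then derive a mark and a level per row. Below is a minimal usage sketch, not a prescribed API: it assumes the functions from this new module are imported into scope, and the sample DataFrame and score_config simply mirror the doctests.

import pandas as pd

score_config = {
    "BAD": {"value": 0, "numeric_value": 0},
    "FEW": {"value": 1, "numeric_value": 1},
    "NEARLY": {"value": 2, "numeric_value": 2},
    "GOOD": {"value": 3, "numeric_value": 3},
    "NOTFILLED": {"value": "", "numeric_value": "None"},
    "NOANSWER": {"value": ".", "numeric_value": 0},
    "ABS": {"value": "a", "numeric_value": "None"},
}

# Hypothetical small sheet: one non-leveled score out of 2 points, three leveled ones.
df = pd.DataFrame(
    {
        "Eleve": ["E1"] * 4,
        "score_rate": [2] * 4,
        "is_leveled": [0, 1, 1, 1],
        "score": [1.5, "", 2, 3],
    }
)

# Filter out absent / not-filled rows, convert to numeric, then grade out of 3 levels.
df = df[~df.apply(lambda x: is_none_score(x, score_config), axis=1)]
df["score"] = df.apply(lambda x: score_to_numeric_score(x, score_config), axis=1)
df["mark"] = df.apply(lambda x: score_to_mark(x, 3), axis=1)
df["level"] = df.apply(lambda x: score_to_level(x), axis=1)

With these values the empty "" score is dropped by is_none_score, the leveled scores 2 and 3 become marks of 1.33 and 2.0 out of 2, and the non-leveled 1.5 passes through unchanged.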
40
recopytex/datalib/on_value.py
Normal file
@@ -0,0 +1,40 @@
#!/usr/bin/env python
# encoding: utf-8

from math import ceil, floor


def round_with_base(x, base=0.5):
    """Round to a multiple of base

    :example:
    >>> round_with_base(1.33, 0.1)
    1.3
    >>> round_with_base(1.33, 0.2)
    1.4
    >>> round_with_base(1.33, 1)
    1
    >>> round_with_base(1.33, 2)
    2
    """
    try:
        prec = len(str(base).split(".")[1])
    except IndexError:
        prec = 0
    return round(base * round(float(x) / base), prec)


def round_half_point(x):
    """Round to nearest half point

    :example:
    >>> round_half_point(1.33)
    1.5
    >>> round_half_point(1.1)
    1.0
    >>> round_half_point(1.66)
    1.5
    >>> round_half_point(1.76)
    2.0
    """
    return round_with_base(x, base=0.5)
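Because score_to_mark accepts any single-argument rounding callable, round_with_base can stand in for round_half_point when a different granularity is wanted. A small hypothetical sketch (round_quarter_point is an illustrative name, not part of the module):

from functools import partial

# Hypothetical quarter-point rounding built on round_with_base above;
# it can be passed as the `rounding` argument of score_to_mark.
round_quarter_point = partial(round_with_base, base=0.25)

round_quarter_point(1.33)  # 1.25
round_with_base(12.3, 5)   # 10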
@@ -1,219 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

import pandas as pd
import numpy as np
from math import ceil, floor
from .config import COLUMNS, VALIDSCORE

"""
Functions for manipulate score dataframes
"""


def round_half_point(val):
    try:
        return 0.5 * ceil(2.0 * val)
    except ValueError:
        return val
    except TypeError:
        return val


def score_to_mark(x):
    """Compute the mark

    if the item is leveled then the score is multiply by the score_rate
    otherwise it copies the score

    :param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
    :return: the mark

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> score_to_mark(df.loc[0])
    1.0
    >>> score_to_mark(df.loc[10])
    1.3333333333333333
    """
    # -1 is no answer
    if x[COLUMNS["score"]] == -1:
        return 0

    if x[COLUMNS["is_leveled"]]:
        if x[COLUMNS["score"]] not in [0, 1, 2, 3]:
            raise ValueError(
                f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}"
            )
        return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)

    if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
        raise ValueError(
            f"The score ({x['score']}) is greated than the rating scale ({x[COLUMNS['score_rate']]}) at {x}"
        )
    return x[COLUMNS["score"]]


def score_to_level(x):
    """Compute the level (".",0,1,2,3).

    :param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
    :return: the level

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, np.nan, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> score_to_level(df.loc[0])
    3
    >>> score_to_level(df.loc[1])
    1
    >>> score_to_level(df.loc[2])
    'na'
    >>> score_to_level(df.loc[3])
    3
    >>> score_to_level(df.loc[5])
    3
    >>> score_to_level(df.loc[10])
    2
    """
    # negatives are no answer or negatives points
    if x[COLUMNS["score"]] <= -1:
        return np.nan

    if x[COLUMNS["is_leveled"]]:
        return int(x[COLUMNS["score"]])

    return int(ceil(x[COLUMNS["score"]] / x[COLUMNS["score_rate"]] * 3))


# DataFrame columns manipulations


def compute_mark(df):
    """Compute the mark for the dataframe

    apply score_to_mark to each row

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> compute_mark(df)
    0 1.00
    1 0.33
    2 2.00
    3 1.50
    4 0.67
    5 2.00
    6 0.67
    7 1.00
    8 1.50
    9 1.00
    10 1.33
    11 2.00
    dtype: float64
    """
    return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
        score_to_mark, axis=1
    )


def compute_level(df):
    """Compute level for the dataframe

    Applies score_to_level to each row

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
    :return: Columns with level

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[np.nan, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> compute_level(df)
    0 na
    1 1
    2 3
    3 3
    4 1
    5 3
    6 2
    7 3
    8 3
    9 2
    10 2
    11 3
    dtype: object
    """
    return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
        score_to_level, axis=1
    )


def compute_normalized(df):
    """Compute the normalized mark (Mark / score_rate)

    :param df: DataFrame with "Mark" and COLUMNS["score_rate"] columns
    :return: column with normalized mark

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> df["Mark"] = compute_marks(df)
    >>> compute_normalized(df)
    0 1.00
    1 0.33
    2 1.00
    3 0.75
    4 0.33
    5 1.00
    6 0.67
    7 1.00
    8 0.75
    9 0.50
    10 0.67
    11 1.00
    dtype: float64
    """
    return df[COLUMNS["mark"]] / df[COLUMNS["score_rate"]]


# Postprocessing question scores


def pp_q_scores(df):
    """Postprocessing questions scores dataframe

    Add 3 columns: mark, level and normalized

    :param df: questions-scores dataframe
    :return: same data frame with mark, level and normalize columns
    """
    assign = {
        COLUMNS["mark"]: compute_mark,
        COLUMNS["level"]: compute_level,
        COLUMNS["normalized"]: compute_normalized,
    }
    return df.assign(**assign)


# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
@@ -1,132 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

from datetime import datetime
from pathlib import Path
from prompt_toolkit import HTML
import yaml

from .getconfig import config


class Exam:
    def __init__(self, name, tribename, date, term, **kwrds):
        self._name = name
        self._tribename = tribename
        try:
            self._date = datetime.strptime(date, "%y%m%d")
        except:
            self._date = date

        self._term = term

        self._exercises = {}

    @property
    def name(self):
        return self._name

    @property
    def tribename(self):
        return self._tribename

    @property
    def date(self):
        return self._date

    @property
    def term(self):
        return self._term

    def add_exercise(self, name, questions):
        """ Add key with questions in ._exercises """
        try:
            self._exercises[name]
        except KeyError:
            self._exercises[name] = questions
        else:
            raise KeyError("The exercise already exsists. Use modify_exercise")

    def modify_exercise(self, name, questions, append=False):
        """Modify questions of an exercise

        If append==True, add questions to the exercise questions

        """
        try:
            self._exercises[name]
        except KeyError:
            raise KeyError("The exercise already exsists. Use modify_exercise")
        else:
            if append:
                self._exercises[name] += questions
            else:
                self._exercises[name] = questions

    @property
    def exercices(self):
        return self._exercises

    @property
    def tribe_path(self):
        return Path(config["source"]) / self.tribename

    @property
    def tribe_student_path(self):
        return (
            Path(config["source"])
            / [t["students"] for t in config["tribes"] if t["name"] == self.tribename][
                0
            ]
        )

    @property
    def long_name(self):
        """ Get exam name with date inside """
        return f"{self.date.strftime('%y%m%d')}_{self.name}"

    def path(self, extention=""):
        return self.tribe_path / (self.long_name + extention)

    def to_dict(self):
        return {
            "name": self.name,
            "tribename": self.tribename,
            "date": self.date,
            "term": self.term,
            "exercices": self.exercices,
        }

    def to_row(self):
        rows = []
        for ex, questions in self.exercices.items():
            for q in questions:
                rows.append(
                    {
                        "term": self.term,
                        "assessment": self.name,
                        "date": self.date.strftime("%d/%m/%Y"),
                        "exercise": ex,
                        "question": q["id"],
                        **q,
                    }
                )
        return rows

    @property
    def themes(self):
        themes = set()
        for questions in self._exercises.values():
            themes.update([q["theme"] for q in questions])
        return themes

    def display_exercise(self, name):
        pass

    def display(self, name):
        pass

    def write(self):
        print(f"Sauvegarde temporaire dans {self.path('.yml')}")
        self.tribe_path.mkdir(exist_ok=True)
        with open(self.path(".yml"), "w") as f:
            f.write(yaml.dump(self.to_dict()))
@@ -1,299 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8

import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
from pathlib import Path
from datetime import datetime
import pandas as pd
import numpy as np


from .. import flat_df_students, pp_q_scores
from ..config import NO_ST_COLUMNS
from .getconfig import config, CONFIGPATH

COLORS = {
    ".": "black",
    0: "#E7472B",
    1: "#FF712B",
    2: "#F2EC4C",
    3: "#68D42F",
}

external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# app = dash.Dash(__name__)

app.layout = html.Div(
    children=[
        html.H1("Analyse des notes"),
        html.Div(
            [
                "Classe: ",
                dcc.Dropdown(
                    id="tribe",
                    options=[
                        {"label": t["name"], "value": t["name"]}
                        for t in config["tribes"]
                    ],
                    value=config["tribes"][0]["name"],
                ),
                "Evaluation: ",
                dcc.Dropdown(id="csv"),
            ],
            style={"columnCount": 2},
        ),
        html.Div(
            [
                dash_table.DataTable(
                    id="final_score_table",
                    columns=[
                        {"id": "Élève", "name": "Élève"},
                        {"id": "Note", "name": "Note"},
                        {"id": "Barème", "name": "Bareme"},
                    ],
                    data=[],
                    style_data_conditional=[
                        {
                            "if": {"row_index": "odd"},
                            "backgroundColor": "rgb(248, 248, 248)",
                        }
                    ],
                    style_header={
                        "backgroundColor": "rgb(230, 230, 230)",
                        "fontWeight": "bold",
                    },
                    style_data={
                        "width": "100px",
                        "maxWidth": "100px",
                        "minWidth": "100px",
                    },
                ),
                html.Div(
                    [
                        dash_table.DataTable(
                            id="final_score_describe",
                        ),
                        dcc.Graph(id="fig_assessment_hist"),
                        dcc.Graph(id="fig_competences"),
                    ]
                ),
            ],
            style={"columnCount": 2},
        ),
        html.Br(),
        html.Div(
            [
                dash_table.DataTable(
                    id="scores_table",
                    columns=[{"id": c, "name": c} for c in NO_ST_COLUMNS.values()],
                    style_cell={
                        "whiteSpace": "normal",
                        "height": "auto",
                    },
                    style_data_conditional=[],
                    editable=True,
                )
            ]
        ),
        html.P(id="lastsave"),
        dcc.Store(id="final_score"),
    ]
)


@app.callback(
    [
        dash.dependencies.Output("csv", "options"),
        dash.dependencies.Output("csv", "value"),
    ],
    [dash.dependencies.Input("tribe", "value")],
)
def update_csvs(value):
    if not value:
        raise PreventUpdate
    p = Path(value)
    csvs = list(p.glob("*.csv"))
    try:
        return [{"label": str(c), "value": str(c)} for c in csvs], str(csvs[0])
    except IndexError:
        return []


@app.callback(
    [
        dash.dependencies.Output("final_score", "data"),
    ],
    [dash.dependencies.Input("scores_table", "data")],
)
def update_final_scores(data):
    if not data:
        raise PreventUpdate
    try:
        scores = pd.DataFrame.from_records(data)
        scores = flat_df_students(scores).dropna(subset=["Score"])
        scores = pp_q_scores(scores)
        assessment_scores = scores.groupby(["Eleve"]).agg(
            {"Note": "sum", "Bareme": "sum"}
        )
        return [assessment_scores.reset_index().to_dict("records")]
    except KeyError:
        raise PreventUpdate


@app.callback(
    [
        dash.dependencies.Output("final_score_table", "columns"),
        dash.dependencies.Output("final_score_table", "data"),
    ],
    [dash.dependencies.Input("final_score", "data")],
)
def update_final_scores_table(data):
    assessment_scores = pd.DataFrame.from_records(data)
    return [
        {"id": c, "name": c} for c in assessment_scores.columns
    ], assessment_scores.to_dict("records")


@app.callback(
    [
        dash.dependencies.Output("final_score_describe", "columns"),
        dash.dependencies.Output("final_score_describe", "data"),
    ],
    [dash.dependencies.Input("final_score", "data")],
)
def update_final_scores_descr(data):
    desc = pd.DataFrame.from_records(data)["Note"].describe()
    return [{"id": c, "name": c} for c in desc.keys()], [desc.to_dict()]


@app.callback(
    [
        dash.dependencies.Output("fig_assessment_hist", "figure"),
    ],
    [dash.dependencies.Input("final_score", "data")],
)
def update_final_scores_hist(data):
    assessment_scores = pd.DataFrame.from_records(data)
    ranges = np.linspace(
        0, assessment_scores.Bareme.max(), int(assessment_scores.Bareme.max() * 2 + 1)
    )
    bins = pd.cut(assessment_scores["Note"], ranges)
    assessment_scores["Bin"] = bins
    assessment_grouped = (
        assessment_scores.reset_index()
        .groupby("Bin")
        .agg({"Bareme": "count", "Eleve": lambda x: "\n".join(x)})
    )
    assessment_grouped.index = assessment_grouped.index.map(lambda i: i.right)
    fig = go.Figure()
    fig.add_bar(
        x=assessment_grouped.index,
        y=assessment_grouped.Bareme,
        text=assessment_grouped.Eleve,
        textposition="auto",
        hovertemplate="",
        marker_color="#4E89DE",
    )
    return [fig]


@app.callback(
    [
        dash.dependencies.Output("fig_competences", "figure"),
    ],
    [dash.dependencies.Input("scores_table", "data")],
)
def update_competence_fig(data):
    scores = pd.DataFrame.from_records(data)
    scores = flat_df_students(scores).dropna(subset=["Score"])
    scores = pp_q_scores(scores)
    pt = pd.pivot_table(
        scores,
        index=["Exercice", "Question", "Commentaire"],
        columns="Score",
        aggfunc="size",
        fill_value=0,
    )
    for i in {i for i in pt.index.get_level_values(0)}:
        pt.loc[(str(i), "", ""), :] = ""
    pt.sort_index(inplace=True)
    index = (
        pt.index.get_level_values(0)
        + ":"
        + pt.index.get_level_values(1)
        + " "
        + pt.index.get_level_values(2)
    )

    fig = go.Figure()
    bars = [
        {"score": -1, "name":"Pas de réponse", "color": COLORS["."]},
        {"score": 0, "name":"Faut", "color": COLORS[0]},
        {"score": 1, "name":"Peu juste", "color": COLORS[1]},
        {"score": 2, "name":"Presque juste", "color": COLORS[2]},
        {"score": 3, "name":"Juste", "color": COLORS[3]},
    ]
    for b in bars:
        try:
            fig.add_bar(x=index, y=pt[b["score"]], name=b["name"], marker_color=b["color"])
        except KeyError:
            pass
    fig.update_layout(barmode="relative")
    return [fig]


@app.callback(
    [dash.dependencies.Output("lastsave", "children")],
    [
        dash.dependencies.Input("scores_table", "data"),
        dash.dependencies.State("csv", "value"),
    ],
)
def save_scores(data, csv):
    scores = pd.DataFrame.from_records(data)
    print(f"save at {csv} ({datetime.today()})")
    scores.to_csv(csv, index=False)
    return [datetime.today()]


def highlight_value(df):
    """ Cells style """
    hight = []
    for v, color in COLORS.items():
        hight += [
            {
                "if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
                "backgroundColor": color,
                "color": "white",
            }
            for col in df.columns
            if col not in NO_ST_COLUMNS.values()
        ]
    return hight


@app.callback(
    [
        dash.dependencies.Output("scores_table", "columns"),
        dash.dependencies.Output("scores_table", "data"),
        dash.dependencies.Output("scores_table", "style_data_conditional"),
    ],
    [dash.dependencies.Input("csv", "value")],
)
def update_scores_table(value):
    if not value:
        raise PreventUpdate
    stack = pd.read_csv(value, encoding="UTF8")
    # try:
    #     stack = stack.drop(columns=["Nom", "Trimestre", "Date", "Competence", "Domaine", "Est_nivele", "Bareme"])
    # except KeyError:
    #     stack = stack
    return (
        [{"id": c, "name": c} for c in stack.columns],
        stack.to_dict("records"),
        highlight_value(stack),
    )
@@ -1,9 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import yaml

CONFIGPATH = "recoconfig.yml"

with open(CONFIGPATH, "r") as config:
    config = yaml.load(config, Loader=yaml.FullLoader)
@@ -1,233 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8


from prompt_toolkit import prompt, HTML, ANSI
from prompt_toolkit import print_formatted_text as print
from prompt_toolkit.styles import Style
from prompt_toolkit.validation import Validator
from prompt_toolkit.completion import WordCompleter
from unidecode import unidecode
from datetime import datetime
from functools import wraps
import sys

from .getconfig import config


VALIDATE = [
    "o",
    "ok",
    "OK",
    "oui",
    "OUI",
    "yes",
    "YES",
]
REFUSE = ["n", "non", "NON", "no", "NO"]
CANCEL = ["a", "annuler"]

STYLE = Style.from_dict(
    {
        "": "#93A1A1",
        "validation": "#884444",
        "appending": "#448844",
    }
)


class CancelError(Exception):
    pass


def prompt_validate(question, cancelable=False, empty_means=1, style="validation"):
    """Prompt for validation

    :param question: Text to print to ask the question.
    :param cancelable: enable cancel answer
    :param empty_means: result for no answer
    :return:
        0 -> Refuse
        1 -> Validate
        -1 -> cancel
    """
    question_ = question
    choices = VALIDATE + REFUSE

    if cancelable:
        question_ += "(a ou annuler pour sortir)"
        choices += CANCEL

    ans = prompt(
        [
            (f"class:{style}", question_),
        ],
        completer=WordCompleter(choices),
        style=STYLE,
    ).lower()

    if ans == "":
        return empty_means
    if ans in VALIDATE:
        return 1
    if cancelable and ans in CANCEL:
        return -1
    return 0


def prompt_until_validate(question="C'est ok? ", cancelable=False):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwrd):
            ans = func(*args, **kwrd)

            confirm = prompt_validate(question, cancelable)

            if confirm == -1:
                raise CancelError

            while not confirm:
                sys.stdout.flush()
                ans = func(*args, **ans, **kwrd)
                confirm = prompt_validate(question, cancelable)
                if confirm == -1:
                    raise CancelError
            return ans

        return wrapper

    return decorator


@prompt_until_validate()
def prompt_exam(**kwrd):
    """ Prompt questions to edit an exam """
    print(HTML("<b>Nouvelle évaluation</b>"))
    exam = {}
    exam["name"] = prompt("Nom de l'évaluation: ", default=kwrd.get("name", "DS"))

    tribes_name = [t["name"] for t in config["tribes"]]

    exam["tribename"] = prompt(
        "Nom de la classe: ",
        default=kwrd.get("tribename", ""),
        completer=WordCompleter(tribes_name),
        validator=Validator.from_callable(lambda x: x in tribes_name),
    )
    exam["tribe"] = [t for t in config["tribes"] if t["name"] == exam["tribename"]][0]

    exam["date"] = prompt(
        "Date de l'évaluation (%y%m%d): ",
        default=kwrd.get("date", datetime.today()).strftime("%y%m%d"),
        validator=Validator.from_callable(lambda x: (len(x) == 6) and x.isdigit()),
    )
    exam["date"] = datetime.strptime(exam["date"], "%y%m%d")

    exam["term"] = prompt(
        "Trimestre: ",
        validator=Validator.from_callable(lambda x: x.isdigit()),
        default=kwrd.get("term", "1"),
    )

    return exam


@prompt_until_validate()
def prompt_exercise(number=1, completer={}, **kwrd):
    exercise = {}
    try:
        kwrd["name"]
    except KeyError:
        print(HTML("<b>Nouvel exercice</b>"))
        exercise["name"] = prompt(
            "Nom de l'exercice: ", default=kwrd.get("name", f"Exercice {number}")
        )
    else:
        print(HTML(f"<b>Modification de l'exercice: {kwrd['name']}</b>"))
        exercise["name"] = kwrd["name"]

    exercise["questions"] = []

    try:
        kwrd["questions"][0]
    except KeyError:
        last_question_id = "1a"
    except IndexError:
        last_question_id = "1a"
    else:
        for ques in kwrd["questions"]:
            try:
                exercise["questions"].append(
                    prompt_question(completer=completer, **ques)
                )
            except CancelError:
                print("Cette question a été supprimée")
        last_question_id = exercise["questions"][-1]["id"]

    appending = prompt_validate(
        question="Ajouter un élément de notation? ", style="appending"
    )
    while appending:
        try:
            exercise["questions"].append(
                prompt_question(last_question_id, completer=completer)
            )
        except CancelError:
            print("Cette question a été supprimée")
        else:
            last_question_id = exercise["questions"][-1]["id"]
        appending = prompt_validate(
            question="Ajouter un élément de notation? ", style="appending"
        )

    return exercise


@prompt_until_validate(cancelable=True)
def prompt_question(last_question_id="1a", completer={}, **kwrd):
    try:
        kwrd["id"]
    except KeyError:
        print(HTML("<b>Nouvel élément de notation</b>"))
    else:
        print(
            HTML(f"<b>Modification de l'élément {kwrd['id']} ({kwrd['comment']})</b>")
        )

    question = {}
    question["id"] = prompt(
        "Identifiant de la question: ",
        default=kwrd.get("id", "1a"),
    )

    question["competence"] = prompt(
        "Competence: ",
        default=kwrd.get("competence", list(config["competences"].keys())[0]),
        completer=WordCompleter(config["competences"].keys()),
        validator=Validator.from_callable(lambda x: x in config["competences"].keys()),
    )

    question["theme"] = prompt(
        "Domaine: ",
        default=kwrd.get("theme", ""),
        completer=WordCompleter(completer.get("theme", [])),
    )

    question["comment"] = prompt(
        "Commentaire: ",
        default=kwrd.get("comment", ""),
    )

    question["is_leveled"] = prompt(
        "Évaluation par niveau: ",
        default=kwrd.get("is_leveled", "1"),
        # validate
    )

    question["score_rate"] = prompt(
        "Barème: ",
        default=kwrd.get("score_rate", "1"),
        # validate
    )

    return question
@@ -2,19 +2,7 @@
 # encoding: utf-8
 
 import click
-from pathlib import Path
-import sys
-import papermill as pm
-import pandas as pd
-from datetime import datetime
-import yaml
-
-from .getconfig import config, CONFIGPATH
-from .prompts import prompt_exam, prompt_exercise, prompt_validate
-from ..config import NO_ST_COLUMNS
-from .exam import Exam
-from .exam_dash import app as exam_app
-
+from recopytex.dashboard.index import app as dash
 
 @click.group()
 def cli():
@@ -22,110 +10,9 @@ def cli():
 
 
 @cli.command()
-def print_config():
-    click.echo(f"Config file is {CONFIGPATH}")
-    click.echo("It contains")
-    click.echo(config)
-
-
-@cli.command()
-def setup():
-    """Setup the environnement using recoconfig.yml"""
-    for tribe in config["tribes"]:
-        Path(tribe["name"]).mkdir(exist_ok=True)
-        if not Path(tribe["students"]).exists():
-            print(f"The file {tribe['students']} does not exists")
-
-
-@cli.command()
-def new_exam():
-    """ Create new exam csv file """
-    exam = Exam(**prompt_exam())
-
-    if exam.path(".yml").exists():
-        print(f"Fichier sauvegarde trouvé à {exam.path('.yml')} -- importation")
-        with open(exam.path(".yml"), "r") as f:
-            for name, questions in yaml.load(f, Loader=yaml.SafeLoader)[
-                "exercices"
-            ].items():
-                exam.add_exercise(name, questions)
-
-    print(exam.themes)
-    # print(yaml.dump(exam.to_dict()))
-
-    exam.write()
-
-    for name, questions in exam.exercices.items():
-        exam.modify_exercise(
-            **prompt_exercise(
-                name=name, completer={"theme": exam.themes}, questions=questions
-            )
-        )
-        exam.write()
-
-    new_exercise = prompt_validate("Ajouter un exercice? ")
-    while new_exercise:
-        exam.add_exercise(
-            **prompt_exercise(len(exam.exercices) + 1, completer={"theme": exam.themes})
-        )
-        exam.write()
-        new_exercise = prompt_validate("Ajouter un exercice? ")
-
-    rows = exam.to_row()
-
-    base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
-    base_df.rename(columns=NO_ST_COLUMNS, inplace=True)
-
-    students = pd.read_csv(exam.tribe_student_path)["Nom"]
-    for student in students:
-        base_df[student] = ""
-
-    exam.tribe_path.mkdir(exist_ok=True)
-
-    base_df.to_csv(exam.path(".csv"), index=False)
-    print(f"Le fichier note a été enregistré à {exam.path('.csv')}")
-
-
-@cli.command()
-def exam_analysis():
-    exam_app.run_server(debug=True)
-
-
-@cli.command()
-@click.argument("csv_file")
-def report(csv_file):
-    csv = Path(csv_file)
-    if not csv.exists():
-        click.echo(f"{csv_file} does not exists")
-        sys.exit(1)
-    if csv.suffix != ".csv":
-        click.echo(f"{csv_file} has to be a csv file")
-        sys.exit(1)
-
-    csv_file = Path(csv_file)
-    tribe_dir = csv_file.parent
-    csv_filename = csv_file.name.split(".")[0]
-
-    assessment = str(csv_filename).split("_")[-1].capitalize()
-    date = str(csv_filename).split("_")[0]
-    try:
-        date = datetime.strptime(date, "%y%m%d")
-    except ValueError:
-        date = None
-
-    tribe = str(tribe_dir).split("/")[-1]
-
-    template = Path(config["templates"]) / "tpl_evaluation.ipynb"
-
-    dest = Path(config["output"]) / tribe / csv_filename
-    dest.mkdir(parents=True, exist_ok=True)
-
-    click.echo(f"Building {assessment} ({date:%d/%m/%y}) report")
-    pm.execute_notebook(
-        str(template),
-        str(dest / f"{assessment}.ipynb"),
-        parameters=dict(
-            tribe=tribe,
-            assessment=assessment,
-            date=f"{date:%d/%m/%y}",
-            csv_file=str(csv_file.absolute()),
-        ),
-    )
+@click.option("--debug", default=0, help="Debug mode for dash")
+def dashboard(debug):
+    dash.run_server(debug=bool(debug))
+
+if __name__ == "__main__":
+    cli()
13
test_config.yml
Normal file
@@ -0,0 +1,13 @@
---
source: ./example
output: ./output
templates: templates/

tribes:
  Tribe1:
    name: Tribe1
    type: Type1
    students: tribe1.csv
  Tribe2:
    name: Tribe2
    students: tribe2.csv