Compare commits
43 Commits
Author | SHA1 | Date | |
---|---|---|---|
83eb9c327b | |||
ff1ecfef25 | |||
921292a0d2 | |||
2d08671247 | |||
a16211cbd4 | |||
876f583d51 | |||
97b97af2de | |||
d8d84690c6 | |||
18f855ab83 | |||
36425e587e | |||
8cdeecfc53 | |||
1a7c97d869 | |||
ab5de2711e | |||
235019102b | |||
8ec24a24b3 | |||
2e86b3a0a2 | |||
7e6b24aaea | |||
6889ddd97c | |||
10b9954c05 | |||
7553628306 | |||
effc049578 | |||
411f910ce6 | |||
00d81d694a | |||
a8b2ac455d | |||
9e0ea14d05 | |||
2031ade1ab | |||
6ed55c07d4 | |||
1d234ea5fc | |||
646314ad88 | |||
0739cfdae7 | |||
a50901556e | |||
abc5513268 | |||
598086ddb0 | |||
94f8080acd | |||
6331573940 | |||
a292fe23e0 | |||
3005d49a1d | |||
9fce390304 | |||
94f94dae84 | |||
596a52b1d0 | |||
37c95d75de | |||
bd91bf51d6 | |||
c1fd060707 |
5
example/Tribe1/210112_DS.csv
Normal file
5
example/Tribe1/210112_DS.csv
Normal file
@@ -0,0 +1,5 @@
|
||||
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
|
||||
1,DS,12/01/2021,Exercice 1,1,Calculer,Plop,Coucou,1,1,,,1,0,1,2,3,0,3,3,2,,1,,,,,,,
|
||||
1,DS,12/01/2021,Exercice 1,2,Calculer,C'est trop chouette!,Coucou,1,1,,,1,2,,,3,3,,,,,2,,,,,,,
|
||||
1,DS,12/01/2021,Exercice 1,3,Calculer,Null,Coucou,1,1,,,,3,2,,,,,,,,3,,,,,,,
|
||||
1,DS,12/01/2021,Exercice 1,3,Calculer,Nié,DChic,1,1,,,,2,.,,,,,,,,,,,,,,,
|
|
5
example/Tribe1/210122_DS6.csv
Normal file
5
example/Tribe1/210122_DS6.csv
Normal file
@@ -0,0 +1,5 @@
|
||||
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
|
||||
1,DS6,22/01/2021,Exercice 1,Sait pas,,,,,,,,,,,,,,,,,,,,,,,,,
|
||||
1,DS6,22/01/2021,Exercice 1,Ha,,,,,,,,,,,,,,,,,,,,,,,,,
|
||||
1,DS6,22/01/2021,Exercice 1,,,,,,,,,,,,,,,,,,,,,,,,,,
|
||||
1,DS6,22/01/2021,Exercice 2,grr,,,,,,,,,,,,,,,,,,,,,,,,,
|
|
@@ -3,30 +3,11 @@ source: ./
|
||||
output: ./
|
||||
templates: templates/
|
||||
|
||||
competences:
|
||||
Chercher:
|
||||
name: Chercher
|
||||
abrv: Cher
|
||||
Représenter:
|
||||
name: Représenter
|
||||
abrv: Rep
|
||||
Modéliser:
|
||||
name: Modéliser
|
||||
abrv: Mod
|
||||
Raisonner:
|
||||
name: Raisonner
|
||||
abrv: Rai
|
||||
Calculer:
|
||||
name: Calculer
|
||||
abrv: Cal
|
||||
Communiquer:
|
||||
name: Communiquer
|
||||
abrv: Com
|
||||
|
||||
|
||||
tribes:
|
||||
- name: Tribe1
|
||||
type: Type1
|
||||
students: tribe1.csv
|
||||
- name: Tribe2
|
||||
students: tribe2.csv
|
||||
Tribe1:
|
||||
name: Tribe1
|
||||
type: Type1
|
||||
students: tribe1.csv
|
||||
Tribe2:
|
||||
name: Tribe2
|
||||
students: tribe2.csv
|
||||
|
@@ -1,4 +0,0 @@
|
||||
---
|
||||
source: sheets/
|
||||
output: reports/
|
||||
templates: templates/
|
@@ -1,5 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from .csv_extraction import flat_df_students, flat_df_for
|
||||
from .df_marks_manip import pp_q_scores
|
||||
|
@@ -1,30 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
NO_ST_COLUMNS = {
|
||||
"assessment": "Nom",
|
||||
"term": "Trimestre",
|
||||
"date": "Date",
|
||||
"exercise": "Exercice",
|
||||
"question": "Question",
|
||||
"competence": "Competence",
|
||||
"theme": "Domaine",
|
||||
"comment": "Commentaire",
|
||||
"is_leveled": "Est_nivele",
|
||||
"score_rate": "Bareme",
|
||||
}
|
||||
|
||||
COLUMNS = {
|
||||
**NO_ST_COLUMNS,
|
||||
"student": "Eleve",
|
||||
"score": "Score",
|
||||
"mark": "Note",
|
||||
"level": "Niveau",
|
||||
"normalized": "Normalise",
|
||||
}
|
||||
|
||||
VALIDSCORE = {
|
||||
"NOTFILLED": "", # The item is not scored yet
|
||||
"NOANSWER": ".", # Student gives no answer (this score will impact the fianl mark)
|
||||
"ABS": "a", # Student has absent (this score won't be impact the final mark)
|
||||
}
|
@@ -1,119 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
""" Extracting data from xlsx files """
|
||||
|
||||
import pandas as pd
|
||||
from .config import NO_ST_COLUMNS, COLUMNS, VALIDSCORE
|
||||
|
||||
pd.set_option("Precision", 2)
|
||||
|
||||
|
||||
def try_replace(x, old, new):
|
||||
try:
|
||||
return str(x).replace(old, new)
|
||||
except ValueError:
|
||||
return x
|
||||
|
||||
|
||||
def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
|
||||
"""Extract the list of students from df
|
||||
|
||||
:param df: the dataframe
|
||||
:param no_student_columns: columns that are not students
|
||||
:return: list of students
|
||||
"""
|
||||
students = df.columns.difference(no_student_columns)
|
||||
return students
|
||||
|
||||
|
||||
def flat_df_students(
|
||||
df, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
|
||||
):
|
||||
"""Flat the dataframe by returning a dataframe with on student on each line
|
||||
|
||||
:param df: the dataframe (one row per questions)
|
||||
:param no_student_columns: columns that are not students
|
||||
:return: dataframe with one row per questions and students
|
||||
|
||||
Columns of csv files:
|
||||
|
||||
- NO_ST_COLUMNS meta data on questions
|
||||
- one for each students
|
||||
|
||||
This function flat student's columns to "student" and "score"
|
||||
"""
|
||||
students = extract_students(df, no_student_columns)
|
||||
scores = []
|
||||
for st in students:
|
||||
scores.append(
|
||||
pd.melt(
|
||||
df,
|
||||
id_vars=no_student_columns,
|
||||
value_vars=st,
|
||||
var_name=COLUMNS["student"],
|
||||
value_name=COLUMNS["score"],
|
||||
).dropna(subset=[COLUMNS["score"]])
|
||||
)
|
||||
if postprocessing:
|
||||
return postprocess(pd.concat(scores))
|
||||
return pd.concat(scores)
|
||||
|
||||
|
||||
def flat_df_for(
|
||||
df, student, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
|
||||
):
|
||||
"""Extract the data only for one student
|
||||
|
||||
:param df: the dataframe (one row per questions)
|
||||
:param no_student_columns: columns that are not students
|
||||
:return: dataframe with one row per questions and students
|
||||
|
||||
Columns of csv files:
|
||||
|
||||
- NO_ST_COLUMNS meta data on questions
|
||||
- one for each students
|
||||
|
||||
"""
|
||||
students = extract_students(df, no_student_columns)
|
||||
if student not in students:
|
||||
raise KeyError("This student is not in the table")
|
||||
st_df = df[list(no_student_columns) + [student]]
|
||||
st_df = st_df.rename(columns={student: COLUMNS["score"]}).dropna(
|
||||
subset=[COLUMNS["score"]]
|
||||
)
|
||||
if postprocessing:
|
||||
return postprocess(st_df)
|
||||
return st_df
|
||||
|
||||
|
||||
def postprocess(df):
|
||||
"""Postprocessing score dataframe
|
||||
|
||||
- Replace na with an empty string
|
||||
- Replace "NOANSWER" with -1
|
||||
- Turn commas number to dot numbers
|
||||
"""
|
||||
|
||||
df[COLUMNS["question"]].fillna("", inplace=True)
|
||||
df[COLUMNS["exercise"]].fillna("", inplace=True)
|
||||
df[COLUMNS["comment"]].fillna("", inplace=True)
|
||||
df[COLUMNS["competence"]].fillna("", inplace=True)
|
||||
|
||||
df[COLUMNS["score"]] = pd.to_numeric(
|
||||
df[COLUMNS["score"]]
|
||||
.replace(VALIDSCORE["NOANSWER"], -1)
|
||||
.apply(lambda x: try_replace(x, ",", "."))
|
||||
)
|
||||
df[COLUMNS["score_rate"]] = pd.to_numeric(
|
||||
df[COLUMNS["score_rate"]].apply(lambda x: try_replace(x, ",", ".")),
|
||||
errors="coerce",
|
||||
)
|
||||
|
||||
return df
|
||||
|
||||
|
||||
# -----------------------------
|
||||
# Reglages pour 'vim'
|
||||
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
||||
# cursor: 16 del
|
@@ -1,5 +1,20 @@
|
||||
import dash
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
app = dash.Dash(__name__, suppress_callback_exceptions=True)
|
||||
# app = dash.Dash(__name__)
|
||||
import dash
|
||||
import flask
|
||||
|
||||
from .layout.layout import layout
|
||||
|
||||
server = flask.Flask(__name__)
|
||||
app = dash.Dash(
|
||||
__name__,
|
||||
server=server,
|
||||
suppress_callback_exceptions=True,
|
||||
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
|
||||
)
|
||||
|
||||
|
||||
app.layout = layout
|
||||
server = app.server
|
||||
|
||||
|
@@ -1,66 +0,0 @@
|
||||
body {
|
||||
margin: 0px;
|
||||
font-family: 'Source Sans Pro','Roboto','Open Sans','Liberation Sans','DejaVu Sans','Verdana','Helvetica','Arial',sans-serif;
|
||||
}
|
||||
|
||||
header {
|
||||
margin: 0px 0px 20px 0px;
|
||||
background-color: #333333;
|
||||
color: #ffffff;
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
header > h1 {
|
||||
margin: 0px;
|
||||
}
|
||||
|
||||
main {
|
||||
width: 95vw;
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
section {
|
||||
margin-top: 20px;
|
||||
margin-bottom: 20px;
|
||||
|
||||
}
|
||||
|
||||
/* Exam analysis */
|
||||
|
||||
#select {
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
#select > div {
|
||||
width: 40vw;
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
#analysis {
|
||||
display: flex;
|
||||
flex-flow: row wrap;
|
||||
}
|
||||
|
||||
#analysis > * {
|
||||
display: flex;
|
||||
flex-flow: column;
|
||||
width: 45vw;
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
/* Create new exam */
|
||||
|
||||
#new-exam {
|
||||
display: flex;
|
||||
flex-flow: row;
|
||||
justify-content: space-between;
|
||||
}
|
||||
|
||||
#new-exam label {
|
||||
width: 20%;
|
||||
display: flex;
|
||||
flex-flow: column;
|
||||
justify-content: space-between;
|
||||
}
|
||||
|
||||
|
23
recopytex/dashboard/common/formating.py
Normal file
23
recopytex/dashboard/common/formating.py
Normal file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
|
||||
def highlight_scores(highlight_columns, score_color):
|
||||
""" Cells style in a datatable for scores
|
||||
|
||||
:param highlight_columns: columns to highlight
|
||||
:param value_color: dictionnary {"score": "color"}
|
||||
|
||||
"""
|
||||
hight = []
|
||||
for v, color in score_color.items():
|
||||
if v:
|
||||
hight += [
|
||||
{
|
||||
"if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
|
||||
"backgroundColor": color,
|
||||
"color": "white",
|
||||
}
|
||||
for col in highlight_columns
|
||||
]
|
||||
return hight
|
@@ -1,355 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
import dash_table
|
||||
import plotly.graph_objects as go
|
||||
from datetime import date, datetime
|
||||
import uuid
|
||||
import pandas as pd
|
||||
import yaml
|
||||
|
||||
from ...scripts.getconfig import config
|
||||
from ...config import NO_ST_COLUMNS
|
||||
from ..app import app
|
||||
from ...scripts.exam import Exam
|
||||
|
||||
QUESTION_COLUMNS = [
|
||||
{"id": "id", "name": "Question"},
|
||||
{
|
||||
"id": "competence",
|
||||
"name": "Competence",
|
||||
"presentation": "dropdown",
|
||||
},
|
||||
{"id": "theme", "name": "Domaine"},
|
||||
{"id": "comment", "name": "Commentaire"},
|
||||
{"id": "score_rate", "name": "Bareme"},
|
||||
{"id": "is_leveled", "name": "Est_nivele"},
|
||||
]
|
||||
|
||||
|
||||
def get_current_year_limit():
|
||||
today = date.today()
|
||||
if today.month > 8:
|
||||
return {
|
||||
"min_date_allowed": date(today.year, 9, 1),
|
||||
"max_date_allowed": date(today.year + 1, 7, 15),
|
||||
"initial_visible_month": today,
|
||||
}
|
||||
|
||||
return {
|
||||
"min_date_allowed": date(today.year - 1, 9, 1),
|
||||
"max_date_allowed": date(today.year, 7, 15),
|
||||
"initial_visible_month": today,
|
||||
}
|
||||
|
||||
|
||||
layout = html.Div(
|
||||
[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Création d'une évaluation"),
|
||||
html.P("Pas encore de sauvegarde", id="is-saved"),
|
||||
html.Button("Enregistrer dans csv", id="save-csv"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
children=[
|
||||
html.Section(
|
||||
children=[
|
||||
html.Form(
|
||||
id="new-exam",
|
||||
children=[
|
||||
html.Label(
|
||||
children=[
|
||||
"Classe",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in config["tribes"]
|
||||
],
|
||||
value=config["tribes"][0]["name"],
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Nom de l'évaluation",
|
||||
dcc.Input(
|
||||
id="exam_name",
|
||||
type="text",
|
||||
placeholder="Nom de l'évaluation",
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Date",
|
||||
dcc.DatePickerSingle(
|
||||
id="date",
|
||||
date=date.today(),
|
||||
**get_current_year_limit(),
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Trimestre",
|
||||
dcc.Dropdown(
|
||||
id="term",
|
||||
options=[
|
||||
{"label": i + 1, "value": i + 1}
|
||||
for i in range(3)
|
||||
],
|
||||
value=1,
|
||||
),
|
||||
]
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
id="form",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
id="exercises",
|
||||
children=[],
|
||||
),
|
||||
html.Button(
|
||||
"Ajouter un exercice",
|
||||
id="add-exercise",
|
||||
className="add-exercise",
|
||||
),
|
||||
html.Div(
|
||||
id="summary",
|
||||
),
|
||||
],
|
||||
id="exercises",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
id="score_rate",
|
||||
),
|
||||
html.Div(
|
||||
id="exercises-viz",
|
||||
),
|
||||
html.Div(
|
||||
id="competences-viz",
|
||||
),
|
||||
html.Div(
|
||||
id="themes-viz",
|
||||
),
|
||||
],
|
||||
id="visualisation",
|
||||
),
|
||||
]
|
||||
),
|
||||
dcc.Store(id="exam_store"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("exercises", "children"),
|
||||
dash.dependencies.Input("add-exercise", "n_clicks"),
|
||||
dash.dependencies.State("exercises", "children"),
|
||||
)
|
||||
def add_exercise(n_clicks, children):
|
||||
if n_clicks is None:
|
||||
return children
|
||||
element_table = pd.DataFrame(columns=[c["id"] for c in QUESTION_COLUMNS])
|
||||
element_table = element_table.append(
|
||||
pd.Series(
|
||||
data={
|
||||
"id": 1,
|
||||
"competence": "Rechercher",
|
||||
"theme": "",
|
||||
"comment": "",
|
||||
"score_rate": 1,
|
||||
"is_leveled": 1,
|
||||
},
|
||||
name=0,
|
||||
)
|
||||
)
|
||||
new_exercise = html.Div(
|
||||
children=[
|
||||
html.Div(
|
||||
children=[
|
||||
dcc.Input(
|
||||
id={"type": "exercice", "index": str(n_clicks)},
|
||||
type="text",
|
||||
value=f"Exercice {len(children)+1}",
|
||||
placeholder="Nom de l'exercice",
|
||||
className="exercise-name",
|
||||
),
|
||||
html.Button(
|
||||
"X",
|
||||
id={"type": "rm_exercice", "index": str(n_clicks)},
|
||||
className="delete-exercise",
|
||||
),
|
||||
],
|
||||
className="exercise-head",
|
||||
),
|
||||
dash_table.DataTable(
|
||||
id={"type": "elements", "index": str(n_clicks)},
|
||||
columns=QUESTION_COLUMNS,
|
||||
data=element_table.to_dict("records"),
|
||||
editable=True,
|
||||
row_deletable=True,
|
||||
dropdown={
|
||||
"competence": {
|
||||
"options": [
|
||||
{"label": i, "value": i} for i in config["competences"]
|
||||
]
|
||||
},
|
||||
},
|
||||
style_cell={
|
||||
"whiteSpace": "normal",
|
||||
"height": "auto",
|
||||
},
|
||||
),
|
||||
html.Button(
|
||||
"Ajouter un élément de notation",
|
||||
id={"type": "add-element", "index": str(n_clicks)},
|
||||
className="add-element",
|
||||
),
|
||||
],
|
||||
className="exercise",
|
||||
id=f"exercise-{n_clicks}",
|
||||
)
|
||||
children.append(new_exercise)
|
||||
return children
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output(
|
||||
{"type": "elements", "index": dash.dependencies.MATCH}, "data"
|
||||
),
|
||||
dash.dependencies.Input(
|
||||
{"type": "add-element", "index": dash.dependencies.MATCH}, "n_clicks"
|
||||
),
|
||||
[
|
||||
dash.dependencies.State(
|
||||
{"type": "elements", "index": dash.dependencies.MATCH}, "data"
|
||||
),
|
||||
],
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def add_element(n_clicks, elements):
|
||||
if n_clicks is None or n_clicks < len(elements):
|
||||
return elements
|
||||
|
||||
df = pd.DataFrame.from_records(elements)
|
||||
df = df.append(
|
||||
pd.Series(
|
||||
data={
|
||||
"id": len(df) + 1,
|
||||
"competence": "",
|
||||
"theme": "",
|
||||
"comment": "",
|
||||
"score_rate": 1,
|
||||
"is_leveled": 1,
|
||||
},
|
||||
name=n_clicks,
|
||||
)
|
||||
)
|
||||
return df.to_dict("records")
|
||||
|
||||
|
||||
def exam_generalities(tribe, exam_name, date, term, exercices=[], elements=[]):
|
||||
return [
|
||||
html.H1(f"{exam_name} pour les {tribe}"),
|
||||
html.P(f"Fait le {date} (Trimestre {term})"),
|
||||
]
|
||||
|
||||
|
||||
def exercise_summary(identifier, name, elements=[]):
|
||||
df = pd.DataFrame.from_records(elements)
|
||||
return html.Div(
|
||||
[
|
||||
html.H2(name),
|
||||
dash_table.DataTable(
|
||||
columns=[{"id": c, "name": c} for c in df], data=elements
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("exam_store", "data"),
|
||||
[
|
||||
dash.dependencies.Input("tribe", "value"),
|
||||
dash.dependencies.Input("exam_name", "value"),
|
||||
dash.dependencies.Input("date", "date"),
|
||||
dash.dependencies.Input("term", "value"),
|
||||
dash.dependencies.Input(
|
||||
{"type": "exercice", "index": dash.dependencies.ALL}, "value"
|
||||
),
|
||||
dash.dependencies.Input(
|
||||
{"type": "elements", "index": dash.dependencies.ALL}, "data"
|
||||
),
|
||||
],
|
||||
dash.dependencies.State({"type": "elements", "index": dash.dependencies.ALL}, "id"),
|
||||
)
|
||||
def store_exam(tribe, exam_name, date, term, exercices, elements, elements_id):
|
||||
exam = Exam(exam_name, tribe, date, term)
|
||||
for (i, name) in enumerate(exercices):
|
||||
ex_elements_id = [el for el in elements_id if el["index"] == str(i + 1)][0]
|
||||
index = elements_id.index(ex_elements_id)
|
||||
ex_elements = elements[index]
|
||||
exam.add_exercise(name, ex_elements)
|
||||
|
||||
return exam.to_dict()
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("score_rate", "children"),
|
||||
dash.dependencies.Input("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def score_rate(data):
|
||||
exam = Exam(**data)
|
||||
return [html.P(f"Barème /{exam.score_rate}")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("competences-viz", "figure"),
|
||||
dash.dependencies.Input("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def competences_viz(data):
|
||||
exam = Exam(**data)
|
||||
return [html.P(str(exam.competences_rate))]
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("themes-viz", "children"),
|
||||
dash.dependencies.Input("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def themes_viz(data):
|
||||
exam = Exam(**data)
|
||||
themes_rate = exam.themes_rate
|
||||
fig = go.Figure()
|
||||
if themes_rate:
|
||||
fig.add_trace(go.Pie(labels=list(themes_rate.keys()), values=list(themes_rate.values())))
|
||||
return [dcc.Graph(figure=fig)]
|
||||
return []
|
||||
|
||||
|
||||
@app.callback(
|
||||
dash.dependencies.Output("is-saved", "children"),
|
||||
dash.dependencies.Input("save-csv", "n_clicks"),
|
||||
dash.dependencies.State("exam_store", "data"),
|
||||
prevent_initial_call=True,
|
||||
)
|
||||
def save_to_csv(n_clicks, data):
|
||||
exam = Exam(**data)
|
||||
csv = exam.path(".csv")
|
||||
exam.write_csv()
|
||||
return [f"Dernière sauvegarde {datetime.today()} dans {csv}"]
|
@@ -1,406 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
import dash_table
|
||||
from dash.exceptions import PreventUpdate
|
||||
import plotly.graph_objects as go
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
|
||||
from ... import flat_df_students, pp_q_scores
|
||||
from ...config import NO_ST_COLUMNS
|
||||
from ...scripts.getconfig import config
|
||||
from ..app import app
|
||||
|
||||
COLORS = {
|
||||
".": "black",
|
||||
0: "#E7472B",
|
||||
1: "#FF712B",
|
||||
2: "#F2EC4C",
|
||||
3: "#68D42F",
|
||||
}
|
||||
|
||||
layout = html.Div(
|
||||
children=[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Analyse des notes"),
|
||||
html.P("Dernière sauvegarde", id="lastsave"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
[
|
||||
html.Section(
|
||||
[
|
||||
html.Div(
|
||||
[
|
||||
"Classe: ",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in config["tribes"]
|
||||
],
|
||||
value=config["tribes"][0]["name"],
|
||||
),
|
||||
],
|
||||
style={
|
||||
"display": "flex",
|
||||
"flex-flow": "column",
|
||||
},
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
"Evaluation: ",
|
||||
dcc.Dropdown(id="csv"),
|
||||
],
|
||||
style={
|
||||
"display": "flex",
|
||||
"flex-flow": "column",
|
||||
},
|
||||
),
|
||||
],
|
||||
id="select",
|
||||
style={
|
||||
"display": "flex",
|
||||
"flex-flow": "row wrap",
|
||||
},
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
html.Div(
|
||||
dash_table.DataTable(
|
||||
id="final_score_table",
|
||||
columns=[
|
||||
{"id": "Eleve", "name": "Élève"},
|
||||
{"id": "Note", "name": "Note"},
|
||||
{"id": "Bareme", "name": "Barème"},
|
||||
],
|
||||
data=[],
|
||||
style_data_conditional=[
|
||||
{
|
||||
"if": {"row_index": "odd"},
|
||||
"backgroundColor": "rgb(248, 248, 248)",
|
||||
}
|
||||
],
|
||||
style_data={
|
||||
"width": "100px",
|
||||
"maxWidth": "100px",
|
||||
"minWidth": "100px",
|
||||
},
|
||||
),
|
||||
id="final_score_table_container",
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
dash_table.DataTable(
|
||||
id="final_score_describe",
|
||||
columns=[
|
||||
{"id": "count", "name": "count"},
|
||||
{"id": "mean", "name": "mean"},
|
||||
{"id": "std", "name": "std"},
|
||||
{"id": "min", "name": "min"},
|
||||
{"id": "25%", "name": "25%"},
|
||||
{"id": "50%", "name": "50%"},
|
||||
{"id": "75%", "name": "75%"},
|
||||
{"id": "max", "name": "max"},
|
||||
],
|
||||
),
|
||||
dcc.Graph(
|
||||
id="fig_assessment_hist",
|
||||
),
|
||||
dcc.Graph(id="fig_competences"),
|
||||
],
|
||||
id="desc_plots",
|
||||
),
|
||||
],
|
||||
id="analysis",
|
||||
),
|
||||
html.Div(
|
||||
[
|
||||
dash_table.DataTable(
|
||||
id="scores_table",
|
||||
columns=[
|
||||
{"id": "id", "name": "Question"},
|
||||
{
|
||||
"id": "competence",
|
||||
"name": "Competence",
|
||||
},
|
||||
{"id": "theme", "name": "Domaine"},
|
||||
{"id": "comment", "name": "Commentaire"},
|
||||
{"id": "score_rate", "name": "Bareme"},
|
||||
{"id": "is_leveled", "name": "Est_nivele"},
|
||||
],
|
||||
style_cell={
|
||||
"whiteSpace": "normal",
|
||||
"height": "auto",
|
||||
},
|
||||
fixed_columns={"headers": True, "data": 7},
|
||||
style_table={"minWidth": "100%"},
|
||||
style_data_conditional=[],
|
||||
editable=True,
|
||||
),
|
||||
html.Button("Ajouter un élément", id="btn_add_element"),
|
||||
],
|
||||
id="big_table",
|
||||
),
|
||||
dcc.Store(id="final_score"),
|
||||
],
|
||||
className="content",
|
||||
style={
|
||||
"width": "95vw",
|
||||
"margin": "auto",
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("csv", "options"),
|
||||
dash.dependencies.Output("csv", "value"),
|
||||
],
|
||||
[dash.dependencies.Input("tribe", "value")],
|
||||
)
|
||||
def update_csvs(value):
|
||||
if not value:
|
||||
raise PreventUpdate
|
||||
p = Path(value)
|
||||
csvs = list(p.glob("*.csv"))
|
||||
try:
|
||||
return [{"label": str(c), "value": str(c)} for c in csvs], str(csvs[0])
|
||||
except IndexError:
|
||||
return []
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("final_score", "data"),
|
||||
],
|
||||
[dash.dependencies.Input("scores_table", "data")],
|
||||
)
|
||||
def update_final_scores(data):
|
||||
if not data:
|
||||
raise PreventUpdate
|
||||
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
try:
|
||||
if scores.iloc[0]["Commentaire"] == "commentaire" or scores.iloc[0].str.contains("PPRE").any():
|
||||
scores.drop([0], inplace=True)
|
||||
except KeyError:
|
||||
pass
|
||||
scores = flat_df_students(scores).dropna(subset=["Score"])
|
||||
if scores.empty:
|
||||
return [{}]
|
||||
|
||||
scores = pp_q_scores(scores)
|
||||
assessment_scores = scores.groupby(["Eleve"]).agg({"Note": "sum", "Bareme": "sum"})
|
||||
return [assessment_scores.reset_index().to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("final_score_table", "data"),
|
||||
],
|
||||
[dash.dependencies.Input("final_score", "data")],
|
||||
)
|
||||
def update_final_scores_table(data):
|
||||
assessment_scores = pd.DataFrame.from_records(data)
|
||||
return [assessment_scores.to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("final_score_describe", "data"),
|
||||
],
|
||||
[dash.dependencies.Input("final_score", "data")],
|
||||
)
|
||||
def update_final_scores_descr(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
if scores.empty:
|
||||
return [[{}]]
|
||||
desc = scores["Note"].describe().T.round(2)
|
||||
return [[desc.to_dict()]]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("fig_assessment_hist", "figure"),
|
||||
],
|
||||
[dash.dependencies.Input("final_score", "data")],
|
||||
)
|
||||
def update_final_scores_hist(data):
|
||||
assessment_scores = pd.DataFrame.from_records(data)
|
||||
|
||||
if assessment_scores.empty:
|
||||
return [go.Figure(data=[go.Scatter(x=[], y=[])])]
|
||||
|
||||
ranges = np.linspace(
|
||||
-0.5,
|
||||
assessment_scores.Bareme.max(),
|
||||
int(assessment_scores.Bareme.max() * 2 + 2),
|
||||
)
|
||||
bins = pd.cut(assessment_scores["Note"], ranges)
|
||||
assessment_scores["Bin"] = bins
|
||||
assessment_grouped = (
|
||||
assessment_scores.reset_index()
|
||||
.groupby("Bin")
|
||||
.agg({"Bareme": "count", "Eleve": lambda x: "\n".join(x)})
|
||||
)
|
||||
assessment_grouped.index = assessment_grouped.index.map(lambda i: i.right)
|
||||
fig = go.Figure()
|
||||
fig.add_bar(
|
||||
x=assessment_grouped.index,
|
||||
y=assessment_grouped.Bareme,
|
||||
text=assessment_grouped.Eleve,
|
||||
textposition="auto",
|
||||
hovertemplate="",
|
||||
marker_color="#4E89DE",
|
||||
)
|
||||
fig.update_layout(
|
||||
height=300,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [fig]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("fig_competences", "figure"),
|
||||
],
|
||||
[dash.dependencies.Input("scores_table", "data")],
|
||||
)
|
||||
def update_competence_fig(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
try:
|
||||
if scores.iloc[0]["Commentaire"] == "commentaire" or scores.iloc[0].str.contains("PPRE").any():
|
||||
scores.drop([0], inplace=True)
|
||||
except KeyError:
|
||||
pass
|
||||
scores = flat_df_students(scores).dropna(subset=["Score"])
|
||||
|
||||
if scores.empty:
|
||||
return [go.Figure(data=[go.Scatter(x=[], y=[])])]
|
||||
|
||||
scores = pp_q_scores(scores)
|
||||
pt = pd.pivot_table(
|
||||
scores,
|
||||
index=["Exercice", "Question", "Commentaire"],
|
||||
columns="Score",
|
||||
aggfunc="size",
|
||||
fill_value=0,
|
||||
)
|
||||
for i in {i for i in pt.index.get_level_values(0)}:
|
||||
pt.loc[(str(i), "", ""), :] = ""
|
||||
pt.sort_index(inplace=True)
|
||||
index = (
|
||||
pt.index.get_level_values(0).map(str)
|
||||
+ ":"
|
||||
+ pt.index.get_level_values(1).map(str)
|
||||
+ " "
|
||||
+ pt.index.get_level_values(2).map(str)
|
||||
)
|
||||
|
||||
fig = go.Figure()
|
||||
bars = [
|
||||
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
|
||||
{"score": 0, "name": "Faux", "color": COLORS[0]},
|
||||
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
|
||||
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
|
||||
{"score": 3, "name": "Juste", "color": COLORS[3]},
|
||||
]
|
||||
for b in bars:
|
||||
try:
|
||||
fig.add_bar(
|
||||
x=index, y=pt[b["score"]], name=b["name"], marker_color=b["color"]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
fig.update_layout(barmode="relative")
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [fig]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("lastsave", "children"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("scores_table", "data"),
|
||||
dash.dependencies.State("csv", "value"),
|
||||
],
|
||||
)
|
||||
def save_scores(data, csv):
|
||||
try:
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores = scores_table_column_order(scores)
|
||||
scores.to_csv(csv, index=False)
|
||||
except:
|
||||
return [f"Soucis pour sauvegarder à {datetime.today()} dans {csv}"]
|
||||
else:
|
||||
return [f"Dernière sauvegarde {datetime.today()} dans {csv}"]
|
||||
|
||||
|
||||
def highlight_value(df):
|
||||
""" Cells style """
|
||||
hight = []
|
||||
for v, color in COLORS.items():
|
||||
hight += [
|
||||
{
|
||||
"if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
|
||||
"backgroundColor": color,
|
||||
"color": "white",
|
||||
}
|
||||
for col in df.columns
|
||||
if col not in NO_ST_COLUMNS.values()
|
||||
]
|
||||
return hight
|
||||
|
||||
def scores_table_column_order(df):
|
||||
df_student_columns = [c for c in df.columns if c not in NO_ST_COLUMNS.values()]
|
||||
order = list(NO_ST_COLUMNS.values())+df_student_columns
|
||||
return df.loc[:, order]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("scores_table", "columns"),
|
||||
dash.dependencies.Output("scores_table", "data"),
|
||||
dash.dependencies.Output("scores_table", "style_data_conditional"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("csv", "value"),
|
||||
dash.dependencies.Input("btn_add_element", "n_clicks"),
|
||||
dash.dependencies.State("scores_table", "data"),
|
||||
],
|
||||
)
|
||||
def update_scores_table(csv, add_element, data):
|
||||
ctx = dash.callback_context
|
||||
if ctx.triggered[0]["prop_id"] == "csv.value":
|
||||
stack = pd.read_csv(csv, encoding="UTF8")
|
||||
elif ctx.triggered[0]["prop_id"] == "btn_add_element.n_clicks":
|
||||
stack = pd.DataFrame.from_records(data)
|
||||
infos = pd.DataFrame.from_records(
|
||||
[{k: stack.iloc[-1][k] for k in NO_ST_COLUMNS.values()}]
|
||||
)
|
||||
stack = stack.append(infos)
|
||||
stack = scores_table_column_order(stack)
|
||||
return (
|
||||
[
|
||||
{"id": c, "name": c}
|
||||
for c in stack.columns
|
||||
if c not in ["Trimestre", "Nom", "Date"]
|
||||
],
|
||||
stack.to_dict("records"),
|
||||
highlight_value(stack),
|
||||
)
|
@@ -1,29 +1,8 @@
|
||||
import dash_core_components as dcc
|
||||
import dash_html_components as html
|
||||
from dash.dependencies import Input, Output
|
||||
|
||||
from .app import app
|
||||
from .exam_analysis import app as exam_analysis
|
||||
from .create_exam import app as create_exam
|
||||
from .student_analysis import app as student_analysis
|
||||
|
||||
|
||||
app.layout = html.Div(
|
||||
[dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(Output("page-content", "children"), Input("url", "pathname"))
def display_page(pathname):
    """Return the layout of the page registered for *pathname* ("404" otherwise)."""
    if pathname == "/":
        return exam_analysis.layout
    if pathname == "/create-exam":
        return create_exam.layout
    if pathname == "/students":
        return student_analysis.layout
    return "404"
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from .app import app, server
|
||||
from .routes import render_page_content
|
||||
|
||||
if __name__ == "__main__":
    # Start the dash development server (debug mode: auto-reload enabled).
    app.run_server(debug=True)
|
||||
|
9
recopytex/dashboard/layout/layout.py
Normal file
9
recopytex/dashboard/layout/layout.py
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
|
||||
# Placeholder filled by the router callback with the current page's layout.
content = html.Div(id="page-content")

# Top-level layout: url tracker plus the page content container.
layout = html.Div([dcc.Location(id="url"), content])
|
0
recopytex/dashboard/pages/exams_scores/__init__.py
Normal file
0
recopytex/dashboard/pages/exams_scores/__init__.py
Normal file
112
recopytex/dashboard/pages/exams_scores/app.py
Normal file
112
recopytex/dashboard/pages/exams_scores/app.py
Normal file
@@ -0,0 +1,112 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
from .models import get_tribes, get_exams
|
||||
from .callbacks import *
|
||||
|
||||
layout = html.Div(
|
||||
children=[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Analyse des notes"),
|
||||
html.P("Dernière sauvegarde", id="lastsave"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
children=[
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
children=[
|
||||
"Classe: ",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in get_tribes().values()
|
||||
],
|
||||
value=next(iter(get_tribes().values()))["name"],
|
||||
),
|
||||
],
|
||||
),
|
||||
html.Div(
|
||||
children=[
|
||||
"Evaluation: ",
|
||||
dcc.Dropdown(id="exam_select"),
|
||||
],
|
||||
),
|
||||
],
|
||||
id="selects",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
children=[
|
||||
dash_table.DataTable(
|
||||
id="final_score_table",
|
||||
columns=[
|
||||
{"name": "Étudiant", "id": "student_name"},
|
||||
{"name": "Note", "id": "mark"},
|
||||
{"name": "Barème", "id": "score_rate"},
|
||||
],
|
||||
)
|
||||
],
|
||||
id="final_score_table_container",
|
||||
),
|
||||
html.Div(
|
||||
children=[
|
||||
dash_table.DataTable(
|
||||
id="score_statistics_table",
|
||||
columns=[],
|
||||
)
|
||||
],
|
||||
id="score_statistics_table_container",
|
||||
),
|
||||
html.Div(
|
||||
children=[
|
||||
dcc.Graph(
|
||||
id="fig_exam_histo",
|
||||
config={"displayModeBar": False},
|
||||
)
|
||||
],
|
||||
id="fig_exam_histo_container",
|
||||
),
|
||||
html.Div(
|
||||
children=[
|
||||
dcc.Graph(
|
||||
id="fig_questions_bar",
|
||||
config={"displayModeBar": False},
|
||||
)
|
||||
],
|
||||
id="fig_questions_bar_container",
|
||||
),
|
||||
],
|
||||
id="analysis",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
dash_table.DataTable(
|
||||
id="scores_table",
|
||||
columns=[],
|
||||
style_data_conditional=[],
|
||||
fixed_columns={},
|
||||
editable=True,
|
||||
style_table={"minWidth": "100%"},
|
||||
style_cell={
|
||||
"minWidth": "100px",
|
||||
"width": "100px",
|
||||
"maxWidth": "100px",
|
||||
"overflow": "hidden",
|
||||
"textOverflow": "ellipsis",
|
||||
},
|
||||
)
|
||||
],
|
||||
id="edit",
|
||||
),
|
||||
],
|
||||
),
|
||||
dcc.Store(id="scores"),
|
||||
],
|
||||
)
|
216
recopytex/dashboard/pages/exams_scores/callbacks.py
Normal file
216
recopytex/dashboard/pages/exams_scores/callbacks.py
Normal file
@@ -0,0 +1,216 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from dash.dependencies import Input, Output, State
|
||||
from dash.exceptions import PreventUpdate
|
||||
import plotly.graph_objects as go
|
||||
import dash_table
|
||||
import json
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
from recopytex.dashboard.app import app
|
||||
from recopytex.dashboard.common.formating import highlight_scores
|
||||
|
||||
from .models import (
|
||||
get_tribes,
|
||||
get_exams,
|
||||
get_unstack_scores,
|
||||
get_students_from_exam,
|
||||
get_score_colors,
|
||||
get_level_color_bar,
|
||||
score_to_final_mark,
|
||||
stack_scores,
|
||||
pivot_score_on,
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
    [
        Output("exam_select", "options"),
        Output("exam_select", "value"),
    ],
    [Input("tribe", "value")],
)
def update_exams_choices(tribe):
    """Fill the exam dropdown with the exams of the selected tribe.

    Each option's value is the exam row serialized as json; the first exam is
    pre-selected (or None when the tribe has no exam).
    """
    if not tribe:
        raise PreventUpdate
    exams = get_exams(tribe)
    exams.reset_index(inplace=True)
    if exams.empty:
        return [], None
    options = [
        {"label": exam["name"], "value": exam.to_json()}
        for _, exam in exams.iterrows()
    ]
    return options, exams.loc[0].to_json()
|
||||
|
||||
|
||||
@app.callback(
    [
        Output("scores_table", "columns"),
        Output("scores_table", "data"),
        Output("scores_table", "style_data_conditional"),
        Output("scores_table", "fixed_columns"),
    ],
    [
        Input("exam_select", "value"),
    ],
)
def update_scores_store(exam):
    """Load the scores of the selected exam into the editable scores table.

    :param exam: selected exam serialized as a json string (or None)
    :returns: [columns, rows, conditional styles, fixed columns] for the table
    """
    if not exam:
        # Nothing selected yet: empty table.
        return [[], [], [], {}]
    exam = pd.DataFrame.from_dict([json.loads(exam)])
    scores = get_unstack_scores(exam)
    # Question metadata columns, pinned to the left of the table.
    fixed_columns = [
        "exercise",
        "question",
        "competence",
        "theme",
        "comment",
        "score_rate",
        "is_leveled",
    ]

    students = list(get_students_from_exam(exam))
    columns = fixed_columns + students

    score_color = get_score_colors()

    return [
        [{"id": c, "name": c} for c in columns],
        scores.to_dict("records"),
        # Color each student cell according to its score value.
        highlight_scores(students, score_color),
        {"headers": True, "data": len(fixed_columns)},
    ]
|
||||
|
||||
|
||||
@app.callback(
    [
        Output("final_score_table", "data"),
    ],
    [
        Input("scores_table", "data"),
    ],
)
def update_finale_score_table(scores):
    """Recompute each student's final mark whenever the scores table changes."""
    wide_scores = pd.DataFrame.from_records(scores)
    return score_to_final_mark(stack_scores(wide_scores))
|
||||
|
||||
|
||||
@app.callback(
    [
        Output("score_statistics_table", "columns"),
        Output("score_statistics_table", "data"),
    ],
    [
        Input("final_score_table", "data"),
    ],
)
def update_statictics_table(finale_score):
    """Descriptive statistics (count/mean/std/quartiles) of the final marks.

    :param finale_score: final mark rows (list of dicts with a "mark" key)
    :returns: [columns, rows] for the statistics DataTable
    """
    df = pd.DataFrame.from_records(finale_score)
    if df.empty or "mark" not in df.columns:
        # No marks yet: render an empty table instead of raising KeyError.
        return [[], []]
    statistics = df["mark"].describe().to_frame().T
    return [
        [{"id": c, "name": c} for c in statistics.columns],
        statistics.to_dict("records"),
    ]
|
||||
|
||||
|
||||
@app.callback(
    [
        Output("fig_exam_histo", "figure"),
    ],
    [
        Input("final_score_table", "data"),
    ],
)
def update_exam_histo(finale_scores):
    """Histogram of the final marks, one bar per half-point bin.

    Each bar's text lists the students whose mark falls in that bin.

    :param finale_scores: final mark rows (dicts with mark/score_rate/student_name)
    :returns: [plotly figure]
    """
    scores = pd.DataFrame.from_records(finale_scores)

    if scores.empty:
        # Empty placeholder figure when there is nothing to plot.
        return [go.Figure(data=[go.Scatter(x=[], y=[])])]

    # Half-point wide bin edges from -0.5 up to the maximum possible mark.
    ranges = np.linspace(
        -0.5,
        scores["score_rate"].max(),
        int(scores["score_rate"].max() * 2 + 2),
    )

    # Count students per bin and collect their names for the bar labels.
    bins = pd.cut(scores["mark"], ranges)
    scores["Bin"] = bins
    grouped = (
        scores.reset_index()
        .groupby("Bin")
        .agg({"score_rate": "count", "student_name": lambda x: "\n".join(x)})
    )
    # Label each bin by its upper bound.
    grouped.index = grouped.index.map(lambda i: i.right)
    fig = go.Figure()
    fig.add_bar(
        x=grouped.index,
        y=grouped["score_rate"],
        text=grouped["student_name"],
        textposition="auto",
        hovertemplate="",
        marker_color="#4E89DE",
    )
    fig.update_layout(
        height=300,
        margin=dict(l=5, r=5, b=5, t=5),
    )
    return [fig]
|
||||
|
||||
|
||||
@app.callback(
    [
        Output("fig_questions_bar", "figure"),
    ],
    [
        Input("scores_table", "data"),
    ],
)
def update_questions_bar(finale_scores):
    """Stacked bar chart: for each question, how many students got each level.

    :param finale_scores: rows of the (wide) scores table
    :returns: [plotly figure]
    """
    scores = pd.DataFrame.from_records(finale_scores)
    scores = stack_scores(scores)

    if scores.empty:
        # Empty placeholder figure when there is nothing to plot.
        return [go.Figure(data=[go.Scatter(x=[], y=[])])]

    # Count occurrences of each score level per (exercise, question, comment).
    pt = pivot_score_on(scores, ["exercise", "question", "comment"], "score")

    # separation between exercises: insert an empty row before each exercise
    for i in {i for i in pt.index.get_level_values(0)}:
        pt.loc[(str(i), "", ""), :] = ""
    pt.sort_index(inplace=True)

    # Bar label: "exercise:question comment"
    index = (
        pt.index.get_level_values(0).map(str)
        + ":"
        + pt.index.get_level_values(1).map(str)
        + " "
        + pt.index.get_level_values(2).map(str)
    )

    fig = go.Figure()

    bars = get_level_color_bar()

    # One stacked bar trace per score level, colored from the config.
    for b in bars:
        try:
            fig.add_bar(
                x=index, y=pt[b["score"]], name=b["name"], marker_color=b["color"]
            )
        except KeyError:
            # This level does not appear in the exam: no trace for it.
            pass
    fig.update_layout(barmode="relative")
    fig.update_layout(
        height=500,
        margin=dict(l=5, r=5, b=5, t=5),
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="right",
            x=1
        )
    )
    return [fig]
|
128
recopytex/dashboard/pages/exams_scores/models.py
Normal file
128
recopytex/dashboard/pages/exams_scores/models.py
Normal file
@@ -0,0 +1,128 @@
|
||||
#!/use/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from recopytex.database.filesystem.loader import CSVLoader
|
||||
from recopytex.datalib.dataframe import column_values_to_column
|
||||
import recopytex.datalib.on_score_column as on_column
|
||||
import pandas as pd
|
||||
|
||||
LOADER = CSVLoader("./test_confia.ml")
|
||||
SCORES_CONFIG = LOADER.get_config()["scores"]
|
||||
|
||||
|
||||
def unstack_scores(scores):
    """Put student_name values to columns.

    :param scores: score dataframe with one line per score
    :returns: score dataframe with one column per student

    """
    # Every score column except the score value itself identifies a question row.
    kept_columns = [col for col in LOADER.score_columns if col != "score"]
    return column_values_to_column("student_name", "score", kept_columns, scores)
|
||||
|
||||
|
||||
def stack_scores(scores):
    """Melt student columns back to one row per score with a student_name column.

    Inverse operation of :func:`unstack_scores`.

    :param scores: score dataframe with one column per student
    :returns: score dataframe with one line per score
    """
    id_columns = [
        c for c in LOADER.score_columns if c not in ("score", "student_name")
    ]
    student_columns = [c for c in scores.columns if c not in id_columns]
    return pd.melt(
        scores,
        id_vars=id_columns,
        value_vars=student_columns,
        var_name="student_name",
        value_name="score",
    )
|
||||
|
||||
|
||||
def get_tribes():
|
||||
return LOADER.get_tribes()
|
||||
|
||||
|
||||
def get_exams(tribe):
|
||||
return LOADER.get_exams([tribe])
|
||||
|
||||
|
||||
def get_record_scores(exam):
|
||||
return LOADER.get_exam_scores(exam)
|
||||
|
||||
|
||||
def get_unstack_scores(exam):
|
||||
flat_scores = LOADER.get_exam_scores(exam)
|
||||
return unstack_scores(flat_scores)
|
||||
|
||||
|
||||
def get_students_from_exam(exam):
    """Student names appearing in the scores of *exam* (unique, first-seen order)."""
    return LOADER.get_exam_scores(exam)["student_name"].unique()
|
||||
|
||||
|
||||
def get_score_colors():
    """Mapping score value -> display color, built from the scores config."""
    return {score["value"]: score["color"] for score in SCORES_CONFIG.values()}
|
||||
|
||||
|
||||
def get_level_color_bar():
|
||||
return [
|
||||
{"score": str(s["value"]), "name": s["comment"], "color": s["color"]}
|
||||
for s in SCORES_CONFIG.values()
|
||||
]
|
||||
|
||||
|
||||
is_none_score = lambda x: on_column.is_none_score(x, SCORES_CONFIG)
|
||||
format_score = lambda x: on_column.format_score(x, SCORES_CONFIG)
|
||||
score_to_numeric_score = lambda x: on_column.score_to_numeric_score(x, SCORES_CONFIG)
|
||||
score_to_mark = lambda x: on_column.score_to_mark(
|
||||
x, max([v["value"] for v in SCORES_CONFIG.values() if isinstance(v["value"], int)])
|
||||
)
|
||||
|
||||
|
||||
def filter_clean_score(scores):
    """Drop rows with a "none" score (per SCORES_CONFIG, e.g. not filled or
    absent) and normalise the remaining score values with format_score."""
    filtered_scores = scores[~scores.apply(is_none_score, axis=1)]
    # .assign returns a new dataframe: the caller's frame is left untouched.
    filtered_scores = filtered_scores.assign(
        score=filtered_scores.apply(format_score, axis=1)
    )
    return filtered_scores
|
||||
|
||||
|
||||
def score_to_final_mark(scores):
    """ Compute marks then reduce to final mark per student """

    # Drop not-scored rows and normalise the remaining score values.
    filtered_scores = filter_clean_score(scores)
    # Score level -> numeric value (numeric_value in SCORES_CONFIG).
    filtered_scores = filtered_scores.assign(
        score=filtered_scores.apply(score_to_numeric_score, axis=1)
    )
    # Numeric level -> mark, scaled by the question's score_rate.
    filtered_scores = filtered_scores.assign(
        mark=filtered_scores.apply(score_to_mark, axis=1)
    )
    # One row per student: total mark and total possible points.
    final_score = filtered_scores.groupby(["student_name"])[
        ["mark", "score_rate"]
    ].sum()
    # Wrapped in a list: the dash callback declares a single Output in a list.
    return [final_score.reset_index().to_dict("records")]
|
||||
|
||||
|
||||
def pivot_score_on(scores, index, columns, aggfunc="size"):
    """Pivot leveled scores on *index* x *columns*, aggregated with *aggfunc*.

    It assumes that scores are levels.

    :param scores: stacked score dataframe (one row per score)
    :param index: column name(s) used as pivot rows
    :param columns: column name(s) used as pivot columns
    :param aggfunc: aggregation function (default "size": count occurrences)
    :returns: pivot table, missing combinations filled with 0
    """
    filtered_scores = filter_clean_score(scores)
    # Use .assign instead of in-place column assignment: filter_clean_score
    # returns a boolean-indexed slice, and mutating it triggers pandas'
    # SettingWithCopyWarning (and risks writing through to the original).
    filtered_scores = filtered_scores.assign(
        score=filtered_scores["score"].astype(str)
    )
    pt = pd.pivot_table(
        filtered_scores,
        index=index,
        columns=columns,
        aggfunc=aggfunc,
        fill_value=0,
    )
    return pt
|
||||
|
0
recopytex/dashboard/pages/home/__init__.py
Normal file
0
recopytex/dashboard/pages/home/__init__.py
Normal file
50
recopytex/dashboard/pages/home/app.py
Normal file
50
recopytex/dashboard/pages/home/app.py
Normal file
@@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash_html_components as html
|
||||
from recopytex.database.filesystem.loader import CSVLoader
|
||||
from .models import get_tribes, get_exams, get_students
|
||||
|
||||
loader = CSVLoader("./test_config.yml")
|
||||
|
||||
|
||||
def listing(elements, formating=lambda x: x):
    """Render *elements* as an html unordered list, each item built with *formating*."""
    items = [html.Li(children=formating(element)) for element in elements]
    return html.Ul(children=items)
|
||||
|
||||
|
||||
def format_tribe(tribe):
    """Render a tribe as html children: its name heading plus its exam list.

    :param tribe: tribe description dict (with at least a "name" key)
    :returns: list of dash html components
    """
    children = [html.H3(tribe["name"])]
    exams = loader.get_exams([tribe["name"]])
    if exams.empty:
        children.append(html.P("Pas d'évaluation"))
    else:
        exams_html = listing([exam for id, exam in exams.iterrows()], format_exam)
        children.append(exams_html)
    return children
|
||||
|
||||
|
||||
def format_exam(exam):
    """Render an exam as html children: a single paragraph with its name."""
    return [html.P(exam["name"])]
|
||||
|
||||
|
||||
layout = html.Div(
|
||||
children=[
|
||||
html.H1("Recopytex"),
|
||||
html.H2("Tribes"),
|
||||
html.Div(
|
||||
children=[listing(loader.get_tribes().values(), format_tribe)],
|
||||
id="tribes",
|
||||
),
|
||||
html.H2("Config"),
|
||||
html.Div(
|
||||
children=[
|
||||
html.P(str(loader.get_config())),
|
||||
],
|
||||
id="config",
|
||||
),
|
||||
]
|
||||
)
|
6
recopytex/dashboard/pages/home/callbacks.py
Normal file
6
recopytex/dashboard/pages/home/callbacks.py
Normal file
@@ -0,0 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from dash.dependencies import Input, Output
|
||||
from recopytex.dashboard.app import app
|
||||
|
14
recopytex/dashboard/pages/home/models.py
Normal file
14
recopytex/dashboard/pages/home/models.py
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
|
||||
def get_tribes(loader):
|
||||
return loader.get_tribes()
|
||||
|
||||
|
||||
def get_exams(loader, tribe):
|
||||
return loader.get_exams([tribe])
|
||||
|
||||
|
||||
def get_students(loader, tribe):
|
||||
return loader.get_students([tribe])
|
27
recopytex/dashboard/routes.py
Normal file
27
recopytex/dashboard/routes.py
Normal file
@@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from dash.dependencies import Input, Output
|
||||
|
||||
from .app import app
|
||||
from .pages.home import app as home
|
||||
from .pages.exams_scores import app as exams_scores
|
||||
import dash_html_components as html
|
||||
|
||||
|
||||
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def render_page_content(pathname):
    """Route *pathname* to the matching page layout, or a 404 message."""
    if pathname == "/":
        return home.layout
    if pathname == "/exams/scores/":
        return exams_scores.layout
    # Unknown route: render a 404 message.
    return html.Div(
        [
            html.H1("404: Not found", className="text-danger"),
            html.Hr(),
            html.P(f"The pathname {pathname} was not recognised..."),
        ]
    )
|
@@ -1,300 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import dash
|
||||
import dash_html_components as html
|
||||
import dash_core_components as dcc
|
||||
import dash_table
|
||||
import plotly.graph_objects as go
|
||||
from datetime import date, datetime
|
||||
import uuid
|
||||
import pandas as pd
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
|
||||
from ...scripts.getconfig import config
|
||||
from ... import flat_df_students, pp_q_scores
|
||||
from ...config import NO_ST_COLUMNS
|
||||
from ..app import app
|
||||
from ...scripts.exam import Exam
|
||||
|
||||
|
||||
def get_students(csv):
    """Read the student roster csv and return one dict per student row."""
    students = pd.read_csv(csv)
    return students.to_dict("records")
|
||||
|
||||
|
||||
COLORS = {
|
||||
".": "black",
|
||||
0: "#E7472B",
|
||||
1: "#FF712B",
|
||||
2: "#F2EC4C",
|
||||
3: "#68D42F",
|
||||
}
|
||||
|
||||
QUESTION_COLUMNS = [
|
||||
{"id": "id", "name": "Question"},
|
||||
{
|
||||
"id": "competence",
|
||||
"name": "Competence",
|
||||
"presentation": "dropdown",
|
||||
},
|
||||
{"id": "theme", "name": "Domaine"},
|
||||
{"id": "comment", "name": "Commentaire"},
|
||||
{"id": "score_rate", "name": "Bareme"},
|
||||
{"id": "is_leveled", "name": "Est_nivele"},
|
||||
]
|
||||
|
||||
layout = html.Div(
|
||||
[
|
||||
html.Header(
|
||||
children=[
|
||||
html.H1("Bilan des élèves"),
|
||||
],
|
||||
),
|
||||
html.Main(
|
||||
children=[
|
||||
html.Section(
|
||||
children=[
|
||||
html.Form(
|
||||
id="select-student",
|
||||
children=[
|
||||
html.Label(
|
||||
children=[
|
||||
"Classe",
|
||||
dcc.Dropdown(
|
||||
id="tribe",
|
||||
options=[
|
||||
{"label": t["name"], "value": t["name"]}
|
||||
for t in config["tribes"]
|
||||
],
|
||||
value=config["tribes"][0]["name"],
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Élève",
|
||||
dcc.Dropdown(
|
||||
id="student",
|
||||
options=[
|
||||
{"label": t["Nom"], "value": t["Nom"]}
|
||||
for t in get_students(config["tribes"][0]["students"])
|
||||
],
|
||||
value=get_students(config["tribes"][0]["students"])[0]["Nom"],
|
||||
),
|
||||
]
|
||||
),
|
||||
html.Label(
|
||||
children=[
|
||||
"Trimestre",
|
||||
dcc.Dropdown(
|
||||
id="term",
|
||||
options=[
|
||||
{"label": i + 1, "value": i + 1}
|
||||
for i in range(3)
|
||||
],
|
||||
value=1,
|
||||
),
|
||||
]
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
id="form",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.H2("Évaluations"),
|
||||
html.Div(
|
||||
dash_table.DataTable(
|
||||
id="exam_scores",
|
||||
columns=[
|
||||
{"id": "Nom", "name": "Évaluations"},
|
||||
{"id": "Note", "name": "Note"},
|
||||
{"id": "Bareme", "name": "Barème"},
|
||||
],
|
||||
data=[],
|
||||
style_data_conditional=[
|
||||
{
|
||||
"if": {"row_index": "odd"},
|
||||
"backgroundColor": "rgb(248, 248, 248)",
|
||||
}
|
||||
],
|
||||
style_data={
|
||||
"width": "100px",
|
||||
"maxWidth": "100px",
|
||||
"minWidth": "100px",
|
||||
},
|
||||
),
|
||||
id="eval-table",
|
||||
),
|
||||
],
|
||||
id="Évaluations",
|
||||
),
|
||||
html.Section(
|
||||
children=[
|
||||
html.Div(
|
||||
id="competences-viz",
|
||||
),
|
||||
html.Div(
|
||||
id="themes-vizz",
|
||||
),
|
||||
],
|
||||
id="visualisation",
|
||||
),
|
||||
]
|
||||
),
|
||||
dcc.Store(id="student-scores"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@app.callback(
    [
        dash.dependencies.Output("student", "options"),
        dash.dependencies.Output("student", "value"),
    ],
    [
        dash.dependencies.Input("tribe", "value"),
    ],
)
def update_students_list(tribe):
    """Repopulate the student dropdown when the tribe selection changes."""
    tribe_config = [t for t in config["tribes"] if t["name"] == tribe][0]
    students = get_students(tribe_config["students"])
    names = [student["Nom"] for student in students]
    options = [{"label": name, "value": name} for name in names]
    return options, names[0]
|
||||
|
||||
|
||||
@app.callback(
    [
        dash.dependencies.Output("student-scores", "data"),
    ],
    [
        dash.dependencies.Input("tribe", "value"),
        dash.dependencies.Input("student", "value"),
        dash.dependencies.Input("term", "value"),
    ],
)
def update_student_scores(tribe, student, term):
    """Collect all scores of *student* for *term* across the tribe's csv files.

    Reads every ``*.csv`` in the tribe directory, skips unparsable files,
    flattens them to one row per (question, student) score and keeps only the
    selected student and term.
    """
    tribe_config = [t for t in config["tribes"] if t["name"] == tribe][0]

    # NOTE(review): assumes the scores csvs live in a directory named after
    # the tribe, relative to the working directory — confirm.
    p = Path(tribe_config["name"])
    csvs = list(p.glob("*.csv"))

    dfs = []
    for csv in csvs:
        try:
            scores = pd.read_csv(csv)
        except pd.errors.ParserError:
            # Not a scores file (or malformed): silently ignore it.
            pass
        else:
            # Drop an optional first row of per-question comments / PPRE markers.
            if scores.iloc[0]["Commentaire"] == "commentaire" or scores.iloc[0].str.contains("PPRE").any():
                scores.drop([0], inplace=True)
            # One row per (question, student); keep only filled scores.
            scores = flat_df_students(scores).dropna(subset=["Score"])
            scores = scores[scores["Eleve"] == student]
            scores = scores[scores["Trimestre"] == term]
            dfs.append(scores)

    # NOTE(review): raises if no csv matched (pd.concat on an empty list).
    df = pd.concat(dfs)

    return [df.to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
    [
        dash.dependencies.Output("exam_scores", "data"),
    ],
    [
        dash.dependencies.Input("student-scores", "data"),
    ],
)
def update_exam_scores(data):
    """Aggregate the student's scores into one (mark, total) row per exam."""
    scores = pp_q_scores(pd.DataFrame.from_records(data))
    per_exam = scores.groupby(["Nom"]).agg({"Note": "sum", "Bareme": "sum"})
    return [per_exam.reset_index().to_dict("records")]
|
||||
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("competences-viz", "children"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("student-scores", "data"),
|
||||
],
|
||||
)
|
||||
def update_competences_viz(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores = pp_q_scores(scores)
|
||||
pt = pd.pivot_table(
|
||||
scores,
|
||||
index=["Competence"],
|
||||
columns="Score",
|
||||
aggfunc="size",
|
||||
fill_value=0,
|
||||
)
|
||||
fig = go.Figure()
|
||||
bars = [
|
||||
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
|
||||
{"score": 0, "name": "Faux", "color": COLORS[0]},
|
||||
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
|
||||
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
|
||||
{"score": 3, "name": "Juste", "color": COLORS[3]},
|
||||
]
|
||||
for b in bars:
|
||||
try:
|
||||
fig.add_bar(
|
||||
x=list(config["competences"].keys()), y=pt[b["score"]], name=b["name"], marker_color=b["color"]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
fig.update_layout(barmode="relative")
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [dcc.Graph(figure=fig)]
|
||||
|
||||
@app.callback(
|
||||
[
|
||||
dash.dependencies.Output("themes-vizz", "children"),
|
||||
],
|
||||
[
|
||||
dash.dependencies.Input("student-scores", "data"),
|
||||
],
|
||||
)
|
||||
def update_themes_viz(data):
|
||||
scores = pd.DataFrame.from_records(data)
|
||||
scores = pp_q_scores(scores)
|
||||
pt = pd.pivot_table(
|
||||
scores,
|
||||
index=["Domaine"],
|
||||
columns="Score",
|
||||
aggfunc="size",
|
||||
fill_value=0,
|
||||
)
|
||||
fig = go.Figure()
|
||||
bars = [
|
||||
{"score": -1, "name": "Pas de réponse", "color": COLORS["."]},
|
||||
{"score": 0, "name": "Faux", "color": COLORS[0]},
|
||||
{"score": 1, "name": "Peu juste", "color": COLORS[1]},
|
||||
{"score": 2, "name": "Presque juste", "color": COLORS[2]},
|
||||
{"score": 3, "name": "Juste", "color": COLORS[3]},
|
||||
]
|
||||
for b in bars:
|
||||
try:
|
||||
fig.add_bar(
|
||||
x=list(pt.index), y=pt[b["score"]], name=b["name"], marker_color=b["color"]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
fig.update_layout(barmode="relative")
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
margin=dict(l=5, r=5, b=5, t=5),
|
||||
)
|
||||
return [dcc.Graph(figure=fig)]
|
||||
|
88
recopytex/database/__init__.py
Normal file
88
recopytex/database/__init__.py
Normal file
@@ -0,0 +1,88 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
import yaml
|
||||
|
||||
"""
|
||||
|
||||
Adapter to pull data from the filesystem
|
||||
|
||||
# Loader
|
||||
|
||||
# Writer
|
||||
"""
|
||||
|
||||
|
||||
class Loader(ABC):

    """Load data from a source.

    Subclasses implement the actual reading (filesystem, database, ...).
    """

    # Class-level default configuration, extended by the configfile.
    CONFIG = {}

    def __init__(self, configfile=""):
        """Init loader

        :param configfile: yaml file with informations on data source
        """
        # Copy the class-level defaults: the previous ``self._config =
        # self.CONFIG`` aliased the shared mutable dict, so ``update()``
        # leaked one instance's configuration into every other instance
        # (and into the class default itself).
        self._config = dict(self.CONFIG)
        if configfile.endswith(".yml"):
            with open(configfile, "r") as config:
                self._config.update(yaml.load(config, Loader=yaml.FullLoader))

    def get_config(self):
        """ Get config"""
        return self._config

    @abstractmethod
    def get_tribes(self):
        """ Get tribes list """
        pass

    @abstractmethod
    def get_exams(self, tribes=[]):
        """Get exams list

        :param tribes: get only exams for those tribes
        """
        pass

    @abstractmethod
    def get_students(self, tribes=[]):
        """Get student list

        :param tribes: get only students of those tribes
        """
        pass

    @abstractmethod
    def get_exam_questions(self, exams=[]):
        """Get questions for the exam

        :param exams: questions for those exams only
        """
        pass

    @abstractmethod
    def get_questions_scores(self, questions=[]):
        """Get scores of those questions

        :param questions: score for those questions
        """
        pass

    # @abstractmethod
    # def get_student_scores(self, student):
    #     """Get scores of the student

    #     :param student:
    #     """
    #     pass
|
||||
|
||||
|
||||
class Writer(ABC):

    """ Write datas to the source """

    # NOTE(review): the write interface is not defined yet; only a
    # constructor placeholder exists so far.
    def __init__(self):
        pass
|
15
recopytex/database/filesystem/__init__.py
Normal file
15
recopytex/database/filesystem/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""
|
||||
Store data using filesystem for organisation, csv for scores
|
||||
|
||||
## Organisation
|
||||
|
||||
- tribe1.csv # list of students for the tribe
|
||||
- tribe1/
|
||||
- exam1.csv # questions and scores for exam1
|
||||
- exam1.yml # Extra information about exam1
|
||||
- exam2.csv # questions and scores for exam2
|
||||
"""
|
||||
|
75
recopytex/database/filesystem/default_config.yml
Normal file
75
recopytex/database/filesystem/default_config.yml
Normal file
@@ -0,0 +1,75 @@
|
||||
---
|
||||
source: ./ # basepath where to start
|
||||
|
||||
competences: # Competences
|
||||
Chercher:
|
||||
name: Chercher
|
||||
abrv: Cher
|
||||
Représenter:
|
||||
name: Représenter
|
||||
abrv: Rep
|
||||
Modéliser:
|
||||
name: Modéliser
|
||||
abrv: Mod
|
||||
Raisonner:
|
||||
name: Raisonner
|
||||
abrv: Rai
|
||||
Calculer:
|
||||
name: Calculer
|
||||
abrv: Cal
|
||||
Communiquer:
|
||||
name: Communiquer
|
||||
abrv: Com
|
||||
|
||||
scores: #
|
||||
BAD: # Everything is bad
|
||||
value: 0
|
||||
numeric_value: 0
|
||||
color: "#E7472B"
|
||||
comment: Faux
|
||||
FEW: # Few good things
|
||||
value: 1
|
||||
numeric_value: 1
|
||||
color: "#FF712B"
|
||||
comment: Peu juste
|
||||
NEARLY: # Nearly good but things are missing
|
||||
value: 2
|
||||
numeric_value: 2
|
||||
color: "#F2EC4C"
|
||||
comment: Presque juste
|
||||
GOOD: # Everything is good
|
||||
value: 3
|
||||
numeric_value: 3
|
||||
color: "#68D42F"
|
||||
comment: Juste
|
||||
NOTFILLED: # The item is not scored yet
|
||||
value: ""
|
||||
numeric_value: None
|
||||
color: white
|
||||
comment: En attente
|
||||
NOANSWER: # Student gives no answer (count as 0)
|
||||
value: "."
|
||||
numeric_value: 0
|
||||
color: black
|
||||
comment: Pas de réponse
|
||||
ABS: # Student has absent (this score won't be impact the final mark)
|
||||
value: a
|
||||
numeric_value: None
|
||||
color: lightgray
|
||||
comment: Non noté
|
||||
|
||||
csv_fields: # dataframe_field: csv_field
|
||||
term: Trimestre
|
||||
exam: Nom
|
||||
date: Date
|
||||
exercise: Exercice
|
||||
question: Question
|
||||
competence: Competence
|
||||
theme: Domaine
|
||||
comment: Commentaire
|
||||
score_rate: Bareme
|
||||
is_leveled: Est_nivele
|
||||
|
||||
id_templates:
|
||||
exam: "{name}_{tribe}"
|
||||
question: "{exam_id}_{exercise}_{question}_{comment}"
|
52
recopytex/database/filesystem/lib.py
Normal file
52
recopytex/database/filesystem/lib.py
Normal file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
from unidecode import unidecode
|
||||
|
||||
|
||||
__all__ = ["list_csvs", "extract_fields"]
|
||||
|
||||
|
||||
def list_csvs(path):
    """list csv files in path

    :example:
    >>> list_csvs("./example/Tribe1/")
    [PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
    >>> list_csvs("./example/Tribe1")
    [PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
    """
    return [csv for csv in Path(path).glob("*.csv")]
|
||||
|
||||
|
||||
def extract_fields(csv_filename, fields=None, remove_duplicates=True):
    """Extract fields in csv

    :param csv_filename: csv filename (with header)
    :param fields: list of fields to extract (all fields if None/empty - default)
    :param remove_duplicates: keep uniques rows (default True)

    :example:
    >>> extract_fields("./example/Tribe1/210122_DS6.csv", ["Trimestre", "Nom", "Date"])
       Trimestre  Nom        Date
    0          1  DS6  22/01/2021
    """
    # Default is None rather than the mutable [] (shared-default pitfall);
    # behavior is unchanged since both are falsy.
    df = pd.read_csv(csv_filename)
    if fields:
        df = df[fields]
    if remove_duplicates:
        return df.drop_duplicates()
    return df
|
||||
|
||||
|
||||
def build_id(template, element):
    """Build an id from template to the element

    Accents are stripped (unidecode) and spaces replaced by underscores, so
    the id is filesystem/url friendly.

    :param template: format string whose fields are keys of *element*
    :param element: dict providing the template's values

    :example:
    >>> element = {"name": "pléà", "place": "here", "foo":"bar"}
    >>> build_id("{name} {place}", element)
    'plea_here'
    """
    return unidecode(template.format(**element)).replace(" ", "_")
|
298
recopytex/database/filesystem/loader.py
Normal file
298
recopytex/database/filesystem/loader.py
Normal file
@@ -0,0 +1,298 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import yaml
|
||||
import os
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
import pandas as pd
|
||||
from .. import Loader
|
||||
from .lib import list_csvs, extract_fields, build_id
|
||||
|
||||
|
||||
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "default_config.yml")
|
||||
with open(DEFAULT_CONFIG_FILE, "r") as config:
|
||||
DEFAULT_CONFIG = yaml.load(config, Loader=yaml.FullLoader)
|
||||
|
||||
|
||||
def maybe_dataframe(datas):
    """Return the rows of *datas* if it is a DataFrame, else *datas* unchanged.

    A DataFrame is detected duck-typed, through its ``iterrows`` method;
    anything without it (a plain list of records) passes through untouched.
    """
    try:
        rows = datas.iterrows()
    except AttributeError:
        return datas
    return [row for _, row in rows]
|
||||
|
||||
|
||||
class CSVLoader(Loader):

    """Loader when scores and metadatas are stored in csv files

    :config:

    :example:
    >>> loader = CSVLoader()
    >>> loader.get_config()
    {'source': './', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'scores': {'BAD': {'value': 0, 'numeric_value': 0, 'color': '#E7472B', 'comment': 'Faux'}, 'FEW': {'value': 1, 'numeric_value': 1, 'color': '#FF712B', 'comment': 'Peu juste'}, 'NEARLY': {'value': 2, 'numeric_value': 2, 'color': '#F2EC4C', 'comment': 'Presque juste'}, 'GOOD': {'value': 3, 'numeric_value': 3, 'color': '#68D42F', 'comment': 'Juste'}, 'NOTFILLED': {'value': '', 'numeric_value': 'None', 'color': 'white', 'comment': 'En attente'}, 'NOANSWER': {'value': '.', 'numeric_value': 0, 'color': 'black', 'comment': 'Pas de réponse'}, 'ABS': {'value': 'a', 'numeric_value': 'None', 'color': 'lightgray', 'comment': 'Non noté'}}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}}

    >>> loader = CSVLoader("./test_config.yml")
    >>> loader.get_config()
    {'source': './example', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'scores': {'BAD': {'value': 0, 'numeric_value': 0, 'color': '#E7472B', 'comment': 'Faux'}, 'FEW': {'value': 1, 'numeric_value': 1, 'color': '#FF712B', 'comment': 'Peu juste'}, 'NEARLY': {'value': 2, 'numeric_value': 2, 'color': '#F2EC4C', 'comment': 'Presque juste'}, 'GOOD': {'value': 3, 'numeric_value': 3, 'color': '#68D42F', 'comment': 'Juste'}, 'NOTFILLED': {'value': '', 'numeric_value': 'None', 'color': 'white', 'comment': 'En attente'}, 'NOANSWER': {'value': '.', 'numeric_value': 0, 'color': 'black', 'comment': 'Pas de réponse'}, 'ABS': {'value': 'a', 'numeric_value': 'None', 'color': 'lightgray', 'comment': 'Non noté'}}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}, 'output': './output', 'templates': 'templates/', 'tribes': {'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}}
    """

    # Class-level fallback configuration. Instances read the effective
    # configuration from self._config — presumably populated by the Loader
    # base class constructor (not visible here) — TODO confirm.
    CONFIG = DEFAULT_CONFIG

    def get_config(self):
        """ Get config """
        return self._config

    @property
    def exam_columns(self):
        # Canonical column set of the dataframe returned by get_exams.
        return pd.Index(["name", "date", "term", "origin", "tribe", "id"])

    @property
    def question_columns(self):
        # Canonical column set of the dataframe returned by get_exam_questions.
        return pd.Index(
            [
                "exercise",
                "question",
                "competence",
                "theme",
                "comment",
                "score_rate",
                "is_leveled",
                "origin",
                "exam_id",
                "id",
            ]
        )

    @property
    def score_columns(self):
        # Canonical column set of the dataframe returned by get_questions_scores.
        return pd.Index(
            [
                "term",
                "exam",
                "date",
                "exercise",
                "question",
                "competence",
                "theme",
                "comment",
                "score_rate",
                "is_leveled",
                "origin",
                "exam_id",
                "question_id",
                "student_name",
                "score",
            ]
        )

    def rename_columns(self, dataframe):
        """Rename dataframe column to match with `csv_fields`

        Maps the localized CSV headers (e.g. "Trimestre") back to the
        internal field names (e.g. "term") by inverting the
        ``csv_fields`` mapping of the configuration.

        :param dataframe: the dataframe
        :return: the renamed dataframe (a new object; input is unchanged)
        """
        return dataframe.rename(
            columns={v: k for k, v in self._config["csv_fields"].items()}
        )

    def reverse_csv_field(self, keys):
        """ Reverse csv field from keys: internal names -> CSV header names """
        return [self._config["csv_fields"][k] for k in keys]

    def get_tribes(self, only_names=False):
        """Get tribes list

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> loader.get_tribes()
        {'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}
        >>> loader.get_tribes(only_names=True)
        ['Tribe1', 'Tribe2']
        """
        if only_names:
            return list(self._config["tribes"].keys())
        return self._config["tribes"]

    # NOTE(review): the methods below use a mutable default argument
    # (tribes=[] / exams=[] / ...). They only read it, so it is harmless,
    # but a None default would be safer.
    def get_exams(self, tribes=[]):
        """Get exams list

        :param tribes: get only exams for those tribes
        :return: dataframe of exams

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> all(exams.columns == loader.exam_columns)
        True
        >>> exams
          name        date term                         origin   tribe          id
        0   DS  12/01/2021    1   example/Tribe1/210112_DS.csv  Tribe1   DS_Tribe1
        0  DS6  22/01/2021    1  example/Tribe1/210122_DS6.csv  Tribe1  DS6_Tribe1
        """
        exams = []
        for tribe in tribes:
            tribe_path = Path(self._config["source"]) / tribe
            csvs = list_csvs(tribe_path)
            for csv in csvs:
                # One exam per CSV: metadata columns are constant per file,
                # so extract_fields deduplicates them to a single row.
                fields = self.reverse_csv_field(["exam", "date", "term"])
                exam = extract_fields(csv, fields)
                exam = self.rename_columns(exam)
                exam = exam.rename(columns={"exam": "name"})
                exam["origin"] = str(csv)
                exam["tribe"] = tribe
                exam["id"] = build_id(
                    self._config["id_templates"]["exam"], exam.iloc[0]
                )
                exams.append(exam)
        if exams:
            return pd.concat(exams)
        # No tribe / no CSV: return an empty frame with the expected columns.
        return pd.DataFrame(columns=["name", "date", "term", "origin", "tribe", "id"])

    def get_exam_questions(self, exams=[]):
        """Get questions for exams stored in score_files

        :param exams: list or dataframe of exams metadatas (need origin field to find the csv)

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> all(loader.get_exam_questions([exams.iloc[0]]).columns == loader.question_columns)
        True
        >>> questions = loader.get_exam_questions(exams)
        >>> questions.iloc[0]
        exercise                           Exercice 1
        question                                    1
        competence                           Calculer
        theme                                    Plop
        comment                                Coucou
        score_rate                                1.0
        is_leveled                                1.0
        origin           example/Tribe1/210112_DS.csv
        exam_id                             DS_Tribe1
        id              DS_Tribe1_Exercice_1_1_Coucou
        Name: 0, dtype: object
        """
        _exams = maybe_dataframe(exams)

        questions = []
        for exam in _exams:
            fields = self.reverse_csv_field(
                [
                    "exercise",
                    "question",
                    "competence",
                    "theme",
                    "comment",
                    "score_rate",
                    "is_leveled",
                ]
            )
            question = extract_fields(exam["origin"], fields)
            question = self.rename_columns(question)
            question["origin"] = exam["origin"]
            question["exam_id"] = exam["id"]
            # NOTE(review): build_id is fed only the FIRST question row, yet
            # assigned to the whole column — every question of the exam gets
            # the same id. Verify whether per-row ids were intended.
            question["id"] = build_id(
                self._config["id_templates"]["question"], question.iloc[0]
            )
            questions.append(question)

        return pd.concat(questions)

    def get_questions_scores(self, questions=[]):
        """Get scores of those questions

        :param questions: list or dataframe of questions metadatas (need origin field to find the csv)

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> questions = loader.get_exam_questions(exams)
        >>> scores = loader.get_questions_scores(questions)
        >>> all(scores.columns == loader.score_columns)
        True
        >>> scores["student_name"].unique()
        array(['Star Tice', 'Umberto Dingate', 'Starlin Crangle',
               'Humbert Bourcq', 'Gabriella Handyside', 'Stewart Eaves',
               'Erick Going', 'Ase Praton', 'Rollins Planks', 'Dunstan Sarjant',
               'Stacy Guiton', 'Ange Stanes', 'Amabelle Elleton',
               'Darn Broomhall', 'Dyan Chatto', 'Keane Rennebach', 'Nari Paulton',
               'Brandy Wase', 'Jaclyn Firidolfi', 'Violette Lockney'],
              dtype=object)
        """
        scores = []
        # One CSV read per distinct origin file.
        group_questions = questions.groupby("origin")
        for origin, questions_df in group_questions:
            scores_df = pd.read_csv(origin)
            scores_df = self.rename_columns(scores_df)
            # Columns that are not configured metadata fields are assumed to
            # be one column per student.
            student_names = [
                c
                for c in scores_df.columns
                if c not in self._config["csv_fields"].keys()
            ]

            common_columns = [c for c in questions_df.columns if c in scores_df.columns]
            scores_df = pd.merge(scores_df, questions_df, on=common_columns)

            # Wide -> long: one row per (question, student) pair.
            kept_columns = [c for c in scores_df if c not in student_names]
            scores_df = pd.melt(
                scores_df,
                id_vars=kept_columns,
                value_vars=student_names,
                var_name="student_name",
                value_name="score",
            )

            scores_df = scores_df.rename(columns={"id": "question_id"})
            scores.append(scores_df)

        return pd.concat(scores)

    def get_exam_scores(self, exams=[]):
        """Get scores for all question of the exam

        Convenience wrapper: questions lookup followed by scores lookup.

        :param exams: list or dataframe of exams metadatas (need origin field to find the csv)

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> exams = loader.get_exams(["Tribe1"])
        >>> scores = loader.get_exam_scores(exams)
        >>> scores.columns
        Index(['term', 'exam', 'date', 'exercise', 'question', 'competence', 'theme',
               'comment', 'score_rate', 'is_leveled', 'origin', 'exam_id',
               'question_id', 'student_name', 'score'],
              dtype='object')
        """
        questions = self.get_exam_questions(exams)
        return self.get_questions_scores(questions)

    def get_students(self, tribes=[]):
        """Get student list

        :param tribes: concerned tribes (dicts with "students" csv path and "name")

        :example:
        >>> loader = CSVLoader("./test_config.yml")
        >>> tribes = loader.get_tribes()
        >>> students = loader.get_students([tribes["Tribe1"]])
        >>> students.columns
        Index(['Nom', 'email', 'origin', 'tribe'], dtype='object')
        """
        students = []
        for tribe in tribes:
            students_csv = Path(self._config["source"]) / tribe["students"]
            students_df = pd.read_csv(students_csv)
            students_df["origin"] = students_csv
            students_df["tribe"] = tribe["name"]
            students.append(students_df)

        return pd.concat(students)

    def get_student_scores(self, student=[]):
        """Get all scores for students"""
        # Not implemented yet.
        pass
|
7
recopytex/database/filesystem/writer.py
Normal file
7
recopytex/database/filesystem/writer.py
Normal file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
"""
|
||||
|
||||
"""
|
||||
|
0
recopytex/datalib/__init__.py
Normal file
0
recopytex/datalib/__init__.py
Normal file
21
recopytex/datalib/dataframe.py
Normal file
21
recopytex/datalib/dataframe.py
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
|
||||
def column_values_to_column(pivot_column, value_column, kept_columns, df):
    """Spread *pivot_column*'s values into columns filled from *value_column*.

    Each distinct value of *pivot_column* becomes a column whose cells come
    from *value_column*; *kept_columns* stay untouched.

    :param pivot_column: column name whose values become new columns
    :param value_column: column name providing the cell values
    :param kept_columns: unchanged columns
    :param df: DataFrame to work with

    :return: Stack dataframe
    """
    index_columns = list(kept_columns)
    if pivot_column not in index_columns:
        index_columns.append(pivot_column)

    wide = df.set_index(index_columns).unstack(pivot_column)
    return wide[value_column].reset_index()
|
257
recopytex/datalib/on_score_column.py
Normal file
257
recopytex/datalib/on_score_column.py
Normal file
@@ -0,0 +1,257 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from math import ceil
|
||||
import pandas as pd
|
||||
|
||||
|
||||
def is_none_score(x, score_config):
    """Tell whether the score of row *x* carries no numeric meaning.

    A score is "none" when it is NaN or when its configured
    ``numeric_value`` is the string "None" (e.g. not filled, absent).

    :param x: mapping with a "score" key
    :param score_config: score descriptions ({name: {"value", "numeric_value", ...}})
    :return: truthy when the score should be discarded before computation

    >>> cfg = {
    ...     'GOOD': {'value': 3, 'numeric_value': 3},
    ...     'NOTFILLED': {'value': '', 'numeric_value': 'None'},
    ...     'NOANSWER': {'value': '.', 'numeric_value': 0},
    ...     'ABS': {'value': 'a', 'numeric_value': 'None'},
    ... }
    >>> bool(is_none_score({"score": ""}, cfg))
    True
    >>> bool(is_none_score({"score": "."}, cfg))
    False
    """
    # Values whose numeric counterpart is the literal string "None".
    non_numeric = {
        v["value"]
        for v in score_config.values()
        if str(v["numeric_value"]).lower() == "none"
    }
    return x["score"] in non_numeric or pd.isnull(x["score"])
|
||||
|
||||
|
||||
def format_score(x, score_config):
    """Coerce the raw score of row *x* into its canonical form.

    Non-leveled scores are numeric marks and become floats.  Leveled
    scores are cast to int when possible (so "1.0" -> 1), kept as
    strings otherwise, and must match one of the configured values.

    :param x: mapping with "score" and "is_leveled" keys
    :param score_config: score descriptions ({name: {"value", ...}})
    :return: the normalised score
    :raises ValueError: when a leveled score matches no configured value

    >>> cfg = {
    ...     'GOOD': {'value': 3, 'numeric_value': 3},
    ...     'NOANSWER': {'value': '.', 'numeric_value': 0},
    ... }
    >>> format_score({"score": "3.0", "is_leveled": 1}, cfg)
    3
    >>> format_score({"score": ".", "is_leveled": 1}, cfg)
    '.'
    """
    raw = x["score"]
    if not x["is_leveled"]:
        return float(raw)

    try:
        candidate = int(float(raw))
    except ValueError:
        candidate = str(raw)

    allowed = [v["value"] for v in score_config.values()]
    if candidate in allowed:
        return candidate

    raise ValueError(f"{x['score']} ({type(x['score'])}) can't be a score")
|
||||
|
||||
|
||||
def score_to_numeric_score(x, score_config):
    """Translate the score of row *x* into its numeric counterpart.

    Leveled scores are mapped through *score_config* (``value`` ->
    ``numeric_value``); non-leveled scores are already numeric and are
    returned untouched.

    :param x: mapping with "score" and "is_leveled" keys
    :param score_config: score descriptions ({name: {"value", "numeric_value"}})
    :return: the numeric score (or the config's numeric_value placeholder)
    :raises KeyError: when a leveled score matches no configured value

    >>> cfg = {'NOANSWER': {'value': '.', 'numeric_value': 0}}
    >>> score_to_numeric_score({"score": ".", "is_leveled": 1}, cfg)
    0
    """
    if not x["is_leveled"]:
        return x["score"]

    to_numeric = {v["value"]: v["numeric_value"] for v in score_config.values()}
    return to_numeric[x["score"]]
|
||||
|
||||
|
||||
def score_to_mark(x, score_max, rounding=lambda x: round(x, 2)):
    """Compute the mark from "score" which have to be filtered and in numeric form.

    If the item is leveled, the score (an integer between 0 and
    *score_max*) is scaled to the item's score_rate; otherwise the score
    is already a mark and is only rounded.

    :param x: dictionnary with "is_leveled", "score" (need to be number) and "score_rate" keys
    :param score_max: highest possible leveled score
    :param rounding: rounding mark function (default: 2 decimals)
    :return: the mark
    :raises ValueError: when a leveled score lies outside [0, score_max]

    >>> score_to_mark({"is_leveled": 1, "score": 2, "score_rate": 1}, 3)
    0.67
    >>> score_to_mark({"is_leveled": 0, "score": 0.33, "score_rate": 1}, 3)
    0.33
    """
    if x["is_leveled"]:
        # Membership on the range object directly — no need to materialize
        # a throwaway list. Float-valued integers still match (2.0 == 2).
        if x["score"] not in range(score_max + 1):
            raise ValueError(f"The evaluation is out of range: {x['score']} at {x}")
        return rounding(x["score"] * x["score_rate"] / score_max)

    return rounding(x["score"])
|
||||
|
||||
|
||||
def score_to_level(x, level_max=3):
    """Compute the level (".",0,1,2,3) of row *x* on a 0..*level_max* scale.

    Leveled scores are already levels and are simply cast to int.
    Numeric scores are mapped to ``ceil(score / score_rate * level_max)``.

    :param x: dictionnary with "is_leveled", "score" and "score_rate" keys
    :param level_max: highest reachable level (default 3)
    :return: the level
    :raises ValueError: when a numeric score exceeds its score_rate

    >>> score_to_level({"is_leveled": 0, "score": 0.33, "score_rate": 1})
    1
    >>> score_to_level({"is_leveled": 0, "score": 0.33, "score_rate": 1}, 5)
    2
    """
    if x["is_leveled"]:
        return int(x["score"])

    score, rate = x["score"], x["score_rate"]
    if score > rate:
        raise ValueError(
            f"score is higher than score_rate ({x['score']} > {x['score_rate']}) for {x}"
        )

    return int(ceil(score / rate * level_max))
|
||||
|
||||
|
||||
# -----------------------------
|
||||
# Reglages pour 'vim'
|
||||
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
||||
# cursor: 16 del
|
40
recopytex/datalib/on_value.py
Normal file
40
recopytex/datalib/on_value.py
Normal file
@@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from math import ceil, floor
|
||||
|
||||
|
||||
def round_with_base(x, base=0.5):
    """Round to a multiple of base

    :example:
    >>> round_with_base(1.33, 0.1)
    1.3
    >>> round_with_base(1.33, 0.2)
    1.4
    >>> round_with_base(1.33, 1)
    1
    >>> round_with_base(1.33, 2)
    2
    """
    # Keep as many decimals as the base itself has ("0.5" -> 1 digit,
    # integer bases -> 0 digits) to hide float noise like 1.3000000000000003.
    base_parts = str(base).split(".")
    decimals = len(base_parts[1]) if len(base_parts) > 1 else 0
    nearest_multiple = base * round(float(x) / base)
    return round(nearest_multiple, decimals)
|
||||
|
||||
|
||||
def round_half_point(x):
    """Round *x* to the nearest half point (multiple of 0.5).

    :example:
    >>> round_half_point(1.33)
    1.5
    >>> round_half_point(1.1)
    1.0
    >>> round_half_point(1.66)
    1.5
    >>> round_half_point(1.76)
    2.0
    """
    # Thin convenience wrapper around round_with_base.
    return round_with_base(x, 0.5)
|
@@ -1,219 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from math import ceil, floor
|
||||
from .config import COLUMNS
|
||||
|
||||
"""
|
||||
Functions for manipulate score dataframes
|
||||
"""
|
||||
|
||||
|
||||
def round_half_point(val):
    """Round *val* UP to the next half point; non-numeric values pass through.

    Scores that cannot take part in arithmetic (e.g. strings such as ".")
    are returned unchanged instead of raising.
    """
    try:
        doubled = ceil(2.0 * val)
    except (ValueError, TypeError):
        return val
    return 0.5 * doubled
|
||||
|
||||
|
||||
def score_to_mark(x):
    """Compute the mark

    if the item is leveled then the score is multiply by the score_rate
    otherwise it copies the score

    :param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
    :return: the mark

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> score_to_mark(df.loc[0])
    1.0
    >>> score_to_mark(df.loc[10])
    1.3333333333333333
    """
    # -1 is no answer
    if x[COLUMNS["score"]] == -1:
        return 0

    if x[COLUMNS["is_leveled"]]:
        # NOTE(review): the leveled scale is hard-coded to 0..3 here
        # (the newer implementation takes score_max as a parameter).
        if x[COLUMNS["score"]] not in [0, 1, 2, 3]:
            raise ValueError(
                f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}"
            )
        return round(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3, 2)

    if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
        # NOTE(review): "greated" is a typo, but it is a runtime error
        # message, left untouched here.
        raise ValueError(
            f"The score ({x['score']}) is greated than the rating scale ({x[COLUMNS['score_rate']]}) at {x}"
        )
    return x[COLUMNS["score"]]
|
||||
|
||||
|
||||
def score_to_level(x):
    """Compute the level (".",0,1,2,3).

    :param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
    :return: the level

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, np.nan, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> score_to_level(df.loc[0])
    3
    >>> score_to_level(df.loc[1])
    1
    >>> score_to_level(df.loc[2])
    'na'
    >>> score_to_level(df.loc[3])
    3
    >>> score_to_level(df.loc[5])
    3
    >>> score_to_level(df.loc[10])
    2
    """
    # negatives are no answer or negatives points
    # NOTE(review): the doctest above claims 'na' for a NaN score, but no
    # branch below visibly handles NaN (NaN <= -1 is False, and
    # ceil(NaN) would fail) — verify this doctest actually passes.
    if x[COLUMNS["score"]] <= -1:
        return np.nan

    if x[COLUMNS["is_leveled"]]:
        return int(x[COLUMNS["score"]])

    # Hard-coded 3-level scale (the newer implementation parameterizes it).
    return int(ceil(x[COLUMNS["score"]] / x[COLUMNS["score_rate"]] * 3))
|
||||
|
||||
|
||||
# DataFrame columns manipulations
|
||||
|
||||
|
||||
def compute_mark(df):
    """Compute the mark for the dataframe

    apply score_to_mark to each row

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> compute_mark(df)
    0     1.00
    1     0.33
    2     2.00
    3     1.50
    4     0.67
    5     2.00
    6     0.67
    7     1.00
    8     1.50
    9     1.00
    10    1.33
    11    2.00
    dtype: float64
    """
    # Restrict to the three columns score_to_mark reads before the
    # row-wise apply.
    return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
        score_to_mark, axis=1
    )
|
||||
|
||||
|
||||
def compute_level(df):
    """Compute level for the dataframe

    Applies score_to_level to each row

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
    :return: Columns with level

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[np.nan, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> compute_level(df)
    0     na
    1      1
    2      3
    3      3
    4      1
    5      3
    6      2
    7      3
    8      3
    9      2
    10     2
    11     3
    dtype: object
    """
    # NOTE(review): the doctest expects 'na' for the NaN score in row 0,
    # but score_to_level has no visible NaN branch — verify.
    return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
        score_to_level, axis=1
    )
|
||||
|
||||
|
||||
def compute_normalized(df):
    """Compute the normalized mark (Mark / score_rate)

    :param df: DataFrame with "Mark" and COLUMNS["score_rate"] columns
    :return: column with normalized mark

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ...      COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ...      COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ...      COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ...      }
    >>> df = pd.DataFrame(d)
    >>> df["Mark"] = compute_mark(df)
    >>> compute_normalized(df)
    0     1.00
    1     0.33
    2     1.00
    3     0.75
    4     0.33
    5     1.00
    6     0.67
    7     1.00
    8     0.75
    9     0.50
    10    0.67
    11    1.00
    dtype: float64
    """
    # NOTE(review): assumes COLUMNS["mark"] == "Mark" (the docstring and
    # the code name the column differently) — TODO confirm.
    return df[COLUMNS["mark"]] / df[COLUMNS["score_rate"]]
|
||||
|
||||
|
||||
# Postprocessing question scores
|
||||
|
||||
|
||||
def pp_q_scores(df):
    """Postprocessing questions scores dataframe

    Add 3 columns: mark, level and normalized

    :param df: questions-scores dataframe
    :return: same data frame with mark, level and normalize columns
    """
    # df.assign calls each callable with the dataframe; keys are evaluated
    # in declaration order, so the mark column exists by the time
    # compute_normalized needs it.
    assign = {
        COLUMNS["mark"]: compute_mark,
        COLUMNS["level"]: compute_level,
        COLUMNS["normalized"]: compute_normalized,
    }
    return df.assign(**assign)
|
||||
|
||||
|
||||
# -----------------------------
|
||||
# Reglages pour 'vim'
|
||||
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
||||
# cursor: 16 del
|
@@ -1,10 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import yaml
|
||||
|
||||
# User configuration file, resolved relative to the current working directory.
CONFIGPATH = "recoconfig.yml"

# Loaded once at import time; FullLoader parses plain YAML without
# arbitrary object construction.
with open(CONFIGPATH, "r") as configfile:
    config = yaml.load(configfile, Loader=yaml.FullLoader)
|
||||
|
@@ -1,211 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# from prompt_toolkit import HTML
|
||||
from ..config import NO_ST_COLUMNS
|
||||
import pandas as pd
|
||||
import yaml
|
||||
from .getconfig import config
|
||||
|
||||
|
||||
def try_parsing_date(text, formats=("%Y-%m-%d", "%Y.%m.%d", "%Y/%m/%d")):
    """Parse the first 10 characters of *text* with the first matching format.

    The default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall; behavior is unchanged.

    :param text: date string (only text[:10] is considered)
    :param formats: candidate strptime formats, tried in order
    :return: the parsed datetime
    :raises ValueError: when no format matches
    """
    for fmt in formats:
        try:
            return datetime.strptime(text[:10], fmt)
        except ValueError:
            pass
    raise ValueError("no valid date format found")
|
||||
|
||||
|
||||
def format_question(question):
    """Normalise a question dict in place: "score_rate" becomes a float.

    :param question: question dict with a "score_rate" key
    :return: the same dict, mutated
    """
    rate = question["score_rate"]
    question["score_rate"] = float(rate)
    return question
|
||||
|
||||
|
||||
class Exam:
|
||||
def __init__(self, name, tribename, date, term, **kwrds):
|
||||
self._name = name
|
||||
self._tribename = tribename
|
||||
|
||||
self._date = try_parsing_date(date)
|
||||
|
||||
self._term = term
|
||||
|
||||
try:
|
||||
kwrds["exercices"]
|
||||
except KeyError:
|
||||
self._exercises = {}
|
||||
else:
|
||||
self._exercises = kwrds["exercices"]
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self._name
|
||||
|
||||
@property
|
||||
def tribename(self):
|
||||
return self._tribename
|
||||
|
||||
@property
|
||||
def date(self):
|
||||
return self._date
|
||||
|
||||
@property
|
||||
def term(self):
|
||||
return self._term
|
||||
|
||||
def add_exercise(self, name, questions):
|
||||
"""Add key with questions in ._exercises"""
|
||||
try:
|
||||
self._exercises[name]
|
||||
except KeyError:
|
||||
self._exercises[name] = [
|
||||
format_question(question) for question in questions
|
||||
]
|
||||
else:
|
||||
raise KeyError("The exercise already exsists. Use modify_exercise")
|
||||
|
||||
def modify_exercise(self, name, questions, append=False):
|
||||
"""Modify questions of an exercise
|
||||
|
||||
If append==True, add questions to the exercise questions
|
||||
|
||||
"""
|
||||
try:
|
||||
self._exercises[name]
|
||||
except KeyError:
|
||||
raise KeyError("The exercise already exsists. Use modify_exercise")
|
||||
else:
|
||||
if append:
|
||||
self._exercises[name] += format_question(questions)
|
||||
else:
|
||||
self._exercises[name] = format_question(questions)
|
||||
|
||||
@property
|
||||
def exercices(self):
|
||||
return self._exercises
|
||||
|
||||
@property
|
||||
def tribe_path(self):
|
||||
return Path(config["source"]) / self.tribename
|
||||
|
||||
@property
|
||||
def tribe_student_path(self):
|
||||
return (
|
||||
Path(config["source"])
|
||||
/ [t["students"] for t in config["tribes"] if t["name"] == self.tribename][
|
||||
0
|
||||
]
|
||||
)
|
||||
|
||||
@property
|
||||
def long_name(self):
|
||||
"""Get exam name with date inside"""
|
||||
return f"{self.date.strftime('%y%m%d')}_{self.name}"
|
||||
|
||||
def path(self, extention=""):
|
||||
return self.tribe_path / (self.long_name + extention)
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
"name": self.name,
|
||||
"tribename": self.tribename,
|
||||
"date": self.date,
|
||||
"term": self.term,
|
||||
"exercices": self.exercices,
|
||||
}
|
||||
|
||||
def to_row(self):
|
||||
rows = []
|
||||
for ex, questions in self.exercices.items():
|
||||
for q in questions:
|
||||
rows.append(
|
||||
{
|
||||
"term": self.term,
|
||||
"assessment": self.name,
|
||||
"date": self.date.strftime("%d/%m/%Y"),
|
||||
"exercise": ex,
|
||||
"question": q["id"],
|
||||
**q,
|
||||
}
|
||||
)
|
||||
return rows
|
||||
|
||||
@property
|
||||
def themes(self):
|
||||
themes = set()
|
||||
for questions in self._exercises.values():
|
||||
themes.update([q["theme"] for q in questions])
|
||||
return themes
|
||||
|
||||
def display_exercise(self, name):
    # TODO: not implemented yet — intended to pretty-print one exercise.
    pass
|
||||
|
||||
def display(self, name):
    # TODO: not implemented yet — intended to pretty-print the whole exam.
    pass
|
||||
|
||||
def write_yaml(self):
    """Dump the exam description to <tribe_path>/<long_name>.yml."""
    print(f"Sauvegarde temporaire dans {self.path('.yml')}")
    self.tribe_path.mkdir(exist_ok=True)
    with open(self.path(".yml"), "w") as output:
        # Dump straight to the stream instead of building the string first;
        # the serialized content is identical.
        yaml.dump(self.to_dict(), output)
|
||||
|
||||
def write_csv(self):
    """Write the empty scoring sheet to <tribe_path>/<long_name>.csv.

    The sheet has one line per question (metadata columns renamed through
    NO_ST_COLUMNS) plus one empty column per student read from the tribe's
    student file.
    """
    rows = self.to_row()

    # Fix: removed a leftover debug print(rows) from the original.
    base_df = pd.DataFrame.from_dict(rows)[NO_ST_COLUMNS.keys()]
    base_df.rename(columns=NO_ST_COLUMNS, inplace=True)

    students = pd.read_csv(self.tribe_student_path)["Nom"]
    for student in students:
        base_df[student] = ""

    self.tribe_path.mkdir(exist_ok=True)
    base_df.to_csv(self.path(".csv"), index=False)
|
||||
|
||||
@property
def score_rate(self):
    """Total score of the exam: sum of every question's score_rate."""
    return sum(
        question["score_rate"]
        for questions in self._exercises.values()
        for question in questions
    )
|
||||
|
||||
@property
def competences_rate(self):
    """Dictionnary with competences as key and total rate as value.

    Questions with no "competence" key are skipped, as before.
    """
    rates = {}
    for questions in self._exercises.values():
        for question in questions:
            if "competence" not in question:
                continue
            competence = question["competence"]
            rates[competence] = rates.get(competence, 0) + question["score_rate"]
    return rates
|
||||
|
||||
@property
def themes_rate(self):
    """Dictionnary with themes as key and total rate as value.

    Questions with no "theme" key, or with a falsy theme (e.g. ""), are
    skipped, as before.
    """
    rates = {}
    for questions in self._exercises.values():
        for question in questions:
            theme = question.get("theme")
            if theme:
                rates[theme] = rates.get(theme, 0) + question["score_rate"]
    return rates
|
@@ -1,9 +0,0 @@
|
||||
#!/usr/bin/env python
# encoding: utf-8
"""Load the module-level ``config`` dict from recoconfig.yml."""

import yaml


CONFIGPATH = "recoconfig.yml"

# Fix: the original bound the open file handle and the resulting dict to
# the same name ("config"), shadowing the handle inside its own with-block.
with open(CONFIGPATH, "r") as config_file:
    config = yaml.load(config_file, Loader=yaml.FullLoader)
|
||||
|
@@ -1,160 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
import click
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from PyInquirer import prompt, print_json
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
from .config import config
|
||||
from ..config import NO_ST_COLUMNS
|
||||
|
||||
|
||||
class PromptAbortException(EOFError):
    """Raised to abort the current questionnary."""

    def __init__(self, message, errors=None):
        # The base class always receives the same fixed abort message;
        # the "message" argument is accepted but not forwarded (as before).
        super().__init__("Abort questionnary", errors)
|
||||
|
||||
|
||||
def get_tribes(answers):
    """List tribes: subdirectories of config["source"] holding an "eleves.csv" file."""
    source = Path(config["source"])
    return [
        child.name for child in source.iterdir() if (child / "eleves.csv").exists()
    ]
|
||||
|
||||
|
||||
def prepare_csv():
    """Interactively describe a new assessment and write its empty scoring csv.

    Prompts the user through new_eval(), then writes
    <source>/<tribe>/<yymmdd>_<assessment>.csv with one line per notation
    item and one empty column per student.
    """
    items = new_eval()

    # All items share the same evaluation metadata; use the first one to
    # build the output path.
    item = items[0]
    # item = {"tribe": "308", "date": datetime.today(), "assessment": "plop"}
    csv_output = (
        Path(config["source"])
        / item["tribe"]
        / f"{item['date']:%y%m%d}_{item['assessment']}.csv"
    )

    students = pd.read_csv(Path(config["source"]) / item["tribe"] / "eleves.csv")["Nom"]

    # Keep only the metadata columns, in NO_ST_COLUMNS key order, then
    # rename them to their csv headers (NO_ST_COLUMNS values).
    columns = list(NO_ST_COLUMNS.keys())
    items = [[it[c] for c in columns] for it in items]
    columns = list(NO_ST_COLUMNS.values())
    items_df = pd.DataFrame.from_records(items, columns=columns)
    # One empty (NaN) score column per student.
    for s in students:
        items_df[s] = np.nan

    items_df.to_csv(csv_output, index=False, date_format="%d/%m/%Y")
    click.echo(f"Saving csv file to {csv_output}")
|
||||
|
||||
|
||||
def new_eval(answers={}):
    """Prompt for a new evaluation, then for its exercises.

    :param answers: accepted for signature symmetry with the other prompt
        helpers but not used in this body.
        NOTE(review): mutable default argument — harmless here since it is
        never read or mutated, but worth cleaning up.
    :return: flat list of item dicts (one per notation element).
    """
    click.echo(f"Préparation d'un nouveau devoir")

    eval_questions = [
        {"type": "input", "name": "assessment", "message": "Nom de l'évaluation",},
        {
            "type": "list",
            "name": "tribe",
            "message": "Classe concernée",
            "choices": get_tribes,
        },
        {
            # The typed yymmdd string is parsed to a datetime by the filter.
            "type": "input",
            "name": "date",
            "message": "Date du devoir (%y%m%d)",
            "default": datetime.today().strftime("%y%m%d"),
            "filter": lambda val: datetime.strptime(val, "%y%m%d"),
        },
        {
            "type": "list",
            "name": "term",
            "message": "Trimestre",
            "choices": ["1", "2", "3"],
        },
    ]

    eval_ans = prompt(eval_questions)

    # Keep adding exercises until the user declines.
    items = []
    add_exo = True
    while add_exo:
        ex_items = new_exercice(eval_ans)
        items += ex_items
        add_exo = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_exo",
                    "message": "Ajouter un autre exercice",
                    "default": True,
                }
            ]
        )["add_exo"]
    return items
|
||||
|
||||
|
||||
def new_exercice(answers=None):
    """Prompt for one exercise, then for its notation items.

    :param answers: pre-filled answers forwarded to PyInquirer's prompt
        (defaults to an empty dict).
    :return: list of item dicts belonging to this exercise.
    """
    # Fix: avoid a mutable default argument — the dict is handed to
    # prompt(answers=...) which may mutate it, poisoning later calls.
    if answers is None:
        answers = {}

    exercise_questions = [
        {"type": "input", "name": "exercise", "message": "Nom de l'exercice"},
    ]

    click.echo(f"Nouvel exercice")
    exercise_ans = prompt(exercise_questions, answers=answers)

    items = []

    # Keep adding items until the user declines; a cancelled item is
    # reported and skipped.
    add_item = True
    while add_item:
        try:
            item_ans = new_item(exercise_ans)
        except PromptAbortException:
            click.echo("Création de l'item annulée")
        else:
            items.append(item_ans)
        add_item = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_item",
                    "message": f"Ajouter un autre item pour l'exercice {exercise_ans['exercise']}",
                    "default": True,
                }
            ]
        )["add_item"]

    return items
|
||||
|
||||
|
||||
def new_item(answers=None):
    """Prompt for one notation item (question, competence, theme, bareme...).

    :param answers: pre-filled answers; must contain "exercise".
    :raises PromptAbortException: when the user answers that the item is
        not correct, so the caller discards it.
    :return: dict describing the item.
    """
    # Fix: avoid a mutable default argument — the dict is handed to
    # prompt(answers=...) which may mutate it, poisoning later calls.
    if answers is None:
        answers = {}

    item_questions = [
        {"type": "input", "name": "question", "message": "Nom de l'item",},
        {"type": "input", "name": "comment", "message": "Commentaire",},
        {
            "type": "list",
            "name": "competence",
            "message": "Competence",
            "choices": ["Cher", "Rep", "Mod", "Rai", "Cal", "Com"],
        },
        {"type": "input", "name": "theme", "message": "Domaine",},
        {
            "type": "confirm",
            "name": "is_leveled",
            "message": "Évaluation par niveau",
            "default": True,
        },
        {"type": "input", "name": "score_rate", "message": "Bareme"},
        {
            # Final confirmation: refusing it aborts the whole item.
            "type": "confirm",
            "name": "correct",
            "message": "Tout est correct?",
            "default": True,
        },
    ]
    click.echo(f"Nouvelle question pour l'exercice {answers['exercise']}")
    item_ans = prompt(item_questions, answers=answers)
    if item_ans["correct"]:
        return item_ans
    raise PromptAbortException("Abort item creation")
|
@@ -1,233 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
|
||||
from prompt_toolkit import prompt, HTML, ANSI
|
||||
from prompt_toolkit import print_formatted_text as print
|
||||
from prompt_toolkit.styles import Style
|
||||
from prompt_toolkit.validation import Validator
|
||||
from prompt_toolkit.completion import WordCompleter
|
||||
from unidecode import unidecode
|
||||
from datetime import datetime
|
||||
from functools import wraps
|
||||
import sys
|
||||
|
||||
from .getconfig import config
|
||||
|
||||
|
||||
# Accepted spellings for a "yes" answer in prompt_validate.
# NOTE(review): answers are lowercased before comparison, so the
# uppercase entries below are unreachable — confirm intent.
VALIDATE = [
    "o",
    "ok",
    "OK",
    "oui",
    "OUI",
    "yes",
    "YES",
]
# Accepted spellings for a "no" answer.
REFUSE = ["n", "non", "NON", "no", "NO"]
# Accepted spellings to cancel (only offered when cancelable=True).
CANCEL = ["a", "annuler"]

# prompt_toolkit styling shared by every prompt of this module.
STYLE = Style.from_dict(
    {
        "": "#93A1A1",
        "validation": "#884444",
        "appending": "#448844",
    }
)
|
||||
|
||||
|
||||
class CancelError(Exception):
    """Raised when the user cancels a prompt_until_validate questionnary."""

    pass
|
||||
|
||||
|
||||
def prompt_validate(question, cancelable=False, empty_means=1, style="validation"):
    """Prompt for validation

    :param question: Text to print to ask the question.
    :param cancelable: enable cancel answer
    :param empty_means: result for no answer
    :return:
        0 -> Refuse
        1 -> Validate
        -1 -> cancel
    """
    question_ = question
    choices = VALIDATE + REFUSE

    if cancelable:
        question_ += "(a ou annuler pour sortir)"
        choices += CANCEL

    # Completion proposes every accepted answer; the raw input is
    # lowercased before being classified below.
    ans = prompt(
        [
            (f"class:{style}", question_),
        ],
        completer=WordCompleter(choices),
        style=STYLE,
    ).lower()

    if ans == "":
        return empty_means
    if ans in VALIDATE:
        return 1
    if cancelable and ans in CANCEL:
        return -1
    # Anything else — explicit refusals and unknown input alike — refuses.
    return 0
|
||||
|
||||
|
||||
def prompt_until_validate(question="C'est ok? ", cancelable=False):
    """Decorator factory: re-run the decorated prompt until validated.

    The wrapped function is called, then the user is asked ``question``
    (via prompt_validate). On refusal the function runs again with the
    previous answers merged into its keyword arguments; on cancel a
    CancelError is raised.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwrd):
            ans = func(*args, **kwrd)

            confirm = prompt_validate(question, cancelable)

            if confirm == -1:
                raise CancelError

            while not confirm:
                sys.stdout.flush()
                # Feed the previous answers back so the user edits rather
                # than retypes.
                # NOTE(review): if `ans` and `kwrd` share a key this raises
                # TypeError (duplicate keyword argument) — confirm intended.
                ans = func(*args, **ans, **kwrd)
                confirm = prompt_validate(question, cancelable)
                if confirm == -1:
                    raise CancelError
            return ans

        return wrapper

    return decorator
|
||||
|
||||
|
||||
@prompt_until_validate()
def prompt_exam(**kwrd):
    """ Prompt questions to edit an exam """
    print(HTML("<b>Nouvelle évaluation</b>"))
    exam = {}
    exam["name"] = prompt("Nom de l'évaluation: ", default=kwrd.get("name", "DS"))

    tribes_name = [t["name"] for t in config["tribes"]]

    # The tribe name must match one of the configured tribes exactly.
    exam["tribename"] = prompt(
        "Nom de la classe: ",
        default=kwrd.get("tribename", ""),
        completer=WordCompleter(tribes_name),
        validator=Validator.from_callable(lambda x: x in tribes_name),
    )
    exam["tribe"] = [t for t in config["tribes"] if t["name"] == exam["tribename"]][0]

    # Date is typed as a 6-digit yymmdd string, then parsed to a datetime.
    exam["date"] = prompt(
        "Date de l'évaluation (%y%m%d): ",
        default=kwrd.get("date", datetime.today()).strftime("%y%m%d"),
        validator=Validator.from_callable(lambda x: (len(x) == 6) and x.isdigit()),
    )
    exam["date"] = datetime.strptime(exam["date"], "%y%m%d")

    exam["term"] = prompt(
        "Trimestre: ",
        validator=Validator.from_callable(lambda x: x.isdigit()),
        default=kwrd.get("term", "1"),
    )

    return exam
|
||||
|
||||
|
||||
@prompt_until_validate()
def prompt_exercise(number=1, completer={}, **kwrd):
    """Prompt to create or edit one exercise and its notation elements.

    :param number: default number used to build a new exercise's name.
    :param completer: completion words per field (e.g. {"theme": [...]}),
        forwarded to prompt_question.
    """
    exercise = {}
    try:
        kwrd["name"]
    except KeyError:
        # Creation mode: ask for a name.
        print(HTML("<b>Nouvel exercice</b>"))
        exercise["name"] = prompt(
            "Nom de l'exercice: ", default=kwrd.get("name", f"Exercice {number}")
        )
    else:
        # Edition mode: keep the existing name.
        print(HTML(f"<b>Modification de l'exercice: {kwrd['name']}</b>"))
        exercise["name"] = kwrd["name"]

    exercise["questions"] = []

    try:
        kwrd["questions"][0]
    except KeyError:
        last_question_id = "1a"
    except IndexError:
        last_question_id = "1a"
    else:
        # Edition mode: review each existing question; cancelled ones are
        # dropped from the exercise.
        for ques in kwrd["questions"]:
            try:
                exercise["questions"].append(
                    prompt_question(completer=completer, **ques)
                )
            except CancelError:
                print("Cette question a été supprimée")
        # NOTE(review): raises IndexError when every question above was
        # cancelled (the list is empty) — confirm intended.
        last_question_id = exercise["questions"][-1]["id"]

    appending = prompt_validate(
        question="Ajouter un élément de notation? ", style="appending"
    )
    while appending:
        try:
            exercise["questions"].append(
                prompt_question(last_question_id, completer=completer)
            )
        except CancelError:
            print("Cette question a été supprimée")
        else:
            last_question_id = exercise["questions"][-1]["id"]
        appending = prompt_validate(
            question="Ajouter un élément de notation? ", style="appending"
        )

    return exercise
|
||||
|
||||
|
||||
@prompt_until_validate(cancelable=True)
def prompt_question(last_question_id="1a", completer={}, **kwrd):
    """Prompt to create or edit one notation element (question).

    :param last_question_id: id of the previously created question.
        NOTE(review): accepted but not used in this body (the default id
        is kwrd's id or the literal "1a") — confirm intended.
    :param completer: completion words per field (e.g. {"theme": [...]}).
    :raises CancelError: when the user cancels (via prompt_until_validate).
    """
    try:
        kwrd["id"]
    except KeyError:
        print(HTML("<b>Nouvel élément de notation</b>"))
    else:
        print(
            HTML(f"<b>Modification de l'élément {kwrd['id']} ({kwrd['comment']})</b>")
        )

    question = {}
    question["id"] = prompt(
        "Identifiant de la question: ",
        default=kwrd.get("id", "1a"),
    )

    # Competence is restricted to the keys declared in the config.
    question["competence"] = prompt(
        "Competence: ",
        default=kwrd.get("competence", list(config["competences"].keys())[0]),
        completer=WordCompleter(config["competences"].keys()),
        validator=Validator.from_callable(lambda x: x in config["competences"].keys()),
    )

    question["theme"] = prompt(
        "Domaine: ",
        default=kwrd.get("theme", ""),
        completer=WordCompleter(completer.get("theme", [])),
    )

    question["comment"] = prompt(
        "Commentaire: ",
        default=kwrd.get("comment", ""),
    )

    question["is_leveled"] = prompt(
        "Évaluation par niveau: ",
        default=kwrd.get("is_leveled", "1"),
        # validate
    )

    question["score_rate"] = prompt(
        "Barème: ",
        default=kwrd.get("score_rate", "1"),
        # validate
    )

    return question
|
@@ -2,83 +2,17 @@
|
||||
# encoding: utf-8
|
||||
|
||||
import click
|
||||
from pathlib import Path
|
||||
import sys
|
||||
import papermill as pm
|
||||
import pandas as pd
|
||||
from datetime import datetime
|
||||
import yaml
|
||||
|
||||
from .getconfig import config, CONFIGPATH
|
||||
from ..config import NO_ST_COLUMNS
|
||||
from .exam import Exam
|
||||
from ..dashboard.index import app as dash
|
||||
|
||||
from recopytex.dashboard.index import app as dash
|
||||
|
||||
@click.group()
def cli():
    # Root click command group; subcommands are registered on it below.
    # (Comment rather than docstring: click would show a docstring as
    # the group's help text.)
    pass
|
||||
|
||||
|
||||
@cli.command()
def print_config():
    # Display the config file location and its parsed content.
    click.echo(f"Config file is {CONFIGPATH}")
    click.echo("It contains")
    click.echo(config)
|
||||
|
||||
|
||||
@cli.command()
def setup():
    """Setup the environnement using recoconfig.yml"""
    for tribe in config["tribes"]:
        # NOTE(review): the tribe directory is created relative to the
        # current working directory, not under config["source"] — confirm
        # intended (the Exam class builds paths under config["source"]).
        Path(tribe["name"]).mkdir(exist_ok=True)
        if not Path(tribe["students"]).exists():
            # Consistency fix: use click.echo like the rest of this CLI
            # instead of a bare print.
            click.echo(f"The file {tribe['students']} does not exists")
|
||||
|
||||
|
||||
@cli.command()
@click.option("--debug", default=0, help="Debug mode for dash")
def dashboard(debug):
    # Launch the Dash web dashboard; --debug 1 enables Dash's debug mode.
    dash.run_server(debug=bool(debug))
|
||||
|
||||
|
||||
@cli.command()
@click.argument("csv_file")
def report(csv_file):
    """Build the assessment report notebook from a scores csv file.

    CSV_FILE is expected to be <source>/<tribe>/<yymmdd>_<assessment>.csv;
    the report notebook is rendered with papermill into <output>/<tribe>/.
    """
    csv_file = Path(csv_file)
    if not csv_file.exists():
        click.echo(f"{csv_file} does not exists")
        sys.exit(1)
    if csv_file.suffix != ".csv":
        click.echo(f"{csv_file} has to be a csv file")
        sys.exit(1)

    tribe_dir = csv_file.parent
    # Filename without any extension: "<yymmdd>_<assessment>".
    csv_filename = csv_file.name.split(".")[0]

    assessment = str(csv_filename).split("_")[-1].capitalize()
    date = str(csv_filename).split("_")[0]
    try:
        date = datetime.strptime(date, "%y%m%d")
    except ValueError:
        # Bug fix: the original fallback called
        # datetime.today().strptime(date, "%y%m%d"), which re-parses the
        # same bad string (strptime ignores the instance) and raises the
        # same ValueError. Fall back to today's date instead.
        date = datetime.today()

    # Portable replacement for str(tribe_dir).split("/")[-1], which broke
    # on Windows path separators.
    tribe = tribe_dir.name

    template = Path(config["templates"]) / "tpl_evaluation.ipynb"

    dest = Path(config["output"]) / tribe / csv_filename
    dest.mkdir(parents=True, exist_ok=True)

    click.echo(f"Building {assessment} ({date:%d/%m/%y}) report")
    pm.execute_notebook(
        str(template),
        str(dest / f"{assessment}.ipynb"),
        parameters=dict(
            tribe=tribe,
            assessment=assessment,
            date=f"{date:%d/%m/%y}",
            csv_file=str(csv_file.absolute()),
        ),
    )
|
||||
# Allow running this module directly (python path/to/module.py) in
# addition to the installed console-script entry point.
if __name__ == "__main__":
    cli()
|
||||
|
@@ -1,77 +1,4 @@
|
||||
pandas
|
||||
click
|
||||
papermill
|
||||
prompt_toolkit
|
||||
ansiwrap==0.8.4
|
||||
appdirs==1.4.3
|
||||
attrs==19.1.0
|
||||
backcall==0.1.0
|
||||
black==19.10b0
|
||||
bleach==3.1.0
|
||||
certifi==2019.6.16
|
||||
chardet==3.0.4
|
||||
Click==7.0
|
||||
colorama==0.4.1
|
||||
cycler==0.10.0
|
||||
decorator==4.4.0
|
||||
defusedxml==0.6.0
|
||||
entrypoints==0.3
|
||||
future==0.17.1
|
||||
idna==2.8
|
||||
importlib-resources==1.0.2
|
||||
ipykernel==5.1.3
|
||||
ipython==7.11.1
|
||||
ipython-genutils==0.2.0
|
||||
ipywidgets==7.5.1
|
||||
jedi==0.15.2
|
||||
Jinja2==2.10.3
|
||||
jsonschema==3.2.0
|
||||
jupyter==1.0.0
|
||||
jupyter-client==5.3.4
|
||||
jupyter-console==6.1.0
|
||||
jupyter-core==4.6.1
|
||||
jupytex==0.0.3
|
||||
kiwisolver==1.1.0
|
||||
Markdown==3.1.1
|
||||
MarkupSafe==1.1.1
|
||||
matplotlib==3.1.2
|
||||
mistune==0.8.4
|
||||
nbconvert==5.6.1
|
||||
nbformat==5.0.3
|
||||
notebook==6.0.3
|
||||
numpy==1.18.1
|
||||
pandas==0.25.3
|
||||
pandocfilters==1.4.2
|
||||
papermill==1.2.1
|
||||
parso==0.5.2
|
||||
pathspec==0.7.0
|
||||
pexpect==4.8.0
|
||||
pickleshare==0.7.5
|
||||
prometheus-client==0.7.1
|
||||
prompt-toolkit==1.0.14
|
||||
ptyprocess==0.6.0
|
||||
Pygments==2.5.2
|
||||
PyInquirer==1.0.3
|
||||
pyparsing==2.4.6
|
||||
pyrsistent==0.15.7
|
||||
python-dateutil==2.8.0
|
||||
pytz==2019.3
|
||||
PyYAML==5.3
|
||||
pyzmq==18.1.1
|
||||
qtconsole==4.6.0
|
||||
-e git+git_opytex:/lafrite/recopytex.git@7e026bedb24c1ca8bef3b71b3d63f8b0d6916e81#egg=Recopytex
|
||||
regex==2020.1.8
|
||||
requests==2.22.0
|
||||
scipy==1.4.1
|
||||
Send2Trash==1.5.0
|
||||
six==1.12.0
|
||||
tenacity==6.0.0
|
||||
terminado==0.8.3
|
||||
testpath==0.4.4
|
||||
textwrap3==0.9.2
|
||||
toml==0.10.0
|
||||
tornado==6.0.3
|
||||
tqdm==4.41.1
|
||||
traitlets==4.3.2
|
||||
typed-ast==1.4.1
|
||||
urllib3==1.25.8
|
||||
wcwidth==0.1.8
|
||||
webencodings==0.5.1
|
||||
widgetsnbextension==3.5.1
|
||||
|
6
setup.py
6
setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
|
||||
|
||||
setup(
|
||||
name='Recopytex',
|
||||
version='1.1.1',
|
||||
version='0.1',
|
||||
description='Assessment analysis',
|
||||
author='Benjamin Bertrand',
|
||||
author_email='',
|
||||
@@ -13,10 +13,6 @@ setup(
|
||||
include_package_data=True,
|
||||
install_requires=[
|
||||
'Click',
|
||||
'pandas',
|
||||
'numpy',
|
||||
'papermill',
|
||||
'pyyaml',
|
||||
],
|
||||
entry_points='''
|
||||
[console_scripts]
|
||||
|
13
test_config.yml
Normal file
13
test_config.yml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
source: ./example
|
||||
output: ./output
|
||||
templates: templates/
|
||||
|
||||
tribes:
|
||||
Tribe1:
|
||||
name: Tribe1
|
||||
type: Type1
|
||||
students: tribe1.csv
|
||||
Tribe2:
|
||||
name: Tribe2
|
||||
students: tribe2.csv
|
Reference in New Issue
Block a user