Compare commits: 6889ddd97c...master (10 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 7058c79975 |  |
|  | d488807c57 |  |
|  | 7e026bedb2 |  |
|  | 33117cde71 |  |
|  | 7d2cde304d |  |
|  | 409b80994a |  |
|  | 6fb11cb054 |  |
|  | 7a0bb4179d |  |
|  | fe3280b91d |  |
|  | 3e85c3829d |  |
.gitignore (vendored): 4 lines changed
@@ -122,7 +122,3 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
-
-# vim
-.vim
-
README.md: 26 lines changed
@@ -6,29 +6,3 @@ Cette fois ci, on utilise:
 - Des fichiers yaml pour les infos sur les élèves
 - Des notebooks pour l'analyse
 - Papermill pour produire les notesbooks à partir de template
-
-## Les fichiers CSV
-
-les paramètres sont décris dans ./recopytex/config.py
-
-### Descriptions des questions
-
-- Trimestre
-- Nom
-- Date
-- Exercice
-- Question
-- Competence
-- Domaine
-- Commentaire
-- Bareme
-- Est_nivele
-
-
-### Valeurs pour notes les élèves
-
-- Score: 0, 1, 2, 3
-- Pas de réponses: .
-- Absent: a
-- Dispensé: (vide)
-
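The removed README section above documents the sheet layout: ten question-metadata columns followed by one column per student, with cells holding 0 to 3, "." for no answer, "a" for absent, and an empty cell for exempted students. As a quick illustration (not part of the diff), a minimal pandas sketch that separates metadata from student columns; the file path is only an example taken from the sample data below:

```python
import pandas as pd

# Question metadata columns listed in the removed README section.
META = ["Trimestre", "Nom", "Date", "Exercice", "Question", "Competence",
        "Domaine", "Commentaire", "Bareme", "Est_nivele"]

# Example sheet following that layout (path borrowed from the sample data below).
df = pd.read_csv("example/Tribe1/210112_DS.csv")

# Every remaining column is one student; cells hold 0-3, ".", "a" or are empty.
students = [c for c in df.columns if c not in META]
print(students)
```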
@@ -1,5 +0,0 @@
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS,12/01/2021,Exercice 1,1,Calculer,Plop,Coucou,1,1,,,1.0,0,1.0,2.0,3.0,0.0,3.0,3.0,2.0,,1.0,,,,,,,
1,DS,12/01/2021,Exercice 1,2,Calculer,C'est trop chouette!,Coucou,1,1,,,1.0,2,,,3.0,3.0,,,,,2.0,,,,,,,
1,DS,12/01/2021,Exercice 1,3,Calculer,Null,Coucou,1,1,,,,3,2.0,,,,,,,,3.0,,,,,,,
1,DS,12/01/2021,Exercice 1,3,Calculer,Nié,DChic,1,1,,,,2,,,,,,,,,,,,,,,,
@@ -1,5 +0,0 @@
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS6,22/01/2021,Exercice 1,Sait pas,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,Ha,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 2,grr,,,,,,,,,,,,,,,,,,,,,,,,,
@@ -1,13 +0,0 @@
---
source: ./
output: ./
templates: templates/

tribes:
  Tribe1:
    name: Tribe1
    type: Type1
    students: tribe1.csv
  Tribe2:
    name: Tribe2
    students: tribe2.csv
@@ -1,21 +0,0 @@
Nom,email
Star Tice,stice0@jalbum.net
Umberto Dingate,udingate1@tumblr.com
Starlin Crangle,scrangle2@wufoo.com
Humbert Bourcq,hbourcq3@g.co
Gabriella Handyside,ghandyside4@patch.com
Stewart Eaves,seaves5@ycombinator.com
Erick Going,egoing6@va.gov
Ase Praton,apraton7@va.gov
Rollins Planks,rplanks8@delicious.com
Dunstan Sarjant,dsarjant9@naver.com
Stacy Guiton,sguitona@themeforest.net
Ange Stanes,astanesb@marriott.com
Amabelle Elleton,aelletonc@squidoo.com
Darn Broomhall,dbroomhalld@cisco.com
Dyan Chatto,dchattoe@npr.org
Keane Rennebach,krennebachf@dot.gov
Nari Paulton,npaultong@gov.uk
Brandy Wase,bwaseh@ftc.gov
Jaclyn Firidolfi,jfiridolfii@reuters.com
Violette Lockney,vlockneyj@chron.com
@@ -1,21 +0,0 @@
Nom,email
Elle McKintosh,emckintosh0@1und1.de
Ty Megany,tmegany1@reuters.com
Pippa Borrows,pborrows2@a8.net
Sonny Eskrick,seskrick3@123-reg.co.uk
Mollee Britch,mbritch4@usda.gov
Ingram Plaistowe,iplaistowe5@purevolume.com
Fay Vanyard,fvanyard6@sbwire.com
Nancy Rase,nrase7@omniture.com
Rachael Ruxton,rruxton8@bravesites.com
Tallie Rushmer,trushmer9@home.pl
Seward MacIlhagga,smacilhaggaa@hatena.ne.jp
Lizette Searl,lsearlb@list-manage.com
Talya Mannagh,tmannaghc@webnode.com
Jordan Witherbed,jwitherbedd@unesco.org
Reagan Botcherby,rbotcherbye@scientificamerican.com
Libbie Shoulder,lshoulderf@desdev.cn
Abner Khomich,akhomichg@youtube.com
Zollie Kitman,zkitmanh@forbes.com
Fiorenze Durden,fdurdeni@feedburner.com
Kevyn Race,kracej@seattletimes.com
recoconfig.yml (new file): 4 lines
@@ -0,0 +1,4 @@
---
source: sheets/
output: reports/
templates: templates/
@@ -0,0 +1,5 @@
#!/usr/bin/env python
# encoding: utf-8

from .csv_extraction import flat_df_students, flat_df_for
from .df_marks_manip import pp_q_scores
recopytex/config.py (new file): 30 lines
@@ -0,0 +1,30 @@
#!/usr/bin/env python
# encoding: utf-8

NO_ST_COLUMNS = {
    "assessment": "Nom",
    "term": "Trimestre",
    "date": "Date",
    "exercise": "Exercice",
    "question": "Question",
    "competence": "Competence",
    "theme": "Domaine",
    "comment": "Commentaire",
    "is_leveled": "Est_nivele",
    "score_rate": "Bareme",
}

COLUMNS = {
    **NO_ST_COLUMNS,
    "student": "Eleve",
    "score": "Score",
    "mark": "Note",
    "level": "Niveau",
    "normalized": "Normalise",
}

VALIDSCORE = {
    "NOTFILLED": "",  # The item is not scored yet
    "NOANSWER": ".",  # Student gives no answer (this score will impact the fianl mark)
    "ABS": "a",  # Student has absent (this score won't be impact the final mark)
}
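A short illustration (not part of the diff) of how these mappings are meant to be used: invert COLUMNS to rename the French CSV headers to the internal English keys. The two-row dataframe is made up:

```python
import pandas as pd

from recopytex.config import COLUMNS

# Toy data using the CSV header names (the values of the mapping).
df = pd.DataFrame(
    {"Exercice": ["Exercice 1", "Exercice 1"], "Question": ["1", "2"], "Bareme": [1, 2]}
)

# Invert the mapping: CSV header (French) -> internal key (English).
df = df.rename(columns={v: k for k, v in COLUMNS.items()})
print(df.columns.tolist())  # ['exercise', 'question', 'score_rate']
```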
recopytex/csv_extraction.py (new file): 119 lines
@@ -0,0 +1,119 @@
#!/usr/bin/env python
# encoding: utf-8

""" Extracting data from xlsx files """

import pandas as pd
from .config import NO_ST_COLUMNS, COLUMNS, VALIDSCORE

pd.set_option("Precision", 2)


def try_replace(x, old, new):
    try:
        return str(x).replace(old, new)
    except ValueError:
        return x


def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
    """ Extract the list of students from df

    :param df: the dataframe
    :param no_student_columns: columns that are not students
    :return: list of students
    """
    students = df.columns.difference(no_student_columns)
    return students


def flat_df_students(
    df, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """ Flat the dataframe by returning a dataframe with on student on each line

    :param df: the dataframe (one row per questions)
    :param no_student_columns: columns that are not students
    :return: dataframe with one row per questions and students

    Columns of csv files:

    - NO_ST_COLUMNS meta data on questions
    - one for each students

    This function flat student's columns to "student" and "score"
    """
    students = extract_students(df, no_student_columns)
    scores = []
    for st in students:
        scores.append(
            pd.melt(
                df,
                id_vars=no_student_columns,
                value_vars=st,
                var_name=COLUMNS["student"],
                value_name=COLUMNS["score"],
            ).dropna(subset=[COLUMNS["score"]])
        )
    if postprocessing:
        return postprocess(pd.concat(scores))
    return pd.concat(scores)


def flat_df_for(
    df, student, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """ Extract the data only for one student

    :param df: the dataframe (one row per questions)
    :param no_student_columns: columns that are not students
    :return: dataframe with one row per questions and students

    Columns of csv files:

    - NO_ST_COLUMNS meta data on questions
    - one for each students

    """
    students = extract_students(df, no_student_columns)
    if student not in students:
        raise KeyError("This student is not in the table")
    st_df = df[list(no_student_columns) + [student]]
    st_df = st_df.rename(columns={student: COLUMNS["score"]}).dropna(
        subset=[COLUMNS["score"]]
    )
    if postprocessing:
        return postprocess(st_df)
    return st_df


def postprocess(df):
    """ Postprocessing score dataframe

    - Replace na with an empty string
    - Replace "NOANSWER" with -1
    - Turn commas number to dot numbers
    """

    df[COLUMNS["question"]].fillna("", inplace=True)
    df[COLUMNS["exercise"]].fillna("", inplace=True)
    df[COLUMNS["comment"]].fillna("", inplace=True)
    df[COLUMNS["competence"]].fillna("", inplace=True)

    df[COLUMNS["score"]] = pd.to_numeric(
        df[COLUMNS["score"]]
        .replace(VALIDSCORE["NOANSWER"], -1)
        .apply(lambda x: try_replace(x, ",", "."))
    )
    df[COLUMNS["score_rate"]] = pd.to_numeric(
        df[COLUMNS["score_rate"]].apply(lambda x: try_replace(x, ",", ".")),
        errors="coerce",
    )

    return df


# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
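To make the flattening step concrete, a small usage sketch (not part of the diff) with a made-up two-question, two-student sheet; it assumes the package added in this compare is importable:

```python
import pandas as pd

from recopytex.csv_extraction import flat_df_students

# Wide layout: question metadata plus one column per student.
wide = pd.DataFrame(
    {
        "Trimestre": [1, 1],
        "Nom": ["DS", "DS"],
        "Date": ["12/01/2021", "12/01/2021"],
        "Exercice": ["Exercice 1", "Exercice 1"],
        "Question": ["1", "2"],
        "Competence": ["Calculer", "Calculer"],
        "Domaine": ["Plop", "Plop"],
        "Commentaire": ["", ""],
        "Bareme": [1, 1],
        "Est_nivele": [1, 1],
        "Star Tice": [3, "."],
        "Umberto Dingate": [2, 1],
    }
)

# One row per (student, question); "." is turned into -1 by postprocess().
flat = flat_df_students(wide)
print(flat[["Eleve", "Question", "Score"]])
```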
@@ -1,20 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
import dash
|
|
||||||
import flask
|
|
||||||
|
|
||||||
from .layout.layout import layout
|
|
||||||
|
|
||||||
server = flask.Flask(__name__)
|
|
||||||
app = dash.Dash(
|
|
||||||
__name__,
|
|
||||||
server=server,
|
|
||||||
suppress_callback_exceptions=True,
|
|
||||||
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
app.layout = layout
|
|
||||||
server = app.server
|
|
||||||
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
|
|
||||||
def highlight_scores(highlight_columns, score_color):
|
|
||||||
""" Cells style in a datatable for scores
|
|
||||||
|
|
||||||
:param highlight_columns: columns to highlight
|
|
||||||
:param value_color: dictionnary {"score": "color"}
|
|
||||||
|
|
||||||
"""
|
|
||||||
hight = []
|
|
||||||
for v, color in score_color.items():
|
|
||||||
if v:
|
|
||||||
hight += [
|
|
||||||
{
|
|
||||||
"if": {"filter_query": "{{{}}} = {}".format(col, v), "column_id": col},
|
|
||||||
"backgroundColor": color,
|
|
||||||
"color": "white",
|
|
||||||
}
|
|
||||||
for col in highlight_columns
|
|
||||||
]
|
|
||||||
return hight
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from .app import app, server
|
|
||||||
from .routes import render_page_content
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
app.run_server(debug=True)
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
import dash_html_components as html
|
|
||||||
import dash_core_components as dcc
|
|
||||||
|
|
||||||
content = html.Div(id="page-content")
|
|
||||||
|
|
||||||
layout = html.Div([dcc.Location(id="url"), content])
|
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
import dash_html_components as html
|
|
||||||
import dash_core_components as dcc
|
|
||||||
from .models import get_tribes, get_exams
|
|
||||||
from .callbacks import *
|
|
||||||
|
|
||||||
layout = html.Div(
|
|
||||||
children=[
|
|
||||||
html.Header(
|
|
||||||
children=[
|
|
||||||
html.H1("Analyse des notes"),
|
|
||||||
html.P("Dernière sauvegarde", id="lastsave"),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
html.Main(
|
|
||||||
children=[
|
|
||||||
html.Section(
|
|
||||||
children=[
|
|
||||||
html.Div(
|
|
||||||
children=[
|
|
||||||
"Classe: ",
|
|
||||||
dcc.Dropdown(
|
|
||||||
id="tribe",
|
|
||||||
options=[
|
|
||||||
{"label": t["name"], "value": t["name"]}
|
|
||||||
for t in get_tribes().values()
|
|
||||||
],
|
|
||||||
value=next(iter(get_tribes().values()))["name"],
|
|
||||||
),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
html.Div(
|
|
||||||
children=[
|
|
||||||
"Evaluation: ",
|
|
||||||
dcc.Dropdown(id="exam_select"),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
],
|
|
||||||
id="selects",
|
|
||||||
),
|
|
||||||
html.Section(
|
|
||||||
children=[
|
|
||||||
html.Div(
|
|
||||||
children=[],
|
|
||||||
id="final_score_table_container",
|
|
||||||
),
|
|
||||||
],
|
|
||||||
id="analysis",
|
|
||||||
),
|
|
||||||
html.Section(
|
|
||||||
children=[
|
|
||||||
dash_table.DataTable(
|
|
||||||
id="scores_table",
|
|
||||||
columns=[],
|
|
||||||
style_data_conditional=[],
|
|
||||||
fixed_columns={},
|
|
||||||
editable=True,
|
|
||||||
)
|
|
||||||
],
|
|
||||||
id="edit",
|
|
||||||
),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
dcc.Store(id="scores"),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
@@ -1,79 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from dash.dependencies import Input, Output, State
|
|
||||||
import dash
|
|
||||||
from dash.exceptions import PreventUpdate
|
|
||||||
import dash_table
|
|
||||||
import json
|
|
||||||
import pandas as pd
|
|
||||||
|
|
||||||
from recopytex.dashboard.app import app
|
|
||||||
from recopytex.dashboard.common.formating import highlight_scores
|
|
||||||
|
|
||||||
from .models import (
|
|
||||||
get_tribes,
|
|
||||||
get_exams,
|
|
||||||
get_unstack_scores,
|
|
||||||
get_students_from_exam,
|
|
||||||
get_score_colors,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@app.callback(
|
|
||||||
[
|
|
||||||
Output("exam_select", "options"),
|
|
||||||
Output("exam_select", "value"),
|
|
||||||
],
|
|
||||||
[Input("tribe", "value")],
|
|
||||||
)
|
|
||||||
def update_exams_choices(tribe):
|
|
||||||
if not tribe:
|
|
||||||
raise PreventUpdate
|
|
||||||
exams = get_exams(tribe)
|
|
||||||
exams.reset_index(inplace=True)
|
|
||||||
if not exams.empty:
|
|
||||||
return [
|
|
||||||
{"label": e["name"], "value": e.to_json()} for i, e in exams.iterrows()
|
|
||||||
], exams.loc[0].to_json()
|
|
||||||
return [], None
|
|
||||||
|
|
||||||
|
|
||||||
@app.callback(
|
|
||||||
[
|
|
||||||
Output("scores_table", "columns"),
|
|
||||||
Output("scores_table", "data"),
|
|
||||||
Output("scores_table", "style_data_conditional"),
|
|
||||||
Output("scores_table", "fixed_columns"),
|
|
||||||
],
|
|
||||||
[
|
|
||||||
Input("exam_select", "value"),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
def update_scores_store(exam):
|
|
||||||
ctx = dash.callback_context
|
|
||||||
if not exam:
|
|
||||||
return [[], [], [], {}]
|
|
||||||
exam = pd.DataFrame.from_dict([json.loads(exam)])
|
|
||||||
scores = get_unstack_scores(exam)
|
|
||||||
fixed_columns = [
|
|
||||||
"exercise",
|
|
||||||
"question",
|
|
||||||
"competence",
|
|
||||||
"theme",
|
|
||||||
"comment",
|
|
||||||
"score_rate",
|
|
||||||
"is_leveled",
|
|
||||||
]
|
|
||||||
|
|
||||||
students = list(get_students_from_exam(exam))
|
|
||||||
columns = fixed_columns + students
|
|
||||||
|
|
||||||
score_color = get_score_colors()
|
|
||||||
|
|
||||||
return [
|
|
||||||
[{"id": c, "name": c} for c in columns],
|
|
||||||
scores.to_dict("records"),
|
|
||||||
highlight_scores(students, score_color),
|
|
||||||
{"headers": True, "data": len(fixed_columns)},
|
|
||||||
]
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from recopytex.database.filesystem.loader import CSVLoader
|
|
||||||
from recopytex.datalib.dataframe import column_values_to_column
|
|
||||||
|
|
||||||
LOADER = CSVLoader("./test_config.yml")
|
|
||||||
|
|
||||||
|
|
||||||
def get_tribes():
|
|
||||||
return LOADER.get_tribes()
|
|
||||||
|
|
||||||
|
|
||||||
def get_exams(tribe):
|
|
||||||
return LOADER.get_exams([tribe])
|
|
||||||
|
|
||||||
|
|
||||||
def get_record_scores(exam):
|
|
||||||
return LOADER.get_exam_scores(exam)
|
|
||||||
|
|
||||||
|
|
||||||
def get_unstack_scores(exam):
|
|
||||||
flat_scores = LOADER.get_exam_scores(exam)
|
|
||||||
kept_columns = [col for col in LOADER.score_columns if col != "score"]
|
|
||||||
return column_values_to_column("student_name", "score", kept_columns, flat_scores)
|
|
||||||
|
|
||||||
|
|
||||||
def get_students_from_exam(exam):
|
|
||||||
flat_scores = LOADER.get_exam_scores(exam)
|
|
||||||
return flat_scores["student_name"].unique()
|
|
||||||
|
|
||||||
|
|
||||||
def get_score_colors():
|
|
||||||
scores_config = LOADER.get_config()["valid_scores"]
|
|
||||||
score_color = {}
|
|
||||||
for key, score in scores_config.items():
|
|
||||||
score_color[score["value"]] = score["color"]
|
|
||||||
return score_color
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
import dash_html_components as html
|
|
||||||
from recopytex.database.filesystem.loader import CSVLoader
|
|
||||||
from .models import get_tribes, get_exams, get_students
|
|
||||||
|
|
||||||
loader = CSVLoader("./test_config.yml")
|
|
||||||
|
|
||||||
|
|
||||||
def listing(elements, formating=lambda x: x):
|
|
||||||
|
|
||||||
return html.Ul(
|
|
||||||
children=[html.Li(children=formating(element)) for element in elements]
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def format_tribe(tribe):
|
|
||||||
children = [html.H3(tribe["name"])]
|
|
||||||
exams = loader.get_exams([tribe["name"]])
|
|
||||||
if exams.empty:
|
|
||||||
children.append(html.P("Pas d'évaluation"))
|
|
||||||
else:
|
|
||||||
exams_html = listing([exam for id, exam in exams.iterrows()], format_exam)
|
|
||||||
children.append(exams_html)
|
|
||||||
return children
|
|
||||||
|
|
||||||
|
|
||||||
def format_exam(exam):
|
|
||||||
children = [html.P(exam["name"])]
|
|
||||||
return children
|
|
||||||
|
|
||||||
|
|
||||||
layout = html.Div(
|
|
||||||
children=[
|
|
||||||
html.H1("Recopytex"),
|
|
||||||
html.H2("Tribes"),
|
|
||||||
html.Div(
|
|
||||||
children=[listing(loader.get_tribes().values(), format_tribe)],
|
|
||||||
id="tribes",
|
|
||||||
),
|
|
||||||
html.H2("Config"),
|
|
||||||
html.Div(
|
|
||||||
children=[
|
|
||||||
html.P(str(loader.get_config())),
|
|
||||||
],
|
|
||||||
id="config",
|
|
||||||
),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from dash.dependencies import Input, Output
|
|
||||||
from recopytex.dashboard.app import app
|
|
||||||
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
|
|
||||||
def get_tribes(loader):
|
|
||||||
return loader.get_tribes()
|
|
||||||
|
|
||||||
|
|
||||||
def get_exams(loader, tribe):
|
|
||||||
return loader.get_exams([tribe])
|
|
||||||
|
|
||||||
|
|
||||||
def get_students(loader, tribe):
|
|
||||||
return loader.get_students([tribe])
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from dash.dependencies import Input, Output
|
|
||||||
|
|
||||||
from .app import app
|
|
||||||
from .pages.home import app as home
|
|
||||||
from .pages.exams_scores import app as exams_scores
|
|
||||||
|
|
||||||
|
|
||||||
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
|
|
||||||
def render_page_content(pathname):
|
|
||||||
if pathname == "/":
|
|
||||||
return home.layout
|
|
||||||
elif pathname == "/exams/scores/":
|
|
||||||
return exams_scores.layout
|
|
||||||
# elif pathname == iris_page_location:
|
|
||||||
# return iris.layout
|
|
||||||
# # If the user tries to reach a different page, return a 404 message
|
|
||||||
return html.Div(
|
|
||||||
[
|
|
||||||
html.H1("404: Not found", className="text-danger"),
|
|
||||||
html.Hr(),
|
|
||||||
html.P(f"The pathname {pathname} was not recognised..."),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
@@ -1,88 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
Adapter to pull data from the filesystem
|
|
||||||
|
|
||||||
# Loader
|
|
||||||
|
|
||||||
# Writer
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class Loader(ABC):
|
|
||||||
|
|
||||||
"""Load data from source"""
|
|
||||||
|
|
||||||
CONFIG = {}
|
|
||||||
|
|
||||||
def __init__(self, configfile=""):
|
|
||||||
"""Init loader
|
|
||||||
|
|
||||||
:param configfile: yaml file with informations on data source
|
|
||||||
"""
|
|
||||||
self._config = self.CONFIG
|
|
||||||
if configfile.endswith(".yml"):
|
|
||||||
with open(configfile, "r") as config:
|
|
||||||
self._config.update(yaml.load(config, Loader=yaml.FullLoader))
|
|
||||||
|
|
||||||
def get_config(self):
|
|
||||||
""" Get config"""
|
|
||||||
return self._config
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_tribes(self):
|
|
||||||
""" Get tribes list """
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_exams(self, tribes=[]):
|
|
||||||
"""Get exams list
|
|
||||||
|
|
||||||
:param tribes: get only exams for those tribes
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_students(self, tribes=[]):
|
|
||||||
"""Get student list
|
|
||||||
|
|
||||||
:param filters: list of filters
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_exam_questions(self, exams=[]):
|
|
||||||
"""Get questions for the exam
|
|
||||||
|
|
||||||
:param exams: questions for those exams only
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_questions_scores(self, questions=[]):
|
|
||||||
"""Get scores of those questions
|
|
||||||
|
|
||||||
:param questions: score for those questions
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
# @abstractmethod
|
|
||||||
# def get_student_scores(self, student):
|
|
||||||
# """Get scores of the student
|
|
||||||
|
|
||||||
# :param student:
|
|
||||||
# """
|
|
||||||
# pass
|
|
||||||
|
|
||||||
|
|
||||||
class Writer(ABC):
|
|
||||||
|
|
||||||
""" Write datas to the source """
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
pass
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
"""
|
|
||||||
Store data using filesystem for organisation, csv for scores
|
|
||||||
|
|
||||||
## Organisation
|
|
||||||
|
|
||||||
- tribe1.csv # list of students for the tribe
|
|
||||||
- tribe1/
|
|
||||||
- exam1.csv # questions and scores for exam1
|
|
||||||
- exam1.yml # Extra information about exam1
|
|
||||||
- exam2.csv # questions and scores for exam2
|
|
||||||
"""
|
|
||||||
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
---
|
|
||||||
source: ./ # basepath where to start
|
|
||||||
|
|
||||||
competences: # Competences
|
|
||||||
Chercher:
|
|
||||||
name: Chercher
|
|
||||||
abrv: Cher
|
|
||||||
Représenter:
|
|
||||||
name: Représenter
|
|
||||||
abrv: Rep
|
|
||||||
Modéliser:
|
|
||||||
name: Modéliser
|
|
||||||
abrv: Mod
|
|
||||||
Raisonner:
|
|
||||||
name: Raisonner
|
|
||||||
abrv: Rai
|
|
||||||
Calculer:
|
|
||||||
name: Calculer
|
|
||||||
abrv: Cal
|
|
||||||
Communiquer:
|
|
||||||
name: Communiquer
|
|
||||||
abrv: Com
|
|
||||||
|
|
||||||
valid_scores: #
|
|
||||||
BAD: # Everything is bad
|
|
||||||
value: 0
|
|
||||||
color: "#E7472B"
|
|
||||||
FEW: # Few good things
|
|
||||||
value: 1
|
|
||||||
color: "#FF712B"
|
|
||||||
NEARLY: # Nearly good but things are missing
|
|
||||||
value: 2
|
|
||||||
color: "#F2EC4C"
|
|
||||||
GOOD: # Everything is good
|
|
||||||
value: 3
|
|
||||||
color: "#68D42F"
|
|
||||||
NOTFILLED: # The item is not scored yet
|
|
||||||
value: ""
|
|
||||||
color: white
|
|
||||||
NOANSWER: # Student gives no answer (count as 0)
|
|
||||||
value: "."
|
|
||||||
color: black
|
|
||||||
ABS: # Student has absent (this score won't be impact the final mark)
|
|
||||||
value: a
|
|
||||||
color: lightgray
|
|
||||||
|
|
||||||
csv_fields: # dataframe_field: csv_field
|
|
||||||
term: Trimestre
|
|
||||||
exam: Nom
|
|
||||||
date: Date
|
|
||||||
exercise: Exercice
|
|
||||||
question: Question
|
|
||||||
competence: Competence
|
|
||||||
theme: Domaine
|
|
||||||
comment: Commentaire
|
|
||||||
score_rate: Bareme
|
|
||||||
is_leveled: Est_nivele
|
|
||||||
|
|
||||||
id_templates:
|
|
||||||
exam: "{name}_{tribe}"
|
|
||||||
question: "{exam_id}_{exercise}_{question}_{comment}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
import pandas as pd
|
|
||||||
from pathlib import Path
|
|
||||||
from unidecode import unidecode
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = ["list_csvs", "extract_fields"]
|
|
||||||
|
|
||||||
|
|
||||||
def list_csvs(path):
|
|
||||||
"""list csv files in path
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> list_csvs("./example/Tribe1/")
|
|
||||||
[PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
|
|
||||||
>>> list_csvs("./example/Tribe1")
|
|
||||||
[PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
|
|
||||||
"""
|
|
||||||
return list(Path(path).glob("*.csv"))
|
|
||||||
|
|
||||||
|
|
||||||
def extract_fields(csv_filename, fields=[], remove_duplicates=True):
|
|
||||||
"""Extract fields in csv
|
|
||||||
|
|
||||||
:param csv_filename: csv filename (with header)
|
|
||||||
:param fields: list of fields to extract (all fields if empty list - default)
|
|
||||||
:param remove_duplicates: keep uniques rows (default True)
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> extract_fields("./example/Tribe1/210122_DS6.csv", ["Trimestre", "Nom", "Date"])
|
|
||||||
Trimestre Nom Date
|
|
||||||
0 1 DS6 22/01/2021
|
|
||||||
"""
|
|
||||||
df = pd.read_csv(csv_filename)
|
|
||||||
if fields:
|
|
||||||
df = df[fields]
|
|
||||||
if remove_duplicates:
|
|
||||||
return df.drop_duplicates()
|
|
||||||
return df
|
|
||||||
|
|
||||||
|
|
||||||
def build_id(template, element):
|
|
||||||
"""Build an id from template to the element
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> element = {"name": "pléà", "place": "here", "foo":"bar"}
|
|
||||||
>>> build_id("{name} {place}", element)
|
|
||||||
'plea_here'
|
|
||||||
"""
|
|
||||||
return unidecode(template.format(**element)).replace(" ", "_")
|
|
||||||
@@ -1,297 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
import yaml
|
|
||||||
import os
|
|
||||||
import uuid
|
|
||||||
from pathlib import Path
|
|
||||||
import pandas as pd
|
|
||||||
from .. import Loader
|
|
||||||
from .lib import list_csvs, extract_fields, build_id
|
|
||||||
|
|
||||||
|
|
||||||
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "default_config.yml")
|
|
||||||
with open(DEFAULT_CONFIG_FILE, "r") as config:
|
|
||||||
DEFAULT_CONFIG = yaml.load(config, Loader=yaml.FullLoader)
|
|
||||||
|
|
||||||
|
|
||||||
def maybe_dataframe(datas):
|
|
||||||
try:
|
|
||||||
return [e[1] for e in datas.iterrows()]
|
|
||||||
except AttributeError:
|
|
||||||
return datas
|
|
||||||
|
|
||||||
|
|
||||||
class CSVLoader(Loader):
|
|
||||||
|
|
||||||
"""Loader when scores and metadatas are stored in csv files
|
|
||||||
|
|
||||||
:config:
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader()
|
|
||||||
>>> loader.get_config()
|
|
||||||
{'source': './', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'valid_scores': {'BAD': {'value': 0, 'color': '#E7472B'}, 'FEW': {'value': 1, 'color': '#FF712B'}, 'NEARLY': {'value': 2, 'color': '#F2EC4C'}, 'GOOD': {'value': 3, 'color': '#68D42F'}, 'NOTFILLED': {'value': '', 'color': 'white'}, 'NOANSWER': {'value': '.', 'color': 'black'}, 'ABS': {'value': 'a', 'color': 'lightgray'}}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}}
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> loader.get_config()
|
|
||||||
{'source': './example', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'valid_scores': {'BAD': {'value': 0, 'color': '#E7472B'}, 'FEW': {'value': 1, 'color': '#FF712B'}, 'NEARLY': {'value': 2, 'color': '#F2EC4C'}, 'GOOD': {'value': 3, 'color': '#68D42F'}, 'NOTFILLED': {'value': '', 'color': 'white'}, 'NOANSWER': {'value': '.', 'color': 'black'}, 'ABS': {'value': 'a', 'color': 'lightgray'}}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}, 'output': './output', 'templates': 'templates/', 'tribes': {'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}}
|
|
||||||
"""
|
|
||||||
|
|
||||||
CONFIG = DEFAULT_CONFIG
|
|
||||||
|
|
||||||
def get_config(self):
|
|
||||||
""" Get config """
|
|
||||||
return self._config
|
|
||||||
|
|
||||||
@property
|
|
||||||
def exam_columns(self):
|
|
||||||
return pd.Index(["name", "date", "term", "origin", "tribe", "id"])
|
|
||||||
|
|
||||||
@property
|
|
||||||
def question_columns(self):
|
|
||||||
return pd.Index(
|
|
||||||
[
|
|
||||||
"exercise",
|
|
||||||
"question",
|
|
||||||
"competence",
|
|
||||||
"theme",
|
|
||||||
"comment",
|
|
||||||
"score_rate",
|
|
||||||
"is_leveled",
|
|
||||||
"origin",
|
|
||||||
"exam_id",
|
|
||||||
"id",
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def score_columns(self):
|
|
||||||
return pd.Index(
|
|
||||||
[
|
|
||||||
"term",
|
|
||||||
"exam",
|
|
||||||
"date",
|
|
||||||
"exercise",
|
|
||||||
"question",
|
|
||||||
"competence",
|
|
||||||
"theme",
|
|
||||||
"comment",
|
|
||||||
"score_rate",
|
|
||||||
"is_leveled",
|
|
||||||
"origin",
|
|
||||||
"exam_id",
|
|
||||||
"question_id",
|
|
||||||
"student_name",
|
|
||||||
"score",
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
def rename_columns(self, dataframe):
|
|
||||||
"""Rename dataframe column to match with `csv_fields`
|
|
||||||
|
|
||||||
:param dataframe: the dataframe
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader()
|
|
||||||
>>>
|
|
||||||
|
|
||||||
"""
|
|
||||||
return dataframe.rename(
|
|
||||||
columns={v: k for k, v in self._config["csv_fields"].items()}
|
|
||||||
)
|
|
||||||
|
|
||||||
def reverse_csv_field(self, keys):
|
|
||||||
""" Reverse csv field from keys """
|
|
||||||
return [self._config["csv_fields"][k] for k in keys]
|
|
||||||
|
|
||||||
def get_tribes(self, only_names=False):
|
|
||||||
"""Get tribes list
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> loader.get_tribes()
|
|
||||||
{'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}
|
|
||||||
>>> loader.get_tribes(only_names=True)
|
|
||||||
['Tribe1', 'Tribe2']
|
|
||||||
"""
|
|
||||||
if only_names:
|
|
||||||
return list(self._config["tribes"].keys())
|
|
||||||
return self._config["tribes"]
|
|
||||||
|
|
||||||
def get_exams(self, tribes=[]):
|
|
||||||
"""Get exams list
|
|
||||||
|
|
||||||
:param tribes: get only exams for those tribes
|
|
||||||
:return: dataframe of exams
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> exams = loader.get_exams(["Tribe1"])
|
|
||||||
>>> all(exams.columns == loader.exam_columns)
|
|
||||||
True
|
|
||||||
>>> exams
|
|
||||||
name date term origin tribe id
|
|
||||||
0 DS 12/01/2021 1 example/Tribe1/210112_DS.csv Tribe1 DS_Tribe1
|
|
||||||
0 DS6 22/01/2021 1 example/Tribe1/210122_DS6.csv Tribe1 DS6_Tribe1
|
|
||||||
"""
|
|
||||||
exams = []
|
|
||||||
for tribe in tribes:
|
|
||||||
tribe_path = Path(self._config["source"]) / tribe
|
|
||||||
csvs = list_csvs(tribe_path)
|
|
||||||
for csv in csvs:
|
|
||||||
fields = self.reverse_csv_field(["exam", "date", "term"])
|
|
||||||
exam = extract_fields(csv, fields)
|
|
||||||
exam = self.rename_columns(exam)
|
|
||||||
exam = exam.rename(columns={"exam": "name"})
|
|
||||||
exam["origin"] = str(csv)
|
|
||||||
exam["tribe"] = tribe
|
|
||||||
exam["id"] = build_id(
|
|
||||||
self._config["id_templates"]["exam"], exam.iloc[0]
|
|
||||||
)
|
|
||||||
exams.append(exam)
|
|
||||||
if exams:
|
|
||||||
return pd.concat(exams)
|
|
||||||
return pd.DataFrame(columns=["name", "date", "term", "origin", "tribe", "id"])
|
|
||||||
|
|
||||||
def get_exam_questions(self, exams=[]):
|
|
||||||
"""Get questions for exams stored in score_files
|
|
||||||
|
|
||||||
:param exams: list or dataframe of exams metadatas (need origin field to find the csv)
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> exams = loader.get_exams(["Tribe1"])
|
|
||||||
>>> all(loader.get_exam_questions([exams.iloc[0]]).columns == loader.question_columns)
|
|
||||||
True
|
|
||||||
>>> questions = loader.get_exam_questions(exams)
|
|
||||||
>>> questions.iloc[0]
|
|
||||||
exercise Exercice 1
|
|
||||||
question 1
|
|
||||||
competence Calculer
|
|
||||||
theme Plop
|
|
||||||
comment Coucou
|
|
||||||
score_rate 1.0
|
|
||||||
is_leveled 1.0
|
|
||||||
origin example/Tribe1/210112_DS.csv
|
|
||||||
exam_id DS_Tribe1
|
|
||||||
id DS_Tribe1_Exercice_1_1_Coucou
|
|
||||||
Name: 0, dtype: object
|
|
||||||
"""
|
|
||||||
_exams = maybe_dataframe(exams)
|
|
||||||
|
|
||||||
questions = []
|
|
||||||
for exam in _exams:
|
|
||||||
fields = self.reverse_csv_field(
|
|
||||||
[
|
|
||||||
"exercise",
|
|
||||||
"question",
|
|
||||||
"competence",
|
|
||||||
"theme",
|
|
||||||
"comment",
|
|
||||||
"score_rate",
|
|
||||||
"is_leveled",
|
|
||||||
]
|
|
||||||
)
|
|
||||||
question = extract_fields(exam["origin"], fields)
|
|
||||||
question = self.rename_columns(question)
|
|
||||||
question["origin"] = exam["origin"]
|
|
||||||
question["exam_id"] = exam["id"]
|
|
||||||
question["id"] = build_id(
|
|
||||||
self._config["id_templates"]["question"], question.iloc[0]
|
|
||||||
)
|
|
||||||
questions.append(question)
|
|
||||||
|
|
||||||
return pd.concat(questions)
|
|
||||||
|
|
||||||
def get_questions_scores(self, questions=[]):
|
|
||||||
"""Get scores of those questions
|
|
||||||
|
|
||||||
:param questions: list or dataframe of questions metadatas (need origin field to find the csv)
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> exams = loader.get_exams(["Tribe1"])
|
|
||||||
>>> questions = loader.get_exam_questions(exams)
|
|
||||||
>>> scores = loader.get_questions_scores(questions)
|
|
||||||
>>> all(scores.columns == loader.score_columns)
|
|
||||||
True
|
|
||||||
>>> scores["student_name"].unique()
|
|
||||||
array(['Star Tice', 'Umberto Dingate', 'Starlin Crangle',
|
|
||||||
'Humbert Bourcq', 'Gabriella Handyside', 'Stewart Eaves',
|
|
||||||
'Erick Going', 'Ase Praton', 'Rollins Planks', 'Dunstan Sarjant',
|
|
||||||
'Stacy Guiton', 'Ange Stanes', 'Amabelle Elleton',
|
|
||||||
'Darn Broomhall', 'Dyan Chatto', 'Keane Rennebach', 'Nari Paulton',
|
|
||||||
'Brandy Wase', 'Jaclyn Firidolfi', 'Violette Lockney'],
|
|
||||||
dtype=object)
|
|
||||||
"""
|
|
||||||
scores = []
|
|
||||||
group_questions = questions.groupby("origin")
|
|
||||||
for origin, questions_df in group_questions:
|
|
||||||
scores_df = pd.read_csv(origin)
|
|
||||||
scores_df = self.rename_columns(scores_df)
|
|
||||||
student_names = [
|
|
||||||
c
|
|
||||||
for c in scores_df.columns
|
|
||||||
if c not in self._config["csv_fields"].keys()
|
|
||||||
]
|
|
||||||
|
|
||||||
common_columns = [c for c in questions_df.columns if c in scores_df.columns]
|
|
||||||
scores_df = pd.merge(scores_df, questions_df, on=common_columns)
|
|
||||||
|
|
||||||
kept_columns = [c for c in scores_df if c not in student_names]
|
|
||||||
scores_df = pd.melt(
|
|
||||||
scores_df,
|
|
||||||
id_vars=kept_columns,
|
|
||||||
value_vars=student_names,
|
|
||||||
var_name="student_name",
|
|
||||||
value_name="score",
|
|
||||||
)
|
|
||||||
|
|
||||||
scores_df = scores_df.rename(columns={"id": "question_id"})
|
|
||||||
scores.append(scores_df)
|
|
||||||
|
|
||||||
return pd.concat(scores)
|
|
||||||
|
|
||||||
def get_exam_scores(self, exams=[]):
|
|
||||||
"""Get scores for all question of the exam
|
|
||||||
|
|
||||||
:param exams: list or dataframe of exams metadatas (need origin field to find the csv)
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> exams = loader.get_exams(["Tribe1"])
|
|
||||||
>>> scores = loader.get_exam_scores(exams)
|
|
||||||
>>> scores.columns
|
|
||||||
Index(['term', 'exam', 'date', 'exercise', 'question', 'competence', 'theme',
|
|
||||||
'comment', 'score_rate', 'is_leveled', 'origin', 'exam_id',
|
|
||||||
'question_id', 'student_name', 'score'],
|
|
||||||
dtype='object')
|
|
||||||
"""
|
|
||||||
questions = self.get_exam_questions(exams)
|
|
||||||
return self.get_questions_scores(questions)
|
|
||||||
|
|
||||||
def get_students(self, tribes=[]):
|
|
||||||
"""Get student list
|
|
||||||
|
|
||||||
:param tribes: concerned tribes
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> loader = CSVLoader("./test_config.yml")
|
|
||||||
>>> tribes = loader.get_tribes()
|
|
||||||
>>> students = loader.get_students([tribes["Tribe1"]])
|
|
||||||
>>> students.columns
|
|
||||||
Index(['Nom', 'email', 'origin', 'tribe'], dtype='object')
|
|
||||||
"""
|
|
||||||
students = []
|
|
||||||
for tribe in tribes:
|
|
||||||
students_csv = Path(self._config["source"]) / tribe["students"]
|
|
||||||
students_df = pd.read_csv(students_csv)
|
|
||||||
students_df["origin"] = students_csv
|
|
||||||
students_df["tribe"] = tribe["name"]
|
|
||||||
students.append(students_df)
|
|
||||||
|
|
||||||
return pd.concat(students)
|
|
||||||
|
|
||||||
def get_student_scores(self, student=[]):
|
|
||||||
"""Get all scores for students"""
|
|
||||||
pass
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
|
|
||||||
def column_values_to_column(pivot_column, value_column, kept_columns, df):
|
|
||||||
"""Pivot_column's values go to column with value_column under it, keeping kept_columns
|
|
||||||
|
|
||||||
:param pivot_column: column name where value will become columns
|
|
||||||
:param value_column: column name where value will be under pivot_column
|
|
||||||
:param kept_columns: unchanged columns
|
|
||||||
:param df: DataFrame to work with
|
|
||||||
|
|
||||||
:return: Stack dataframe
|
|
||||||
|
|
||||||
"""
|
|
||||||
if pivot_column in kept_columns:
|
|
||||||
pivot_columns = kept_columns
|
|
||||||
else:
|
|
||||||
pivot_columns = kept_columns + [pivot_column]
|
|
||||||
|
|
||||||
return df.set_index(pivot_columns).unstack(pivot_column)[value_column].reset_index()
|
|
||||||
@@ -1,131 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from math import ceil
|
|
||||||
|
|
||||||
|
|
||||||
def score_to_mark(x, score_max, rounding=lambda x: round(x, 2)):
|
|
||||||
"""Compute the mark from the score
|
|
||||||
|
|
||||||
if the item is leveled then the score is multiply by the score_rate
|
|
||||||
otherwise it copies the score
|
|
||||||
|
|
||||||
:param x: dictionnary with "is_leveled", "score" and "score_rate" keys
|
|
||||||
:param score_max:
|
|
||||||
:param rounding: rounding mark function
|
|
||||||
:return: the mark
|
|
||||||
|
|
||||||
>>> import pandas as pd
|
|
||||||
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
|
||||||
... "score_rate":[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
|
||||||
... "is_leveled":[0]*4+[1]*2 + [0]*4+[1]*2,
|
|
||||||
... "score":[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1.2, 2, 3],
|
|
||||||
... }
|
|
||||||
>>> df = pd.DataFrame(d)
|
|
||||||
>>> df.loc[0]
|
|
||||||
Eleve E1
|
|
||||||
score_rate 1
|
|
||||||
is_leveled 0
|
|
||||||
score 1.0
|
|
||||||
Name: 0, dtype: object
|
|
||||||
>>> score_to_mark(df.loc[0], 3)
|
|
||||||
1.0
|
|
||||||
>>> df.loc[10]
|
|
||||||
Eleve E2
|
|
||||||
score_rate 2
|
|
||||||
is_leveled 1
|
|
||||||
score 2.0
|
|
||||||
Name: 10, dtype: object
|
|
||||||
>>> score_to_mark(df.loc[10], 3)
|
|
||||||
1.33
|
|
||||||
>>> from .on_value import round_half_point
|
|
||||||
>>> score_to_mark(df.loc[10], 3, round_half_point)
|
|
||||||
1.5
|
|
||||||
>>> df.loc[1]
|
|
||||||
Eleve E1
|
|
||||||
score_rate 1
|
|
||||||
is_leveled 0
|
|
||||||
score 0.33
|
|
||||||
Name: 1, dtype: object
|
|
||||||
>>> score_to_mark(df.loc[1], 3)
|
|
||||||
0.33
|
|
||||||
"""
|
|
||||||
if x["is_leveled"]:
|
|
||||||
if x["score"] not in list(range(score_max + 1)):
|
|
||||||
raise ValueError(f"The evaluation is out of range: {x['score']} at {x}")
|
|
||||||
return rounding(x["score"] * x["score_rate"] / score_max)
|
|
||||||
|
|
||||||
return rounding(x["score"])
|
|
||||||
|
|
||||||
|
|
||||||
def score_to_level(x, level_max=3):
|
|
||||||
"""Compute the level (".",0,1,2,3).
|
|
||||||
|
|
||||||
:param x: dictionnary with "is_leveled", "score" and "score_rate" keys
|
|
||||||
:return: the level
|
|
||||||
|
|
||||||
>>> import pandas as pd
|
|
||||||
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
|
||||||
... "score_rate":[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
|
||||||
... "is_leveled":[0]*4+[1]*2 + [0]*4+[1]*2,
|
|
||||||
... "score":[1, 0.33, 0, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
|
||||||
... }
|
|
||||||
>>> df = pd.DataFrame(d)
|
|
||||||
>>> df
|
|
||||||
Eleve score_rate is_leveled score
|
|
||||||
0 E1 1 0 1.000
|
|
||||||
1 E1 1 0 0.330
|
|
||||||
2 E1 2 0 0.000
|
|
||||||
3 E1 2 0 1.500
|
|
||||||
4 E1 2 1 1.000
|
|
||||||
5 E1 2 1 3.000
|
|
||||||
6 E2 1 0 0.666
|
|
||||||
7 E2 1 0 1.000
|
|
||||||
8 E2 2 0 1.500
|
|
||||||
9 E2 2 0 1.000
|
|
||||||
10 E2 2 1 2.000
|
|
||||||
11 E2 2 1 3.000
|
|
||||||
>>> df.apply(score_to_level, axis=1)
|
|
||||||
0 3
|
|
||||||
1 1
|
|
||||||
2 0
|
|
||||||
3 3
|
|
||||||
4 1
|
|
||||||
5 3
|
|
||||||
6 2
|
|
||||||
7 3
|
|
||||||
8 3
|
|
||||||
9 2
|
|
||||||
10 2
|
|
||||||
11 3
|
|
||||||
dtype: int64
|
|
||||||
>>> df.apply(lambda x: score_to_level(x, 5), axis=1)
|
|
||||||
0 5
|
|
||||||
1 2
|
|
||||||
2 0
|
|
||||||
3 4
|
|
||||||
4 1
|
|
||||||
5 3
|
|
||||||
6 4
|
|
||||||
7 5
|
|
||||||
8 4
|
|
||||||
9 3
|
|
||||||
10 2
|
|
||||||
11 3
|
|
||||||
dtype: int64
|
|
||||||
"""
|
|
||||||
if x["is_leveled"]:
|
|
||||||
return int(x["score"])
|
|
||||||
|
|
||||||
if x["score"] > x["score_rate"]:
|
|
||||||
raise ValueError(
|
|
||||||
f"score is higher than score_rate ({x['score']} > {x['score_rate']}) for {x}"
|
|
||||||
)
|
|
||||||
|
|
||||||
return int(ceil(x["score"] / x["score_rate"] * level_max))
|
|
||||||
|
|
||||||
|
|
||||||
# -----------------------------
|
|
||||||
# Reglages pour 'vim'
|
|
||||||
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
|
||||||
# cursor: 16 del
|
|
||||||
@@ -1,141 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from .on_score_column import score_to_mark, score_to_level
|
|
||||||
|
|
||||||
|
|
||||||
def compute_marks(df, score_max, rounding=lambda x: round(x, 2)):
|
|
||||||
"""Compute the mark for the dataframe
|
|
||||||
|
|
||||||
apply score_to_mark to each row
|
|
||||||
|
|
||||||
:param df: DataFrame with "score", "is_leveled" and "score_rate" columns.
|
|
||||||
|
|
||||||
>>> import pandas as pd
|
|
||||||
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
|
||||||
... "score_rate":[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
|
||||||
... "is_leveled":[0]*4+[1]*2 + [0]*4+[1]*2,
|
|
||||||
... "score":[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
|
||||||
... }
|
|
||||||
>>> df = pd.DataFrame(d)
|
|
||||||
>>> df
|
|
||||||
Eleve score_rate is_leveled score
|
|
||||||
0 E1 1 0 1.000
|
|
||||||
1 E1 1 0 0.330
|
|
||||||
2 E1 2 0 2.000
|
|
||||||
3 E1 2 0 1.500
|
|
||||||
4 E1 2 1 1.000
|
|
||||||
5 E1 2 1 3.000
|
|
||||||
6 E2 1 0 0.666
|
|
||||||
7 E2 1 0 1.000
|
|
||||||
8 E2 2 0 1.500
|
|
||||||
9 E2 2 0 1.000
|
|
||||||
10 E2 2 1 2.000
|
|
||||||
11 E2 2 1 3.000
|
|
||||||
>>> compute_marks(df, 3)
|
|
||||||
0 1.00
|
|
||||||
1 0.33
|
|
||||||
2 2.00
|
|
||||||
3 1.50
|
|
||||||
4 0.67
|
|
||||||
5 2.00
|
|
||||||
6 0.67
|
|
||||||
7 1.00
|
|
||||||
8 1.50
|
|
||||||
9 1.00
|
|
||||||
10 1.33
|
|
||||||
11 2.00
|
|
||||||
dtype: float64
|
|
||||||
>>> from .on_value import round_half_point
|
|
||||||
>>> compute_marks(df, 3, round_half_point)
|
|
||||||
0 1.0
|
|
||||||
1 0.5
|
|
||||||
2 2.0
|
|
||||||
3 1.5
|
|
||||||
4 0.5
|
|
||||||
5 2.0
|
|
||||||
6 0.5
|
|
||||||
7 1.0
|
|
||||||
8 1.5
|
|
||||||
9 1.0
|
|
||||||
10 1.5
|
|
||||||
11 2.0
|
|
||||||
dtype: float64
|
|
||||||
"""
|
|
||||||
return df[["score", "is_leveled", "score_rate"]].apply(
|
|
||||||
lambda x: score_to_mark(x, score_max, rounding), axis=1
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def compute_level(df, level_max=3):
|
|
||||||
"""Compute level for the dataframe
|
|
||||||
|
|
||||||
Applies score_to_level to each row
|
|
||||||
|
|
||||||
:param df: DataFrame with "score", "is_leveled" and "score_rate" columns.
|
|
||||||
:return: Columns with level
|
|
||||||
|
|
||||||
>>> import pandas as pd
|
|
||||||
>>> import numpy as np
|
|
||||||
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
|
||||||
... "score_rate":[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
|
||||||
... "is_leveled":[0]*4+[1]*2 + [0]*4+[1]*2,
|
|
||||||
... "score":[0, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
|
||||||
... }
|
|
||||||
>>> df = pd.DataFrame(d)
|
|
||||||
>>> compute_level(df)
|
|
||||||
0 0
|
|
||||||
1 1
|
|
||||||
2 3
|
|
||||||
3 3
|
|
||||||
4 1
|
|
||||||
5 3
|
|
||||||
6 2
|
|
||||||
7 3
|
|
||||||
8 3
|
|
||||||
9 2
|
|
||||||
10 2
|
|
||||||
11 3
|
|
||||||
dtype: int64
|
|
||||||
"""
|
|
||||||
return df[["score", "is_leveled", "score_rate"]].apply(
|
|
||||||
lambda x: score_to_level(x, level_max), axis=1
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def compute_normalized(df, rounding=lambda x: round(x, 2)):
|
|
||||||
"""Compute the normalized mark (Mark / score_rate)
|
|
||||||
|
|
||||||
:param df: DataFrame with "Mark" and "score_rate" columns
|
|
||||||
:return: column with normalized mark
|
|
||||||
|
|
||||||
>>> import pandas as pd
|
|
||||||
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
|
||||||
... "score_rate":[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
|
||||||
... "is_leveled":[0]*4+[1]*2 + [0]*4+[1]*2,
|
|
||||||
... "score":[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
|
||||||
... }
|
|
||||||
>>> df = pd.DataFrame(d)
|
|
||||||
>>> df["mark"] = compute_marks(df, 3)
|
|
||||||
>>> compute_normalized(df)
|
|
||||||
0 1.00
|
|
||||||
1 0.33
|
|
||||||
2 1.00
|
|
||||||
3 0.75
|
|
||||||
4 0.34
|
|
||||||
5 1.00
|
|
||||||
6 0.67
|
|
||||||
7 1.00
|
|
||||||
8 0.75
|
|
||||||
9 0.50
|
|
||||||
10 0.66
|
|
||||||
11 1.00
|
|
||||||
dtype: float64
|
|
||||||
"""
|
|
||||||
return rounding(df["mark"] / df["score_rate"])
|
|
||||||
|
|
||||||
|
|
||||||
# -----------------------------
|
|
||||||
# Reglages pour 'vim'
|
|
||||||
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
|
||||||
# cursor: 16 del
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# encoding: utf-8
|
|
||||||
|
|
||||||
from math import ceil, floor
|
|
||||||
|
|
||||||
|
|
||||||
def round_with_base(x, base=0.5):
|
|
||||||
"""Round to a multiple of base
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> round_with_base(1.33, 0.1)
|
|
||||||
1.3
|
|
||||||
>>> round_with_base(1.33, 0.2)
|
|
||||||
1.4
|
|
||||||
>>> round_with_base(1.33, 1)
|
|
||||||
1
|
|
||||||
>>> round_with_base(1.33, 2)
|
|
||||||
2
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
prec = len(str(base).split(".")[1])
|
|
||||||
except IndexError:
|
|
||||||
prec = 0
|
|
||||||
return round(base * round(float(x) / base), prec)
|
|
||||||
|
|
||||||
|
|
||||||
def round_half_point(x):
|
|
||||||
"""Round to nearest half point
|
|
||||||
|
|
||||||
:example:
|
|
||||||
>>> round_half_point(1.33)
|
|
||||||
1.5
|
|
||||||
>>> round_half_point(1.1)
|
|
||||||
1.0
|
|
||||||
>>> round_half_point(1.66)
|
|
||||||
1.5
|
|
||||||
>>> round_half_point(1.76)
|
|
||||||
2.0
|
|
||||||
"""
|
|
||||||
return round_with_base(x, base=0.5)
|
|
||||||
recopytex/df_marks_manip.py (new file): 206 lines
@@ -0,0 +1,206 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# encoding: utf-8
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
from math import ceil, floor
|
||||||
|
from .config import COLUMNS, VALIDSCORE
|
||||||
|
|
||||||
|
# Values manipulations
|
||||||
|
|
||||||
|
|
||||||
|
def round_half_point(val):
|
||||||
|
try:
|
||||||
|
return 0.5 * ceil(2.0 * val)
|
||||||
|
except ValueError:
|
||||||
|
return val
|
||||||
|
except TypeError:
|
||||||
|
return val
|
||||||
|
|
||||||
|
|
||||||
|
def score_to_mark(x):
|
||||||
|
""" Compute the mark
|
||||||
|
|
||||||
|
if the item is leveled then the score is multiply by the score_rate
|
||||||
|
otherwise it copies the score
|
||||||
|
|
||||||
|
:param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
|
||||||
|
|
||||||
|
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
||||||
|
... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
||||||
|
... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
|
||||||
|
... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
||||||
|
... }
|
||||||
|
>>> df = pd.DataFrame(d)
|
||||||
|
>>> score_to_mark(df.loc[0])
|
||||||
|
1.0
|
||||||
|
>>> score_to_mark(df.loc[10])
|
||||||
|
1.3333333333333333
|
||||||
|
"""
|
||||||
|
# -1 is no answer
|
||||||
|
if x[COLUMNS["score"]] == -1:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
if x[COLUMNS["is_leveled"]]:
|
||||||
|
if x[COLUMNS["score"]] not in [0, 1, 2, 3]:
|
||||||
|
raise ValueError(f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}")
|
||||||
|
#return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)
|
||||||
|
return round(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3, 2)
|
||||||
|
|
||||||
|
if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
|
||||||
|
raise ValueError(
|
||||||
|
f"The score ({x['score']}) is greated than the rating scale ({x[COLUMNS['score_rate']]}) at {x}"
|
||||||
|
)
|
||||||
|
return x[COLUMNS["score"]]
|
||||||
|
|
||||||
|
|
||||||
|
def score_to_level(x):
|
||||||
|
""" Compute the level (".",0,1,2,3).
|
||||||
|
|
||||||
|
:param x: dictionnary with COLUMNS["is_leveled"], COLUMNS["score"] and COLUMNS["score_rate"] keys
|
||||||
|
|
||||||
|
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
||||||
|
... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
||||||
|
... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
|
||||||
|
... COLUMNS["score"]:[1, 0.33, np.nan, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
||||||
|
... }
|
||||||
|
>>> df = pd.DataFrame(d)
|
||||||
|
>>> score_to_level(df.loc[0])
|
||||||
|
3
|
||||||
|
>>> score_to_level(df.loc[1])
|
||||||
|
1
|
||||||
|
>>> score_to_level(df.loc[2])
|
||||||
|
'na'
|
||||||
|
>>> score_to_level(df.loc[3])
|
||||||
|
3
|
||||||
|
>>> score_to_level(df.loc[5])
|
||||||
|
3
|
||||||
|
>>> score_to_level(df.loc[10])
|
||||||
|
2
|
||||||
|
"""
|
||||||
|
# negatives are no answer or negatives points
|
||||||
|
if x[COLUMNS["score"]] <= -1:
|
||||||
|
return np.nan
|
||||||
|
|
||||||
|
if x[COLUMNS["is_leveled"]]:
|
||||||
|
return int(x[COLUMNS["score"]])
|
||||||
|
|
||||||
|
return int(ceil(x[COLUMNS["score"]] / x[COLUMNS["score_rate"]] * 3))
|
||||||
|
|
||||||
|
|
||||||
|
# DataFrame columns manipulations
|
||||||
|
|
||||||
|
|
||||||
|
def compute_mark(df):
|
||||||
|
""" Add Mark column to df
|
||||||
|
|
||||||
|
:param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
|
||||||
|
|
||||||
|
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
||||||
|
... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
||||||
|
... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
|
||||||
|
... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
||||||
|
... }
|
||||||
|
>>> df = pd.DataFrame(d)
|
||||||
|
>>> compute_mark(df)
|
||||||
|
0 1.00
|
||||||
|
1 0.33
|
||||||
|
2 2.00
|
||||||
|
3 1.50
|
||||||
|
4 0.67
|
||||||
|
5 2.00
|
||||||
|
6 0.67
|
||||||
|
7 1.00
|
||||||
|
8 1.50
|
||||||
|
9 1.00
|
||||||
|
10 1.33
|
||||||
|
11 2.00
|
||||||
|
dtype: float64
|
||||||
|
"""
|
||||||
|
return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
|
||||||
|
score_to_mark, axis=1
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_level(df):
|
||||||
|
""" Add Mark column to df
|
||||||
|
|
||||||
|
:param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and COLUMNS["score_rate"] columns.
|
||||||
|
|
||||||
|
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
||||||
|
... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
||||||
|
... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
|
||||||
|
... COLUMNS["score"]:[np.nan, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
||||||
|
... }
|
||||||
|
>>> df = pd.DataFrame(d)
|
||||||
|
>>> compute_level(df)
|
||||||
|
0 na
|
||||||
|
1 1
|
||||||
|
2 3
|
||||||
|
3 3
|
||||||
|
4 1
|
||||||
|
5 3
|
||||||
|
6 2
|
||||||
|
7 3
|
||||||
|
8 3
|
||||||
|
9 2
|
||||||
|
10 2
|
||||||
|
11 3
|
||||||
|
dtype: object
|
||||||
|
"""
|
||||||
|
return df[[COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]].apply(
|
||||||
|
score_to_level, axis=1
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_normalized(df):
|
||||||
|
""" Compute the normalized mark (Mark / score_rate)
|
||||||
|
|
||||||
|
:param df: DataFrame with "Mark" and COLUMNS["score_rate"] columns
|
||||||
|
|
||||||
|
>>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
|
||||||
|
... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
|
||||||
|
... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
|
||||||
|
... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
|
||||||
|
... }
|
||||||
|
>>> df = pd.DataFrame(d)
|
||||||
|
>>> df["Mark"] = compute_marks(df)
|
||||||
|
>>> compute_normalized(df)
|
||||||
|
0 1.00
|
||||||
|
1 0.33
|
||||||
|
2 1.00
|
||||||
|
3 0.75
|
||||||
|
4 0.33
|
||||||
|
5 1.00
|
||||||
|
6 0.67
|
||||||
|
7 1.00
|
||||||
|
8 0.75
|
||||||
|
9 0.50
|
||||||
|
10 0.67
|
||||||
|
11 1.00
|
||||||
|
dtype: float64
|
||||||
|
"""
|
||||||
|
return df[COLUMNS["mark"]] / df[COLUMNS["score_rate"]]
|
||||||
|
|
||||||
|
|
||||||
|
# Postprocessing question scores
|
||||||
|
|
||||||
|
|
||||||
|
def pp_q_scores(df):
|
||||||
|
""" Postprocessing questions scores dataframe
|
||||||
|
|
||||||
|
:param df: questions-scores dataframe
|
||||||
|
:return: same data frame with mark, level and normalize columns
|
||||||
|
"""
|
||||||
|
assign = {
|
||||||
|
COLUMNS["mark"]: compute_mark,
|
||||||
|
COLUMNS["level"]: compute_level,
|
||||||
|
COLUMNS["normalized"]: compute_normalized,
|
||||||
|
}
|
||||||
|
return df.assign(**assign)
|
||||||
|
|
||||||
|
|
||||||
|
# -----------------------------
|
||||||
|
# Reglages pour 'vim'
|
||||||
|
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
|
||||||
|
# cursor: 16 del
|
||||||
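For orientation, here is a minimal, self-contained sketch of the scoring rules these helpers encode (an illustration only, not part of the diff, with made-up generic column names standing in for the COLUMNS mapping): a leveled item is graded 0-3 and its mark is rescaled to the rating scale, while a non-leveled item keeps its raw score as mark and gets ceil(score / score_rate * 3) as level.

from math import ceil

import pandas as pd

# Made-up column names standing in for the COLUMNS mapping
items = pd.DataFrame(
    {"score_rate": [2, 2], "is_leveled": [0, 1], "score": [1.5, 3]}
)

def to_mark(row):
    # leveled items are graded 0..3 then rescaled to the rating scale
    if row["is_leveled"]:
        return row["score"] / 3 * row["score_rate"]
    return row["score"]

def to_level(row):
    if row["is_leveled"]:
        return int(row["score"])
    return int(ceil(row["score"] / row["score_rate"] * 3))

print(items.apply(to_mark, axis=1))   # 1.5 then 2.0
print(items.apply(to_level, axis=1))  # 3 then 3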
10 recopytex/scripts/config.py Normal file
@@ -0,0 +1,10 @@
#!/usr/bin/env python
# encoding: utf-8

import yaml

CONFIGPATH = "recoconfig.yml"

with open(CONFIGPATH, "r") as configfile:
    config = yaml.load(configfile, Loader=yaml.FullLoader)
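The other scripts only read a handful of keys from this config dictionary (source, output, templates, plus the per-tribe entries). A rough sketch of loading an equivalent configuration from a string, handy for trying the code without a recoconfig.yml on disk; the values are placeholders:

import yaml

# Placeholder configuration mirroring the keys used by the scripts
sample = """
source: sheets/
output: reports/
templates: templates/
"""
config = yaml.load(sample, Loader=yaml.FullLoader)
print(config["source"], config["output"], config["templates"])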
160 recopytex/scripts/prepare_csv.py Normal file
@@ -0,0 +1,160 @@
#!/usr/bin/env python
# encoding: utf-8

import click
from pathlib import Path
from datetime import datetime
from PyInquirer import prompt, print_json
import pandas as pd
import numpy as np

from .config import config
from ..config import NO_ST_COLUMNS


class PromptAbortException(EOFError):
    def __init__(self, message, errors=None):

        # Call the base class constructor with the parameters it needs
        super(PromptAbortException, self).__init__("Abort questionnaire", errors)


def get_tribes(answers):
    """ List tribes based on the subdirectories of config["source"] which contain an "eleves.csv" file """
    return [
        p.name for p in Path(config["source"]).iterdir() if (p / "eleves.csv").exists()
    ]


def prepare_csv():
    items = new_eval()

    item = items[0]
    # item = {"tribe": "308", "date": datetime.today(), "assessment": "plop"}
    csv_output = (
        Path(config["source"])
        / item["tribe"]
        / f"{item['date']:%y%m%d}_{item['assessment']}.csv"
    )

    students = pd.read_csv(Path(config["source"]) / item["tribe"] / "eleves.csv")["Nom"]

    columns = list(NO_ST_COLUMNS.keys())
    items = [[it[c] for c in columns] for it in items]
    columns = list(NO_ST_COLUMNS.values())
    items_df = pd.DataFrame.from_records(items, columns=columns)
    for s in students:
        items_df[s] = np.nan

    items_df.to_csv(csv_output, index=False, date_format="%d/%m/%Y")
    click.echo(f"Saving csv file to {csv_output}")


def new_eval(answers={}):
    click.echo("Préparation d'un nouveau devoir")

    eval_questions = [
        {"type": "input", "name": "assessment", "message": "Nom de l'évaluation",},
        {
            "type": "list",
            "name": "tribe",
            "message": "Classe concernée",
            "choices": get_tribes,
        },
        {
            "type": "input",
            "name": "date",
            "message": "Date du devoir (%y%m%d)",
            "default": datetime.today().strftime("%y%m%d"),
            "filter": lambda val: datetime.strptime(val, "%y%m%d"),
        },
        {
            "type": "list",
            "name": "term",
            "message": "Trimestre",
            "choices": ["1", "2", "3"],
        },
    ]

    eval_ans = prompt(eval_questions)

    items = []
    add_exo = True
    while add_exo:
        ex_items = new_exercice(eval_ans)
        items += ex_items
        add_exo = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_exo",
                    "message": "Ajouter un autre exercice",
                    "default": True,
                }
            ]
        )["add_exo"]
    return items


def new_exercice(answers={}):
    exercise_questions = [
        {"type": "input", "name": "exercise", "message": "Nom de l'exercice"},
    ]

    click.echo("Nouvel exercice")
    exercise_ans = prompt(exercise_questions, answers=answers)

    items = []

    add_item = True
    while add_item:
        try:
            item_ans = new_item(exercise_ans)
        except PromptAbortException:
            click.echo("Création de l'item annulée")
        else:
            items.append(item_ans)
        add_item = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_item",
                    "message": f"Ajouter un autre item pour l'exercice {exercise_ans['exercise']}",
                    "default": True,
                }
            ]
        )["add_item"]

    return items


def new_item(answers={}):
    item_questions = [
        {"type": "input", "name": "question", "message": "Nom de l'item",},
        {"type": "input", "name": "comment", "message": "Commentaire",},
        {
            "type": "list",
            "name": "competence",
            "message": "Competence",
            "choices": ["Cher", "Rep", "Mod", "Rai", "Cal", "Com"],
        },
        {"type": "input", "name": "theme", "message": "Domaine",},
        {
            "type": "confirm",
            "name": "is_leveled",
            "message": "Évaluation par niveau",
            "default": True,
        },
        {"type": "input", "name": "score_rate", "message": "Bareme"},
        {
            "type": "confirm",
            "name": "correct",
            "message": "Tout est correct?",
            "default": True,
        },
    ]
    click.echo(f"Nouvelle question pour l'exercice {answers['exercise']}")
    item_ans = prompt(item_questions, answers=answers)
    if item_ans["correct"]:
        return item_ans
    raise PromptAbortException("Abort item creation")
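The net effect of prepare_csv is an empty scoring grid: one row per item gathered by the prompts, the NO_ST_COLUMNS metadata columns first, then one blank column per student. A rough sketch of that last step (illustration only; column names and students are made-up stand-ins for NO_ST_COLUMNS and the "Nom" column of eleves.csv):

import numpy as np
import pandas as pd

# Made-up stand-ins for NO_ST_COLUMNS.values() and the student list
columns = ["exercise", "question", "score_rate"]
items = [["Ex 1", "1", "2"], ["Ex 1", "2", "1"]]
students = ["Alice", "Bob"]

items_df = pd.DataFrame.from_records(items, columns=columns)
for s in students:
    items_df[s] = np.nan  # blank cells, to be filled in after the assessment

items_df.to_csv("210304_Interro2.csv", index=False, date_format="%d/%m/%Y")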
@@ -2,7 +2,14 @@
 # encoding: utf-8

 import click
-from recopytex.dashboard.app import app as dash
+from pathlib import Path
+import yaml
+import sys
+import papermill as pm
+from datetime import datetime
+
+from .prepare_csv import prepare_csv
+from .config import config, CONFIGPATH


 @click.group()
@@ -11,6 +18,85 @@ def cli():


 @cli.command()
-@click.option("--debug", default=0, help="Debug mode for dash")
-def dashboard(debug):
-    dash.run_server(debug=bool(debug))
+def print_config():
+    click.echo(f"Config file is {CONFIGPATH}")
+    click.echo("It contains")
+    click.echo(config)
+
+
+def reporting(csv_file):
+    # csv_file = Path(csv_file)
+    tribe_dir = csv_file.parent
+    csv_filename = csv_file.name.split(".")[0]
+
+    assessment = str(csv_filename).split("_")[-1].capitalize()
+    date = str(csv_filename).split("_")[0]
+    try:
+        date = datetime.strptime(date, "%y%m%d")
+    except ValueError:
+        date = datetime.today().strptime(date, "%y%m%d")
+
+    tribe = str(tribe_dir).split("/")[-1]
+
+    template = Path(config["templates"]) / "tpl_evaluation.ipynb"
+
+    dest = Path(config["output"]) / tribe / csv_filename
+    dest.mkdir(parents=True, exist_ok=True)
+
+    click.echo(f"Building {assessment} ({date:%d/%m/%y}) report")
+    pm.execute_notebook(
+        str(template),
+        str(dest / f"{assessment}.ipynb"),
+        parameters=dict(
+            tribe=tribe,
+            assessment=assessment,
+            date=f"{date:%d/%m/%y}",
+            csv_file=str(csv_file.absolute()),
+        ),
+    )
+
+
+@cli.command()
+@click.argument("target", required=False)
+def report(target=""):
+    """ Make a report for the eval
+
+    :param target: csv file or a directory where csvs are
+    """
+    try:
+        if target.endswith(".csv"):
+            csv = Path(target)
+            if not csv.exists():
+                click.echo(f"{target} does not exist")
+                sys.exit(1)
+            if csv.suffix != ".csv":
+                click.echo(f"{target} has to be a csv file")
+                sys.exit(1)
+            csvs = [csv]
+        else:
+            csvs = list(Path(target).glob("**/*.csv"))
+    except AttributeError:
+        csvs = list(Path(config["source"]).glob("**/*.csv"))
+
+    for csv in csvs:
+        click.echo(f"Processing {csv}")
+        try:
+            reporting(csv)
+        except pm.exceptions.PapermillExecutionError as e:
+            click.echo(f"Error with {csv}: {e}")
+
+
+@cli.command()
+def prepare():
+    """ Prepare csv file """
+    items = prepare_csv()
+
+    click.echo(items)
+
+
+@cli.command()
+@click.argument("tribe")
+def random_pick(tribe):
+    """ Randomly pick a student """
+    pass
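Note that reporting() leans entirely on the naming convention set up by prepare_csv: the CSV sits in a tribe directory under config["source"] and is named <date %y%m%d>_<assessment>.csv. A small sketch of that parsing step, on a made-up path:

from datetime import datetime
from pathlib import Path

csv_file = Path("MyTribe/210304_Interro2.csv")  # made-up example path

tribe = csv_file.parent.name
stem = csv_file.name.split(".")[0]
assessment = stem.split("_")[-1].capitalize()
date = datetime.strptime(stem.split("_")[0], "%y%m%d")

print(tribe, assessment, f"{date:%d/%m/%y}")  # MyTribe Interro2 04/03/21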
@@ -1,4 +1,76 @@
-pandas
-click
-papermill
-prompt_toolkit
+ansiwrap==0.8.4
+appdirs==1.4.3
+attrs==19.1.0
+backcall==0.1.0
+black==19.10b0
+bleach==3.1.0
+certifi==2019.6.16
+chardet==3.0.4
+Click==7.0
+colorama==0.4.1
+cycler==0.10.0
+decorator==4.4.0
+defusedxml==0.6.0
+entrypoints==0.3
+future==0.17.1
+idna==2.8
+importlib-resources==1.0.2
+ipykernel==5.1.3
+ipython==7.11.1
+ipython-genutils==0.2.0
+ipywidgets==7.5.1
+jedi==0.15.2
+Jinja2==2.10.3
+jsonschema==3.2.0
+jupyter==1.0.0
+jupyter-client==5.3.4
+jupyter-console==6.1.0
+jupyter-core==4.6.1
+jupytex==0.0.3
+kiwisolver==1.1.0
+Markdown==3.1.1
+MarkupSafe==1.1.1
+matplotlib==3.1.2
+mistune==0.8.4
+nbconvert==5.6.1
+nbformat==5.0.3
+notebook==6.0.3
+numpy==1.18.1
+pandas==0.25.3
+pandocfilters==1.4.2
+papermill==1.2.1
+parso==0.5.2
+pathspec==0.7.0
+pexpect==4.8.0
+pickleshare==0.7.5
+prometheus-client==0.7.1
+prompt-toolkit==1.0.14
+ptyprocess==0.6.0
+Pygments==2.5.2
+PyInquirer==1.0.3
+pyparsing==2.4.6
+pyrsistent==0.15.7
+python-dateutil==2.8.0
+pytz==2019.3
+PyYAML==5.3
+pyzmq==18.1.1
+qtconsole==4.6.0
+-e git+git_opytex:/lafrite/recopytex.git@7e026bedb24c1ca8bef3b71b3d63f8b0d6916e81#egg=Recopytex
+regex==2020.1.8
+requests==2.22.0
+scipy==1.4.1
+Send2Trash==1.5.0
+six==1.12.0
+tenacity==6.0.0
+terminado==0.8.3
+testpath==0.4.4
+textwrap3==0.9.2
+toml==0.10.0
+tornado==6.0.3
+tqdm==4.41.1
+traitlets==4.3.2
+typed-ast==1.4.1
+urllib3==1.25.8
+wcwidth==0.1.8
+webencodings==0.5.1
+widgetsnbextension==3.5.1
@@ -1,69 +0,0 @@
ansiwrap
attrs
backcall
bleach
certifi
chardet
Click
colorama
cycler
decorator
defusedxml
entrypoints
future
idna
importlib-resources
ipykernel
ipython
ipython-genutils
ipywidgets
jedi
Jinja2
jsonschema
jupyter
jupyter-client
jupyter-console
jupyter-core
jupytex
kiwisolver
MarkupSafe
matplotlib
mistune
nbconvert
nbformat
notebook
numpy
pandas
pandocfilters
papermill
parso
pexpect
pickleshare
prometheus-client
prompt-toolkit
ptyprocess
Pygments
pyparsing
pyrsistent
python-dateutil
pytz
PyYAML
pyzmq
qtconsole
-e git+git_opytex:/lafrite/recopytex.git@e9a8310f151ead60434ae944d726a2fd22b23d06#egg=Recopytex
requests
scipy
seaborn
Send2Trash
six
tenacity
terminado
testpath
textwrap3
tornado
tqdm
traitlets
urllib3
wcwidth
webencodings
widgetsnbextension
7 setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages

 setup(
     name='Recopytex',
-    version='0.1',
+    version='1.1.1',
     description='Assessment analysis',
     author='Benjamin Bertrand',
     author_email='',
@@ -13,6 +13,11 @@ setup(
     include_package_data=True,
     install_requires=[
         'Click',
+        'pandas',
+        'numpy',
+        'papermill',
+        'pyyaml',
+        'PyInquirer',
     ],
     entry_points='''
         [console_scripts]
@@ -1,13 +0,0 @@
---
source: ./example
output: ./output
templates: templates/

tribes:
  Tribe1:
    name: Tribe1
    type: Type1
    students: tribe1.csv
  Tribe2:
    name: Tribe2
    students: tribe2.csv