Compare commits

10 Commits

42 changed files with 706 additions and 913 deletions

4
.gitignore vendored
View File

@@ -122,7 +122,3 @@ dmypy.json
# Pyre type checker
.pyre/
# vim
.vim

View File

@@ -6,29 +6,3 @@ Cette fois ci, on utilise:
- Des fichiers yaml pour les infos sur les élèves
- Des notebooks pour l'analyse
- Papermill pour produire les notebooks à partir de templates
## Les fichiers CSV
Les paramètres sont décrits dans ./recopytex/config.py
### Descriptions des questions
- Trimestre
- Nom
- Date
- Exercice
- Question
- Competence
- Domaine
- Commentaire
- Bareme
- Est_nivele
### Valeurs pour noter les élèves
- Score: 0, 1, 2, 3
- Pas de réponses: .
- Absent: a
- Dispensé: (vide)

View File

@@ -1,5 +0,0 @@
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS,12/01/2021,Exercice 1,1,Calculer,Plop,Coucou,1,1,,,1.0,0,1.0,2.0,3.0,0.0,3.0,3.0,2.0,,1.0,,,,,,,
1,DS,12/01/2021,Exercice 1,2,Calculer,C'est trop chouette!,Coucou,1,1,,,1.0,2,,,3.0,3.0,,,,,2.0,,,,,,,
1,DS,12/01/2021,Exercice 1,3,Calculer,Null,Coucou,1,1,,,,3,2.0,,,,,,,,3.0,,,,,,,
1,DS,12/01/2021,Exercice 1,3,Calculer,Nié,DChic,1,1,,,,2,,,,,,,,,,,,,,,,
1 Trimestre Nom Date Exercice Question Competence Domaine Commentaire Bareme Est_nivele Star Tice Umberto Dingate Starlin Crangle Humbert Bourcq Gabriella Handyside Stewart Eaves Erick Going Ase Praton Rollins Planks Dunstan Sarjant Stacy Guiton Ange Stanes Amabelle Elleton Darn Broomhall Dyan Chatto Keane Rennebach Nari Paulton Brandy Wase Jaclyn Firidolfi Violette Lockney
2 1 DS 12/01/2021 Exercice 1 1 Calculer Plop Coucou 1 1 1.0 0 1.0 2.0 3.0 0.0 3.0 3.0 2.0 1.0
3 1 DS 12/01/2021 Exercice 1 2 Calculer C'est trop chouette! Coucou 1 1 1.0 2 3.0 3.0 2.0
4 1 DS 12/01/2021 Exercice 1 3 Calculer Null Coucou 1 1 3 2.0 3.0
5 1 DS 12/01/2021 Exercice 1 3 Calculer Nié DChic 1 1 2

View File

@@ -1,5 +0,0 @@
Trimestre,Nom,Date,Exercice,Question,Competence,Domaine,Commentaire,Bareme,Est_nivele,Star Tice,Umberto Dingate,Starlin Crangle,Humbert Bourcq,Gabriella Handyside,Stewart Eaves,Erick Going,Ase Praton,Rollins Planks,Dunstan Sarjant,Stacy Guiton,Ange Stanes,Amabelle Elleton,Darn Broomhall,Dyan Chatto,Keane Rennebach,Nari Paulton,Brandy Wase,Jaclyn Firidolfi,Violette Lockney
1,DS6,22/01/2021,Exercice 1,Sait pas,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,Ha,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 1,,,,,,,,,,,,,,,,,,,,,,,,,,
1,DS6,22/01/2021,Exercice 2,grr,,,,,,,,,,,,,,,,,,,,,,,,,
1 Trimestre Nom Date Exercice Question Competence Domaine Commentaire Bareme Est_nivele Star Tice Umberto Dingate Starlin Crangle Humbert Bourcq Gabriella Handyside Stewart Eaves Erick Going Ase Praton Rollins Planks Dunstan Sarjant Stacy Guiton Ange Stanes Amabelle Elleton Darn Broomhall Dyan Chatto Keane Rennebach Nari Paulton Brandy Wase Jaclyn Firidolfi Violette Lockney
2 1 DS6 22/01/2021 Exercice 1 Sait pas
3 1 DS6 22/01/2021 Exercice 1 Ha
4 1 DS6 22/01/2021 Exercice 1
5 1 DS6 22/01/2021 Exercice 2 grr

View File

@@ -1,13 +0,0 @@
---
source: ./
output: ./
templates: templates/
tribes:
Tribe1:
name: Tribe1
type: Type1
students: tribe1.csv
Tribe2:
name: Tribe2
students: tribe2.csv

View File

@@ -1,21 +0,0 @@
Nom,email
Star Tice,stice0@jalbum.net
Umberto Dingate,udingate1@tumblr.com
Starlin Crangle,scrangle2@wufoo.com
Humbert Bourcq,hbourcq3@g.co
Gabriella Handyside,ghandyside4@patch.com
Stewart Eaves,seaves5@ycombinator.com
Erick Going,egoing6@va.gov
Ase Praton,apraton7@va.gov
Rollins Planks,rplanks8@delicious.com
Dunstan Sarjant,dsarjant9@naver.com
Stacy Guiton,sguitona@themeforest.net
Ange Stanes,astanesb@marriott.com
Amabelle Elleton,aelletonc@squidoo.com
Darn Broomhall,dbroomhalld@cisco.com
Dyan Chatto,dchattoe@npr.org
Keane Rennebach,krennebachf@dot.gov
Nari Paulton,npaultong@gov.uk
Brandy Wase,bwaseh@ftc.gov
Jaclyn Firidolfi,jfiridolfii@reuters.com
Violette Lockney,vlockneyj@chron.com
1 Nom email
2 Star Tice stice0@jalbum.net
3 Umberto Dingate udingate1@tumblr.com
4 Starlin Crangle scrangle2@wufoo.com
5 Humbert Bourcq hbourcq3@g.co
6 Gabriella Handyside ghandyside4@patch.com
7 Stewart Eaves seaves5@ycombinator.com
8 Erick Going egoing6@va.gov
9 Ase Praton apraton7@va.gov
10 Rollins Planks rplanks8@delicious.com
11 Dunstan Sarjant dsarjant9@naver.com
12 Stacy Guiton sguitona@themeforest.net
13 Ange Stanes astanesb@marriott.com
14 Amabelle Elleton aelletonc@squidoo.com
15 Darn Broomhall dbroomhalld@cisco.com
16 Dyan Chatto dchattoe@npr.org
17 Keane Rennebach krennebachf@dot.gov
18 Nari Paulton npaultong@gov.uk
19 Brandy Wase bwaseh@ftc.gov
20 Jaclyn Firidolfi jfiridolfii@reuters.com
21 Violette Lockney vlockneyj@chron.com

View File

@@ -1,21 +0,0 @@
Nom,email
Elle McKintosh,emckintosh0@1und1.de
Ty Megany,tmegany1@reuters.com
Pippa Borrows,pborrows2@a8.net
Sonny Eskrick,seskrick3@123-reg.co.uk
Mollee Britch,mbritch4@usda.gov
Ingram Plaistowe,iplaistowe5@purevolume.com
Fay Vanyard,fvanyard6@sbwire.com
Nancy Rase,nrase7@omniture.com
Rachael Ruxton,rruxton8@bravesites.com
Tallie Rushmer,trushmer9@home.pl
Seward MacIlhagga,smacilhaggaa@hatena.ne.jp
Lizette Searl,lsearlb@list-manage.com
Talya Mannagh,tmannaghc@webnode.com
Jordan Witherbed,jwitherbedd@unesco.org
Reagan Botcherby,rbotcherbye@scientificamerican.com
Libbie Shoulder,lshoulderf@desdev.cn
Abner Khomich,akhomichg@youtube.com
Zollie Kitman,zkitmanh@forbes.com
Fiorenze Durden,fdurdeni@feedburner.com
Kevyn Race,kracej@seattletimes.com
1 Nom email
2 Elle McKintosh emckintosh0@1und1.de
3 Ty Megany tmegany1@reuters.com
4 Pippa Borrows pborrows2@a8.net
5 Sonny Eskrick seskrick3@123-reg.co.uk
6 Mollee Britch mbritch4@usda.gov
7 Ingram Plaistowe iplaistowe5@purevolume.com
8 Fay Vanyard fvanyard6@sbwire.com
9 Nancy Rase nrase7@omniture.com
10 Rachael Ruxton rruxton8@bravesites.com
11 Tallie Rushmer trushmer9@home.pl
12 Seward MacIlhagga smacilhaggaa@hatena.ne.jp
13 Lizette Searl lsearlb@list-manage.com
14 Talya Mannagh tmannaghc@webnode.com
15 Jordan Witherbed jwitherbedd@unesco.org
16 Reagan Botcherby rbotcherbye@scientificamerican.com
17 Libbie Shoulder lshoulderf@desdev.cn
18 Abner Khomich akhomichg@youtube.com
19 Zollie Kitman zkitmanh@forbes.com
20 Fiorenze Durden fdurdeni@feedburner.com
21 Kevyn Race kracej@seattletimes.com

4
recoconfig.yml Normal file
View File

@@ -0,0 +1,4 @@
---
source: sheets/
output: reports/
templates: templates/

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env python
# encoding: utf-8
from .csv_extraction import flat_df_students, flat_df_for
from .df_marks_manip import pp_q_scores

30
recopytex/config.py Normal file
View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python
# encoding: utf-8
# Mapping from internal (English) field names to the French CSV column
# headers describing questions. These columns are NOT student columns.
NO_ST_COLUMNS = {
    "assessment": "Nom",
    "term": "Trimestre",
    "date": "Date",
    "exercise": "Exercice",
    "question": "Question",
    "competence": "Competence",
    "theme": "Domaine",
    "comment": "Commentaire",
    "is_leveled": "Est_nivele",
    "score_rate": "Bareme",
}
# Full field mapping: question metadata plus the columns produced when the
# wide per-student score table is flattened (see csv_extraction.py).
COLUMNS = {
    **NO_ST_COLUMNS,
    "student": "Eleve",
    "score": "Score",
    "mark": "Note",
    "level": "Niveau",
    "normalized": "Normalise",
}
# Special (non-numeric) values accepted in score cells.
VALIDSCORE = {
    "NOTFILLED": "",  # The item is not scored yet
    "NOANSWER": ".",  # Student gave no answer (this score will impact the final mark)
    "ABS": "a",  # Student was absent (this score won't impact the final mark)
}

119
recopytex/csv_extraction.py Normal file
View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python
# encoding: utf-8
""" Extracting data from xlsx files """
import pandas as pd
from .config import NO_ST_COLUMNS, COLUMNS, VALIDSCORE
# Use the fully-qualified option name: the bare "Precision" pattern is
# ambiguous in recent pandas (it also matches styler.format.precision)
# and raises an OptionError there.
pd.set_option("display.precision", 2)
def try_replace(x, old, new):
    """Return ``str(x)`` with ``old`` replaced by ``new``.

    Values that cannot be stringified/replaced are returned unchanged, so
    this is safe to ``apply`` over heterogeneous score columns.

    :param x: any cell value (number, string, NaN, ...)
    :param old: substring to replace
    :param new: replacement substring
    :return: the converted string, or ``x`` itself on failure
    """
    try:
        return str(x).replace(old, new)
    except (ValueError, TypeError):
        # str(x).replace never raises ValueError on a plain cell; TypeError
        # (e.g. non-string old/new) is the realistic failure mode, so catch
        # it too instead of letting the whole column conversion crash.
        return x
def extract_students(df, no_student_columns=NO_ST_COLUMNS.values()):
    """Return the student columns of ``df``.

    Every column that is not a question-metadata column is assumed to be a
    student name.

    :param df: the dataframe
    :param no_student_columns: columns that are not students
    :return: Index of student column names
    """
    return df.columns.difference(no_student_columns)
def flat_df_students(
    df, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """Flatten the wide score dataframe to one row per (question, student).

    Columns of csv files:

    - NO_ST_COLUMNS metadata on questions
    - one column per student

    The per-student columns are melted into two columns named by
    COLUMNS["student"] and COLUMNS["score"].

    :param df: the dataframe (one row per question)
    :param no_student_columns: columns that are not students
    :param postprocessing: when True, run :func:`postprocess` on the result
    :return: dataframe with one row per question and student
    """
    melted = [
        pd.melt(
            df,
            id_vars=no_student_columns,
            value_vars=student,
            var_name=COLUMNS["student"],
            value_name=COLUMNS["score"],
        ).dropna(subset=[COLUMNS["score"]])
        for student in extract_students(df, no_student_columns)
    ]
    flat = pd.concat(melted)
    return postprocess(flat) if postprocessing else flat
def flat_df_for(
    df, student, no_student_columns=NO_ST_COLUMNS.values(), postprocessing=True
):
    """Extract the scores of a single student.

    Columns of csv files:

    - NO_ST_COLUMNS metadata on questions
    - one column per student

    :param df: the dataframe (one row per question)
    :param student: the student name (must be one of the student columns)
    :param no_student_columns: columns that are not students
    :param postprocessing: when True, run :func:`postprocess` on the result
    :return: dataframe with question metadata and this student's score column
    :raises KeyError: when ``student`` is not a column of ``df``
    """
    if student not in extract_students(df, no_student_columns):
        raise KeyError("This student is not in the table")
    selection = df[list(no_student_columns) + [student]]
    selection = selection.rename(columns={student: COLUMNS["score"]})
    selection = selection.dropna(subset=[COLUMNS["score"]])
    return postprocess(selection) if postprocessing else selection
def postprocess(df):
    """Postprocess a flattened score dataframe.

    - Replace NaN with an empty string in the text columns
    - Replace the "no answer" marker with -1 in the score column
    - Convert comma decimal separators to dots and cast scores/rates to numbers

    :param df: dataframe with COLUMNS["question"], COLUMNS["exercise"],
        COLUMNS["comment"], COLUMNS["competence"], COLUMNS["score"] and
        COLUMNS["score_rate"] columns
    :return: the dataframe, modified in place and returned
    """
    # Column assignment instead of ``df[col].fillna(..., inplace=True)``:
    # chained inplace fillna is deprecated and can silently operate on a copy.
    for key in ("question", "exercise", "comment", "competence"):
        df[COLUMNS[key]] = df[COLUMNS[key]].fillna("")
    df[COLUMNS["score"]] = pd.to_numeric(
        df[COLUMNS["score"]]
        .replace(VALIDSCORE["NOANSWER"], -1)
        .apply(lambda x: try_replace(x, ",", "."))
    )
    # Unparseable rating scales become NaN rather than raising.
    df[COLUMNS["score_rate"]] = pd.to_numeric(
        df[COLUMNS["score_rate"]].apply(lambda x: try_replace(x, ",", ".")),
        errors="coerce",
    )
    return df
# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import dash
import flask
from .layout.layout import layout
server = flask.Flask(__name__)
app = dash.Dash(
__name__,
server=server,
suppress_callback_exceptions=True,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
app.layout = layout
server = app.server

View File

@@ -1,8 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
from .app import app, server
from .routes import render_page_content
if __name__ == "__main__":
app.run_server(debug=True)

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import dash_html_components as html
import dash_core_components as dcc
content = html.Div(id="page-content")
layout = html.Div([dcc.Location(id="url"), content])

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import dash_html_components as html
import dash_core_components as dcc
from .models import get_tribes, get_exams
from .callbacks import *
layout = html.Div(
children=[
html.Header(
children=[
html.H1("Analyse des notes"),
html.P("Dernière sauvegarde", id="lastsave"),
],
),
html.Main(
html.Section(
[
html.Div(
[
"Classe: ",
dcc.Dropdown(
id="tribe",
options=[
{"label": t["name"], "value": t["name"]}
for t in get_tribes().values()
],
value=next(iter(get_tribes().values()))["name"],
),
],
),
html.Div(
[
"Evaluation: ",
dcc.Dropdown(id="exam_select"),
],
html.P(id="test"),
],
id="select",
),
html.Section(
[
html.Div(
dash_table.DataTable(
id="final_score_table",
columns=[
{"id": "Eleve", "name": "Élève"},
{"id": "Note", "name": "Note"},
{"id": "Bareme", "name": "Barème"},
],
data=[],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": "rgb(248, 248, 248)",
}
],
style_data={
"width": "100px",
"maxWidth": "100px",
"minWidth": "100px",
},
),
id="final_score_table_container",
),
],
id="analysis",
),
html.Section(
id="scores_table",
),
),
dcc.Store(id="scores"),
],
)

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from ...app import app
from .models import get_tribes, get_exams
@app.callback(
[
Output("exam_select", "options"),
Output("exam_select", "value"),
],
[Input("tribe", "value")],
)
def update_csvs(value):
if not value:
raise PreventUpdate
exams = get_exams(value)
exams.reset_index(inplace=True)
print(exams.loc[0, "name"])
if not exams.empty:
return [
{"label": e["name"], "value": e.to_json()} for i, e in exams.iterrows()
], exams.loc[0].to_json()
return [], None
@app.callback(
[
dash.dependencies.Output("final_score", "data"),
],
[dash.dependencies.Input("scores_table", "data")],
)
def update_final_scores(data):
if not data:
raise PreventUpdate
scores = pd.DataFrame.from_records(data)
try:
if scores.iloc[0]["Commentaire"] == "commentaire":
scores.drop([0], inplace=True)
except KeyError:
pass
scores = flat_df_students(scores).dropna(subset=["Score"])
if scores.empty:
return [{}]
scores = pp_q_scores(scores)
assessment_scores = scores.groupby(["Eleve"]).agg({"Note": "sum", "Bareme": "sum"})
return [assessment_scores.reset_index().to_dict("records")]

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
from ....database.filesystem.loader import CSVLoader
LOADER = CSVLoader("./test_config.yml")
def get_tribes():
return LOADER.get_tribes()
def get_exams(tribe):
return LOADER.get_exams([tribe])

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import dash_html_components as html
from ....database.filesystem.loader import CSVLoader
from .models import get_tribes, get_exams, get_students
loader = CSVLoader("./test_config.yml")
def listing(elements, formating=lambda x: x):
return html.Ul(
children=[html.Li(children=formating(element)) for element in elements]
)
def format_tribe(tribe):
children = [html.H3(tribe["name"])]
exams = loader.get_exams([tribe["name"]])
if exams.empty:
children.append(html.P("Pas d'évaluation"))
else:
exams_html = listing([exam for id, exam in exams.iterrows()], format_exam)
children.append(exams_html)
return children
def format_exam(exam):
children = [html.P(exam["name"])]
return children
layout = html.Div(
children=[
html.H1("Recopytex"),
html.H2("Tribes"),
html.Div(
children=[listing(loader.get_tribes().values(), format_tribe)],
id="tribes",
),
html.H2("Config"),
html.Div(
children=[
html.P(str(loader.get_config())),
],
id="config",
),
]
)

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
from dash.dependencies import Input, Output
from ...app import app

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
def get_tribes(loader):
return loader.get_tribes()
def get_exams(loader, tribe):
return loader.get_exams([tribe])
def get_students(loader, tribe):
return loader.get_students([tribe])

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
from dash.dependencies import Input, Output
from .app import app
from .pages.home import app as home
from .pages.exams_scores import app as exams_scores
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def render_page_content(pathname):
if pathname == "/":
return home.layout
elif pathname == "/exams/scores/":
return exams_scores.layout
# elif pathname == iris_page_location:
# return iris.layout
# # If the user tries to reach a different page, return a 404 message
return html.Div(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognised..."),
]
)

View File

@@ -1,88 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
from abc import ABC, abstractmethod
import yaml
"""
Adapter to pull data from the filesystem
# Loader
# Writer
"""
class Loader(ABC):
"""Load data from source"""
CONFIG = {}
def __init__(self, configfile=""):
"""Init loader
:param configfile: yaml file with informations on data source
"""
self._config = self.CONFIG
if configfile.endswith(".yml"):
with open(configfile, "r") as config:
self._config.update(yaml.load(config, Loader=yaml.FullLoader))
def get_config(self):
""" Get config"""
return self._config
@abstractmethod
def get_tribes(self):
""" Get tribes list """
pass
@abstractmethod
def get_exams(self, tribes=[]):
"""Get exams list
:param tribes: get only exams for those tribes
"""
pass
@abstractmethod
def get_students(self, tribes=[]):
"""Get student list
:param filters: list of filters
"""
pass
@abstractmethod
def get_exam_questions(self, exams=[]):
"""Get questions for the exam
:param exams: questions for those exams only
"""
pass
@abstractmethod
def get_questions_scores(self, questions=[]):
"""Get scores of those questions
:param questions: score for those questions
"""
pass
# @abstractmethod
# def get_student_scores(self, student):
# """Get scores of the student
# :param student:
# """
# pass
class Writer(ABC):
""" Write datas to the source """
def __init__(self):
pass

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
"""
Store data using filesystem for organisation, csv for scores
## Organisation
- tribe1.csv # list of students for the tribe
- tribe1/
- exam1.csv # questions and scores for exam1
- exam1.yml # Extra information about exam1
- exam2.csv # questions and scores for exam2
"""

View File

@@ -1,47 +0,0 @@
---
source: ./ # basepath where to start
competences: # Competences
Chercher:
name: Chercher
abrv: Cher
Représenter:
name: Représenter
abrv: Rep
Modéliser:
name: Modéliser
abrv: Mod
Raisonner:
name: Raisonner
abrv: Rai
Calculer:
name: Calculer
abrv: Cal
Communiquer:
name: Communiquer
abrv: Com
valid_scores: #
BAD: 0 # Everything is bad
FEW: 1 # Few good things
NEARLY: 2 # Nearly good but things are missing
GOOD: 3 # Everything is good
NOTFILLED: # The item is not scored yet
NOANSWER: . # Student gives no answer (count as 0)
ABS: "a" # Student has absent (this score won't be impact the final mark)
csv_fields: # dataframe_field: csv_field
term: Trimestre
exam: Nom
date: Date
exercise: Exercice
question: Question
competence: Competence
theme: Domaine
comment: Commentaire
score_rate: Bareme
is_leveled: Est_nivele
id_templates:
exam: "{name}_{tribe}"
question: "{exam_id}_{exercise}_{question}_{comment}"

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import pandas as pd
from pathlib import Path
from unidecode import unidecode
__all__ = ["list_csvs", "extract_fields"]
def list_csvs(path):
"""list csv files in path
:example:
>>> list_csvs("./example/Tribe1/")
[PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
>>> list_csvs("./example/Tribe1")
[PosixPath('example/Tribe1/210112_DS.csv'), PosixPath('example/Tribe1/210122_DS6.csv')]
"""
return list(Path(path).glob("*.csv"))
def extract_fields(csv_filename, fields=[], remove_duplicates=True):
"""Extract fields in csv
:param csv_filename: csv filename (with header)
:param fields: list of fields to extract (all fields if empty list - default)
:param remove_duplicates: keep uniques rows (default True)
:example:
>>> extract_fields("./example/Tribe1/210122_DS6.csv", ["Trimestre", "Nom", "Date"])
Trimestre Nom Date
0 1 DS6 22/01/2021
"""
df = pd.read_csv(csv_filename)
if fields:
df = df[fields]
if remove_duplicates:
return df.drop_duplicates()
return df
def build_id(template, element):
"""Build an id from template to the element
:example:
>>> element = {"name": "pléà", "place": "here", "foo":"bar"}
>>> build_id("{name} {place}", element)
'plea_here'
"""
return unidecode(template.format(**element)).replace(" ", "_")

View File

@@ -1,241 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
import yaml
import os
import uuid
from pathlib import Path
import pandas as pd
from .. import Loader
from .lib import list_csvs, extract_fields, build_id
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "default_config.yml")
with open(DEFAULT_CONFIG_FILE, "r") as config:
DEFAULT_CONFIG = yaml.load(config, Loader=yaml.FullLoader)
def maybe_dataframe(datas):
try:
return [e[1] for e in datas.iterrows()]
except AttributeError:
return datas
class CSVLoader(Loader):
"""Loader when scores and metadatas are stored in csv files
:config:
:example:
>>> loader = CSVLoader()
>>> loader.get_config()
{'source': './', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'valid_scores': {'BAD': 0, 'FEW': 1, 'NEARLY': 2, 'GOOD': 3, 'NOTFILLED': None, 'NOANSWER': '.', 'ABS': 'a'}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}}
>>> loader = CSVLoader("./test_config.yml")
>>> loader.get_config()
{'source': './example', 'competences': {'Chercher': {'name': 'Chercher', 'abrv': 'Cher'}, 'Représenter': {'name': 'Représenter', 'abrv': 'Rep'}, 'Modéliser': {'name': 'Modéliser', 'abrv': 'Mod'}, 'Raisonner': {'name': 'Raisonner', 'abrv': 'Rai'}, 'Calculer': {'name': 'Calculer', 'abrv': 'Cal'}, 'Communiquer': {'name': 'Communiquer', 'abrv': 'Com'}}, 'valid_scores': {'BAD': 0, 'FEW': 1, 'NEARLY': 2, 'GOOD': 3, 'NOTFILLED': None, 'NOANSWER': '.', 'ABS': 'a'}, 'csv_fields': {'term': 'Trimestre', 'exam': 'Nom', 'date': 'Date', 'exercise': 'Exercice', 'question': 'Question', 'competence': 'Competence', 'theme': 'Domaine', 'comment': 'Commentaire', 'score_rate': 'Bareme', 'is_leveled': 'Est_nivele'}, 'id_templates': {'exam': '{name}_{tribe}', 'question': '{exam_id}_{exercise}_{question}_{comment}'}, 'output': './output', 'templates': 'templates/', 'tribes': {'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}}
"""
CONFIG = DEFAULT_CONFIG
def get_config(self):
""" Get config """
return self._config
def rename_columns(self, dataframe):
"""Rename dataframe column to match with `csv_fields`
:param dataframe: the dataframe
:example:
>>> loader = CSVLoader()
>>>
"""
return dataframe.rename(
columns={v: k for k, v in self._config["csv_fields"].items()}
)
def reverse_csv_field(self, keys):
""" Reverse csv field from keys """
return [self._config["csv_fields"][k] for k in keys]
def get_tribes(self, only_names=False):
"""Get tribes list
:example:
>>> loader = CSVLoader("./test_config.yml")
>>> loader.get_tribes()
{'Tribe1': {'name': 'Tribe1', 'type': 'Type1', 'students': 'tribe1.csv'}, 'Tribe2': {'name': 'Tribe2', 'students': 'tribe2.csv'}}
>>> loader.get_tribes(only_names=True)
['Tribe1', 'Tribe2']
"""
if only_names:
return list(self._config["tribes"].keys())
return self._config["tribes"]
def get_exams(self, tribes=[]):
"""Get exams list
:param tribes: get only exams for those tribes
:return: dataframe of exams
:example:
>>> loader = CSVLoader("./test_config.yml")
>>> exams = loader.get_exams(["Tribe1"])
>>> exams.columns
Index(['name', 'date', 'term', 'origin', 'tribe', 'id'], dtype='object')
>>> exams
name date term origin tribe id
0 DS 12/01/2021 1 example/Tribe1/210112_DS.csv Tribe1 DS_Tribe1
0 DS6 22/01/2021 1 example/Tribe1/210122_DS6.csv Tribe1 DS6_Tribe1
"""
exams = []
for tribe in tribes:
tribe_path = Path(self._config["source"]) / tribe
csvs = list_csvs(tribe_path)
for csv in csvs:
fields = self.reverse_csv_field(["exam", "date", "term"])
exam = extract_fields(csv, fields)
exam = self.rename_columns(exam)
exam = exam.rename(columns={"exam": "name"})
exam["origin"] = str(csv)
exam["tribe"] = tribe
exam["id"] = build_id(
self._config["id_templates"]["exam"], exam.iloc[0]
)
exams.append(exam)
if exams:
return pd.concat(exams)
return pd.DataFrame(columns=["name", "date", "term", "origin", "tribe", "id"])
def get_exam_questions(self, exams=[]):
"""Get questions for exams stored in score_files
:param exams: list or dataframe of exams metadatas (need origin field to find the csv)
:example:
>>> loader = CSVLoader("./test_config.yml")
>>> exams = loader.get_exams(["Tribe1"])
>>> loader.get_exam_questions([exams.iloc[0]]).columns
Index(['exercise', 'question', 'competence', 'theme', 'comment', 'score_rate',
'is_leveled', 'origin', 'exam_id', 'id'],
dtype='object')
>>> questions = loader.get_exam_questions(exams)
>>> questions.iloc[0]
exercise Exercice 1
question 1
competence Calculer
theme Plop
comment Coucou
score_rate 1.0
is_leveled 1.0
origin example/Tribe1/210112_DS.csv
exam_id DS_Tribe1
id DS_Tribe1_Exercice_1_1_Coucou
Name: 0, dtype: object
"""
_exams = maybe_dataframe(exams)
questions = []
for exam in _exams:
fields = self.reverse_csv_field(
[
"exercise",
"question",
"competence",
"theme",
"comment",
"score_rate",
"is_leveled",
]
)
question = extract_fields(exam["origin"], fields)
question = self.rename_columns(question)
question["origin"] = exam["origin"]
question["exam_id"] = exam["id"]
question["id"] = build_id(
self._config["id_templates"]["question"], question.iloc[0]
)
questions.append(question)
return pd.concat(questions)
def get_questions_scores(self, questions=[]):
"""Get scores of those questions
:param questions: list or dataframe of questions metadatas (need origin field to find the csv)
:example:
>>> loader = CSVLoader("./test_config.yml")
>>> exams = loader.get_exams(["Tribe1"])
>>> questions = loader.get_exam_questions(exams)
>>> scores = loader.get_questions_scores(questions)
>>> scores.columns
Index(['term', 'exam', 'date', 'exercise', 'question', 'competence', 'theme',
'comment', 'score_rate', 'is_leveled', 'origin', 'exam_id',
'question_id', 'student_name', 'score'],
dtype='object')
>>> scores["student_name"].unique()
array(['Star Tice', 'Umberto Dingate', 'Starlin Crangle',
'Humbert Bourcq', 'Gabriella Handyside', 'Stewart Eaves',
'Erick Going', 'Ase Praton', 'Rollins Planks', 'Dunstan Sarjant',
'Stacy Guiton', 'Ange Stanes', 'Amabelle Elleton',
'Darn Broomhall', 'Dyan Chatto', 'Keane Rennebach', 'Nari Paulton',
'Brandy Wase', 'Jaclyn Firidolfi', 'Violette Lockney'],
dtype=object)
"""
scores = []
group_questions = questions.groupby("origin")
for origin, questions_df in group_questions:
scores_df = pd.read_csv(origin)
scores_df = self.rename_columns(scores_df)
student_names = [
c
for c in scores_df.columns
if c not in self._config["csv_fields"].keys()
]
common_columns = [c for c in questions_df.columns if c in scores_df.columns]
scores_df = pd.merge(scores_df, questions_df, on=common_columns)
kept_columns = [c for c in scores_df if c not in student_names]
scores_df = pd.melt(
scores_df,
id_vars=kept_columns,
value_vars=student_names,
var_name="student_name",
value_name="score",
)
scores_df = scores_df.rename(columns={"id": "question_id"})
scores.append(scores_df)
return pd.concat(scores)
def get_students(self, tribes=[]):
"""Get student list
:param tribes: concerned tribes
:example:
>>> loader = CSVLoader("./test_config.yml")
>>> tribes = loader.get_tribes()
>>> students = loader.get_students([tribes["Tribe1"]])
>>> students.columns
Index(['Nom', 'email', 'origin', 'tribe'], dtype='object')
"""
students = []
for tribe in tribes:
students_csv = Path(self._config["source"]) / tribe["students"]
students_df = pd.read_csv(students_csv)
students_df["origin"] = students_csv
students_df["tribe"] = tribe["name"]
students.append(students_df)
return pd.concat(students)
def get_student_scores(self, student=[]):
"""Get all scores for students"""
pass

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
"""
"""

206
recopytex/df_marks_manip.py Normal file
View File

@@ -0,0 +1,206 @@
#!/usr/bin/env python
# encoding: utf-8
import pandas as pd
import numpy as np
from math import ceil, floor
from .config import COLUMNS, VALIDSCORE
# Values manipulations
def round_half_point(val):
    """Round ``val`` up to the nearest half point.

    Non-numeric values are returned unchanged.

    :param val: the value to round
    :return: the rounded value, or ``val`` itself when it is not a number
    """
    try:
        return ceil(2.0 * val) / 2
    except (ValueError, TypeError):
        return val
def score_to_mark(x):
    """Compute the mark of one scored question.

    If the item is leveled, the score (0-3) is scaled to the score_rate;
    otherwise the score is the mark itself.

    :param x: dict-like with COLUMNS["is_leveled"], COLUMNS["score"] and
        COLUMNS["score_rate"] keys
    :return: the mark
    :raises ValueError: when the score is out of the allowed range

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> score_to_mark(df.loc[0])
    1.0
    >>> score_to_mark(df.loc[10])
    1.33
    """
    # -1 means "no answer": it counts as 0.
    if x[COLUMNS["score"]] == -1:
        return 0
    if x[COLUMNS["is_leveled"]]:
        if x[COLUMNS["score"]] not in [0, 1, 2, 3]:
            raise ValueError(
                f"The evaluation is out of range: {x[COLUMNS['score']]} at {x}"
            )
        # return round_half_point(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3)
        return round(x[COLUMNS["score"]] * x[COLUMNS["score_rate"]] / 3, 2)
    if x[COLUMNS["score"]] > x[COLUMNS["score_rate"]]:
        # Bug fix: the message used x['score'] (key does not exist, so the
        # f-string raised a KeyError and masked the intended ValueError).
        raise ValueError(
            f"The score ({x[COLUMNS['score']]}) is greater than the rating scale ({x[COLUMNS['score_rate']]}) at {x}"
        )
    return x[COLUMNS["score"]]
def score_to_level(x):
    """Compute the level (0, 1, 2, 3 or "na" when there is no score).

    :param x: dict-like with COLUMNS["is_leveled"], COLUMNS["score"] and
        COLUMNS["score_rate"] keys
    :return: the level (int), "na" for a missing score, or np.nan for a
        negative ("no answer"/penalty) score

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, np.nan, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> score_to_level(df.loc[0])
    3
    >>> score_to_level(df.loc[1])
    1
    >>> score_to_level(df.loc[2])
    'na'
    >>> score_to_level(df.loc[3])
    3
    >>> score_to_level(df.loc[5])
    3
    >>> score_to_level(df.loc[10])
    2
    """
    # Bug fix: a NaN score used to crash int(); return "na" as this
    # docstring (and compute_level's doctest) expects.
    if pd.isnull(x[COLUMNS["score"]]):
        return "na"
    # Negatives are "no answer" or penalty points: no level.
    if x[COLUMNS["score"]] <= -1:
        return np.nan
    if x[COLUMNS["is_leveled"]]:
        return int(x[COLUMNS["score"]])
    return int(ceil(x[COLUMNS["score"]] / x[COLUMNS["score_rate"]] * 3))
# DataFrame columns manipulations
def compute_mark(df):
    """Compute the mark of every row (see :func:`score_to_mark`).

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and
        COLUMNS["score_rate"] columns.
    :return: Series of marks

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> compute_mark(df)
    0     1.00
    1     0.33
    2     2.00
    3     1.50
    4     0.67
    5     2.00
    6     0.67
    7     1.00
    8     1.50
    9     1.00
    10    1.33
    11    2.00
    dtype: float64
    """
    needed = [COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]
    return df[needed].apply(score_to_mark, axis=1)
def compute_level(df):
    """Compute the level of every row (see :func:`score_to_level`).

    :param df: DataFrame with COLUMNS["score"], COLUMNS["is_leveled"] and
        COLUMNS["score_rate"] columns.
    :return: Series of levels

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[np.nan, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> compute_level(df)
    0     na
    1      1
    2      3
    3      3
    4      1
    5      3
    6      2
    7      3
    8      3
    9      2
    10     2
    11     3
    dtype: object
    """
    needed = [COLUMNS["score"], COLUMNS["is_leveled"], COLUMNS["score_rate"]]
    return df[needed].apply(score_to_level, axis=1)
def compute_normalized(df):
    """ Compute the normalized mark (mark divided by the score rate)

    :param df: DataFrame with COLUMNS["mark"] and COLUMNS["score_rate"] columns

    >>> d = {"Eleve":["E1"]*6 + ["E2"]*6,
    ... COLUMNS["score_rate"]:[1]*2+[2]*2+[2]*2 + [1]*2+[2]*2+[2]*2,
    ... COLUMNS["is_leveled"]:[0]*4+[1]*2 + [0]*4+[1]*2,
    ... COLUMNS["score"]:[1, 0.33, 2, 1.5, 1, 3, 0.666, 1, 1.5, 1, 2, 3],
    ... }
    >>> df = pd.DataFrame(d)
    >>> df[COLUMNS["mark"]] = compute_mark(df)
    >>> compute_normalized(df)
    0     1.00
    1     0.33
    2     1.00
    3     0.75
    4     0.33
    5     1.00
    6     0.67
    7     1.00
    8     0.75
    9     0.50
    10    0.67
    11    1.00
    dtype: float64
    """
    return df[COLUMNS["mark"]] / df[COLUMNS["score_rate"]]
# Postprocessing question scores
def pp_q_scores(df):
    """ Postprocess a questions-scores dataframe

    :param df: questions-scores dataframe
    :return: a copy of df with the mark, level and normalized columns added
    """
    # df.assign evaluates each callable against df, adding one column per key.
    return df.assign(
        **{
            COLUMNS["mark"]: compute_mark,
            COLUMNS["level"]: compute_level,
            COLUMNS["normalized"]: compute_normalized,
        }
    )
# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python
# encoding: utf-8

import yaml

# Path to the yaml configuration file, resolved relative to the cwd.
CONFIGPATH = "recoconfig.yml"

# Loaded once at import time; safe_load avoids constructing arbitrary
# Python objects from the (potentially hand-edited) config file.
with open(CONFIGPATH, "r") as configfile:
    config = yaml.safe_load(configfile)

View File

@@ -0,0 +1,160 @@
#!/usr/bin/env python
# encoding: utf-8
import click
from pathlib import Path
from datetime import datetime
from PyInquirer import prompt, print_json
import pandas as pd
import numpy as np
from .config import config
from ..config import NO_ST_COLUMNS
class PromptAbortException(EOFError):
    """Raised when the user aborts an interactive questionnaire.

    :param message: description of what was aborted (e.g. "Abort item creation")
    :param errors: optional extra error context, stored in args
    """

    def __init__(self, message, errors=None):
        # Forward the caller's message instead of a hardcoded string so the
        # abort reason (previously silently discarded) is preserved.
        super(PromptAbortException, self).__init__(message, errors)
def get_tribes(answers):
    """ List tribes based on subdirectory of config["source"] which have an "eleves.csv" file inside """
    tribes = []
    for child in Path(config["source"]).iterdir():
        # A directory counts as a tribe only if it holds a student list.
        if (child / "eleves.csv").exists():
            tribes.append(child.name)
    return tribes
def prepare_csv():
    """ Interactively build a new assessment csv skeleton.

    Prompts the user (via new_eval) for the assessment metadata and its
    items, then writes a csv with one row per item and one empty column
    per student of the selected tribe.
    """
    items = new_eval()
    # All items share the same tribe/date/assessment metadata; use the
    # first one to build the destination path.
    item = items[0]
    # item = {"tribe": "308", "date": datetime.today(), "assessment": "plop"}
    csv_output = (
        Path(config["source"])
        / item["tribe"]
        / f"{item['date']:%y%m%d}_{item['assessment']}.csv"
    )
    # NOTE(review): assumes the tribe's eleves.csv has a "Nom" column — confirm.
    students = pd.read_csv(Path(config["source"]) / item["tribe"] / "eleves.csv")["Nom"]
    # Keep only the non-student columns, in NO_ST_COLUMNS declaration order.
    columns = list(NO_ST_COLUMNS.keys())
    items = [[it[c] for c in columns] for it in items]
    columns = list(NO_ST_COLUMNS.values())
    items_df = pd.DataFrame.from_records(items, columns=columns)
    # One empty (NaN) column per student, to be filled in during grading.
    for s in students:
        items_df[s] = np.nan
    items_df.to_csv(csv_output, index=False, date_format="%d/%m/%Y")
    click.echo(f"Saving csv file to {csv_output}")
def new_eval(answers={}):
    """ Prompt for a new assessment (metadata, then exercises one by one).

    :param answers: prefilled answers. NOTE(review): unused in this body and a
        mutable default — kept for signature symmetry with new_exercice/new_item.
    :return: flat list of item dicts collected across all exercises
    """
    click.echo(f"Préparation d'un nouveau devoir")

    eval_questions = [
        {"type": "input", "name": "assessment", "message": "Nom de l'évaluation",},
        {
            "type": "list",
            "name": "tribe",
            "message": "Classe concernée",
            "choices": get_tribes,
        },
        {
            "type": "input",
            "name": "date",
            "message": "Date du devoir (%y%m%d)",
            "default": datetime.today().strftime("%y%m%d"),
            # Parsed to a datetime so callers can format it (e.g. in filenames).
            "filter": lambda val: datetime.strptime(val, "%y%m%d"),
        },
        {
            "type": "list",
            "name": "term",
            "message": "Trimestre",
            "choices": ["1", "2", "3"],
        },
    ]

    eval_ans = prompt(eval_questions)

    items = []
    add_exo = True
    # Keep adding exercises until the user answers "no".
    while add_exo:
        ex_items = new_exercice(eval_ans)
        items += ex_items
        add_exo = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_exo",
                    "message": "Ajouter un autre exercice",
                    "default": True,
                }
            ]
        )["add_exo"]

    return items
def new_exercice(answers={}):
    """ Prompt for a new exercise and its items.

    :param answers: answers inherited from the assessment prompts, merged
        into each item. NOTE(review): mutable default, shared across calls
        if ever mutated.
    :return: list of item dicts for this exercise
    """
    exercise_questions = [
        {"type": "input", "name": "exercise", "message": "Nom de l'exercice"},
    ]

    click.echo(f"Nouvel exercice")
    exercise_ans = prompt(exercise_questions, answers=answers)

    items = []
    add_item = True
    # Keep adding items until the user answers "no"; an aborted item
    # (PromptAbortException) is simply discarded.
    while add_item:
        try:
            item_ans = new_item(exercise_ans)
        except PromptAbortException:
            click.echo("Création de l'item annulée")
        else:
            items.append(item_ans)
        add_item = prompt(
            [
                {
                    "type": "confirm",
                    "name": "add_item",
                    "message": f"Ajouter un autre item pour l'exercice {exercise_ans['exercise']}",
                    "default": True,
                }
            ]
        )["add_item"]

    return items
def new_item(answers={}):
    """ Prompt for a single question item of an exercise.

    :param answers: answers inherited from the exercise prompts (must contain
        "exercise"), merged into the returned dict. NOTE(review): mutable default.
    :return: dict with the item's fields
    :raises PromptAbortException: when the user rejects the final confirmation
    """
    item_questions = [
        {"type": "input", "name": "question", "message": "Nom de l'item",},
        {"type": "input", "name": "comment", "message": "Commentaire",},
        {
            "type": "list",
            "name": "competence",
            "message": "Competence",
            "choices": ["Cher", "Rep", "Mod", "Rai", "Cal", "Com"],
        },
        {"type": "input", "name": "theme", "message": "Domaine",},
        {
            "type": "confirm",
            "name": "is_leveled",
            "message": "Évaluation par niveau",
            "default": True,
        },
        {"type": "input", "name": "score_rate", "message": "Bareme"},
        {
            "type": "confirm",
            "name": "correct",
            "message": "Tout est correct?",
            "default": True,
        },
    ]
    click.echo(f"Nouvelle question pour l'exercice {answers['exercise']}")
    item_ans = prompt(item_questions, answers=answers)
    # The user gets a last chance to reject the item they just described.
    if item_ans["correct"]:
        return item_ans
    raise PromptAbortException("Abort item creation")

View File

@@ -2,7 +2,14 @@
# encoding: utf-8
import click
from ..dashboard.app import app as dash
from pathlib import Path
import yaml
import sys
import papermill as pm
from datetime import datetime
from .prepare_csv import prepare_csv
from .config import config
@click.group()
@@ -11,6 +18,85 @@ def cli():
@cli.command()
@click.option("--debug", default=0, help="Debug mode for dash")
def dashboard(debug):
    """ Launch the dash dashboard web server. """
    dash.run_server(debug=bool(debug))
def print_config():
    """ Echo the configuration file location and its contents. """
    # Only `config` is imported from .config at module level, so CONFIGPATH
    # would be a NameError here; import it locally.
    from .config import CONFIGPATH

    click.echo(f"Config file is {CONFIGPATH}")
    click.echo("It contains")
    click.echo(config)
def reporting(csv_file):
    """ Build the papermill evaluation report for one scores csv.

    The csv filename is expected to look like ``<yymmdd>_<assessment>.csv``
    inside a tribe directory; the notebook is rendered from
    config["templates"]/tpl_evaluation.ipynb into config["output"].

    :param csv_file: Path to the csv file with the assessment scores
    """
    # csv_file = Path(csv_file)
    tribe_dir = csv_file.parent
    csv_filename = csv_file.name.split(".")[0]

    assessment = str(csv_filename).split("_")[-1].capitalize()
    date = str(csv_filename).split("_")[0]
    try:
        date = datetime.strptime(date, "%y%m%d")
    except ValueError:
        # Bug fix: the original called datetime.today().strptime(date, ...)
        # which re-parsed the same invalid string and raised again.
        # Fall back to today's date instead.
        date = datetime.today()

    tribe = str(tribe_dir).split("/")[-1]

    template = Path(config["templates"]) / "tpl_evaluation.ipynb"
    dest = Path(config["output"]) / tribe / csv_filename
    dest.mkdir(parents=True, exist_ok=True)

    click.echo(f"Building {assessment} ({date:%d/%m/%y}) report")
    pm.execute_notebook(
        str(template),
        str(dest / f"{assessment}.ipynb"),
        parameters=dict(
            tribe=tribe,
            assessment=assessment,
            date=f"{date:%d/%m/%y}",
            csv_file=str(csv_file.absolute()),
        ),
    )
@cli.command()
@click.argument("target", required=False)
def report(target=""):
    """ Make a report for the eval

    :param target: csv file or a directory where csvs are; when omitted,
        every csv under config["source"] is processed
    """
    try:
        if target.endswith(".csv"):
            csv = Path(target)
            if not csv.exists():
                click.echo(f"{target} does not exists")
                sys.exit(1)
            # NOTE(review): unreachable — target already ends with ".csv" here.
            if csv.suffix != ".csv":
                click.echo(f"{target} has to be a csv file")
                sys.exit(1)
            csvs = [csv]
        else:
            csvs = list(Path(target).glob("**/*.csv"))
    except AttributeError:
        # target is None (argument omitted): scan the configured source tree.
        csvs = list(Path(config["source"]).glob("**/*.csv"))

    # Report each csv independently; a papermill failure on one file
    # does not stop the others.
    for csv in csvs:
        click.echo(f"Processing {csv}")
        try:
            reporting(csv)
        except pm.exceptions.PapermillExecutionError as e:
            click.echo(f"Error with {csv}: {e}")
@cli.command()
def prepare():
    """ Prepare csv file """
    # NOTE(review): prepare_csv has no return statement, so this echoes None.
    items = prepare_csv()
    click.echo(items)
@cli.command()
@click.argument("tribe")
def random_pick(tribe):
    """ Randomly pick a student """
    # TODO: not implemented yet — command is a placeholder.
    pass

View File

@@ -1,4 +1,76 @@
pandas
click
papermill
prompt_toolkit
ansiwrap==0.8.4
appdirs==1.4.3
attrs==19.1.0
backcall==0.1.0
black==19.10b0
bleach==3.1.0
certifi==2019.6.16
chardet==3.0.4
Click==7.0
colorama==0.4.1
cycler==0.10.0
decorator==4.4.0
defusedxml==0.6.0
entrypoints==0.3
future==0.17.1
idna==2.8
importlib-resources==1.0.2
ipykernel==5.1.3
ipython==7.11.1
ipython-genutils==0.2.0
ipywidgets==7.5.1
jedi==0.15.2
Jinja2==2.10.3
jsonschema==3.2.0
jupyter==1.0.0
jupyter-client==5.3.4
jupyter-console==6.1.0
jupyter-core==4.6.1
jupytex==0.0.3
kiwisolver==1.1.0
Markdown==3.1.1
MarkupSafe==1.1.1
matplotlib==3.1.2
mistune==0.8.4
nbconvert==5.6.1
nbformat==5.0.3
notebook==6.0.3
numpy==1.18.1
pandas==0.25.3
pandocfilters==1.4.2
papermill==1.2.1
parso==0.5.2
pathspec==0.7.0
pexpect==4.8.0
pickleshare==0.7.5
prometheus-client==0.7.1
prompt-toolkit==1.0.14
ptyprocess==0.6.0
Pygments==2.5.2
PyInquirer==1.0.3
pyparsing==2.4.6
pyrsistent==0.15.7
python-dateutil==2.8.0
pytz==2019.3
PyYAML==5.3
pyzmq==18.1.1
qtconsole==4.6.0
-e git+git_opytex:/lafrite/recopytex.git@7e026bedb24c1ca8bef3b71b3d63f8b0d6916e81#egg=Recopytex
regex==2020.1.8
requests==2.22.0
scipy==1.4.1
Send2Trash==1.5.0
six==1.12.0
tenacity==6.0.0
terminado==0.8.3
testpath==0.4.4
textwrap3==0.9.2
toml==0.10.0
tornado==6.0.3
tqdm==4.41.1
traitlets==4.3.2
typed-ast==1.4.1
urllib3==1.25.8
wcwidth==0.1.8
webencodings==0.5.1
widgetsnbextension==3.5.1

View File

@@ -1,69 +0,0 @@
ansiwrap
attrs
backcall
bleach
certifi
chardet
Click
colorama
cycler
decorator
defusedxml
entrypoints
future
idna
importlib-resources
ipykernel
ipython
ipython-genutils
ipywidgets
jedi
Jinja2
jsonschema
jupyter
jupyter-client
jupyter-console
jupyter-core
jupytex
kiwisolver
MarkupSafe
matplotlib
mistune
nbconvert
nbformat
notebook
numpy
pandas
pandocfilters
papermill
parso
pexpect
pickleshare
prometheus-client
prompt-toolkit
ptyprocess
Pygments
pyparsing
pyrsistent
python-dateutil
pytz
PyYAML
pyzmq
qtconsole
-e git+git_opytex:/lafrite/recopytex.git@e9a8310f151ead60434ae944d726a2fd22b23d06#egg=Recopytex
requests
scipy
seaborn
Send2Trash
six
tenacity
terminado
testpath
textwrap3
tornado
tqdm
traitlets
urllib3
wcwidth
webencodings
widgetsnbextension

View File

@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
setup(
name='Recopytex',
version='0.1',
version='1.1.1',
description='Assessment analysis',
author='Benjamin Bertrand',
author_email='',
@@ -13,6 +13,11 @@ setup(
include_package_data=True,
install_requires=[
'Click',
'pandas',
'numpy',
'papermill',
'pyyaml',
'PyInquirer',
],
entry_points='''
[console_scripts]

View File

@@ -1,13 +0,0 @@
---
source: ./example
output: ./output
templates: templates/
tribes:
Tribe1:
name: Tribe1
type: Type1
students: tribe1.csv
Tribe2:
name: Tribe2
students: tribe2.csv