# notytex/models.py — SQLAlchemy models for the NotyTex grading application.
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from sqlalchemy import CheckConstraint, Enum
from typing import Optional, Dict, Any

# Shared SQLAlchemy handle; bound to the Flask app by the application factory.
db = SQLAlchemy()
class GradingCalculator:
    """Unified calculator for every grading type.

    The USE_STRATEGY_PATTERN feature flag switches between the legacy
    conditional logic and the newer Strategy-pattern services.
    """

    @staticmethod
    def calculate_score(grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """Single entry point for all score computations.

        Args:
            grade_value: Raw grade value (e.g. '15.5', '2', '.', 'd').
            grading_type: Grading kind, either 'notes' or 'score'.
            max_points: Maximum points of the grading element.

        Returns:
            The computed score, or None for exempted values.
        """
        # Feature flags live in project config; import lazily to avoid cycles.
        from config.feature_flags import is_feature_enabled, FeatureFlag

        if is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN):
            # New path: Strategy pattern with dependency injection.
            return GradingCalculator._calculate_score_with_strategy(
                grade_value, grading_type, max_points
            )
        # Old path: direct conditional logic.
        return GradingCalculator._calculate_score_legacy(grade_value, grading_type, max_points)

    @staticmethod
    def _calculate_score_with_strategy(grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """New implementation delegating to the Strategy-pattern services."""
        from services.assessment_services import UnifiedGradingCalculator
        from providers.concrete_providers import ConfigManagerProvider

        # Dependency injection avoids circular imports with app_config.
        calculator = UnifiedGradingCalculator(ConfigManagerProvider())
        return calculator.calculate_score(grade_value, grading_type, max_points)

    @staticmethod
    def _calculate_score_legacy(grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """Legacy conditional implementation (kept for compatibility)."""
        # Imported at call time to avoid circular imports.
        from app_config import config_manager

        # Special values take precedence over numeric parsing.
        if config_manager.is_special_value(grade_value):
            configured = config_manager.get_special_values()[grade_value]['value']
            # None means "exempted"; otherwise a fixed value (0 for '.', 'a').
            return None if configured is None else float(configured)

        try:
            if grading_type == 'notes':
                return float(grade_value)
            if grading_type == 'score':
                # A 0-3 level converted into a share of max_points.
                level = int(grade_value)
                if 0 <= level <= 3:
                    return (level / 3) * max_points
                return 0.0
        except (ValueError, TypeError):
            return 0.0
        # Unknown grading type: treat as zero.
        return 0.0

    @staticmethod
    def is_counted_in_total(grade_value: str, grading_type: str) -> bool:
        """Decide whether a grade participates in the total.

        Uses the USE_STRATEGY_PATTERN feature flag to pick the implementation.

        Returns:
            True when the grade counts toward the total, False otherwise
            (e.g. an exempted student).
        """
        from config.feature_flags import is_feature_enabled, FeatureFlag

        if is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN):
            # New path: Strategy pattern services.
            return GradingCalculator._is_counted_in_total_with_strategy(grade_value)
        # Old path: direct config lookup.
        return GradingCalculator._is_counted_in_total_legacy(grade_value)

    @staticmethod
    def _is_counted_in_total_with_strategy(grade_value: str) -> bool:
        """New implementation using dependency injection."""
        from services.assessment_services import UnifiedGradingCalculator
        from providers.concrete_providers import ConfigManagerProvider

        # Dependency injection avoids circular imports with app_config.
        calculator = UnifiedGradingCalculator(ConfigManagerProvider())
        return calculator.is_counted_in_total(grade_value)

    @staticmethod
    def _is_counted_in_total_legacy(grade_value: str) -> bool:
        """Legacy implementation reading config_manager directly."""
        from app_config import config_manager

        # Every regular value counts; only special values may be excluded.
        if not config_manager.is_special_value(grade_value):
            return True
        return config_manager.get_special_values()[grade_value]['counts']
class ClassGroup(db.Model):
    """A class of students for one school year, with dashboard analytics."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False, unique=True)
    description = db.Column(db.Text)
    year = db.Column(db.String(20), nullable=False)
    students = db.relationship('Student', backref='class_group', lazy=True)
    assessments = db.relationship('Assessment', backref='class_group', lazy=True)

    def get_trimester_statistics(self, trimester=None):
        """Return aggregate statistics for a trimester or for all assessments.

        Args:
            trimester: Trimester to filter on (1, 2, 3) or None for all assessments.

        Returns:
            Dict with total count and breakdown by status
            (completed / in progress / not started).
        """
        try:
            # Prefer pre-filtered assessments when the repository injected them.
            if hasattr(self, '_filtered_assessments'):
                assessments = self._filtered_assessments
            else:
                # Base query, joins optimized below.
                query = Assessment.query.filter(Assessment.class_group_id == self.id)
                # Optional trimester filter.
                if trimester is not None:
                    query = query.filter(Assessment.trimester == trimester)
                # Eager-load exercises and their grading elements in one go.
                assessments = query.options(
                    db.joinedload(Assessment.exercises).joinedload(Exercise.grading_elements)
                ).all()
            # Number of students in the class.
            students_count = len(self.students)
            # Initialize the counters.
            total_assessments = len(assessments)
            completed_count = 0
            in_progress_count = 0
            not_started_count = 0
            # Classify each assessment by its grading status.
            for assessment in assessments:
                # Reuse the existing grading_progress property.
                progress = assessment.grading_progress
                status = progress['status']
                if status == 'completed':
                    completed_count += 1
                elif status in ['in_progress']:
                    in_progress_count += 1
                else:  # not_started, no_students, no_elements
                    not_started_count += 1
            return {
                'total': total_assessments,
                'completed': completed_count,
                'in_progress': in_progress_count,
                'not_started': not_started_count,
                'students_count': students_count,
                'trimester': trimester
            }
        except Exception as e:
            from flask import current_app
            current_app.logger.error(f"Erreur dans get_trimester_statistics: {e}", exc_info=True)
            # Fail soft: return an empty result rather than propagating.
            return {
                'total': 0,
                'completed': 0,
                'in_progress': 0,
                'not_started': 0,
                'students_count': 0,
                'trimester': trimester
            }

    def get_domain_analysis(self, trimester=None):
        """Analyze the domains covered by the assessments of a trimester.

        Args:
            trimester: Trimester to filter on (1, 2, 3) or None for all assessments.

        Returns:
            Dict with the list of domains, total points and element count per domain.
        """
        try:
            # Prefer pre-filtered assessments when available.
            if hasattr(self, '_filtered_assessments'):
                assessment_ids = [a.id for a in self._filtered_assessments]
                if not assessment_ids:
                    return {'domains': [], 'trimester': trimester}
                query = db.session.query(
                    GradingElement.domain_id,
                    Domain.name.label('domain_name'),
                    Domain.color.label('domain_color'),
                    db.func.sum(GradingElement.max_points).label('total_points'),
                    db.func.count(GradingElement.id).label('elements_count')
                ).select_from(GradingElement)\
                    .join(Exercise, GradingElement.exercise_id == Exercise.id)\
                    .outerjoin(Domain, GradingElement.domain_id == Domain.id)\
                    .filter(Exercise.assessment_id.in_(assessment_ids))
            else:
                # Original query with all required joins.
                query = db.session.query(
                    GradingElement.domain_id,
                    Domain.name.label('domain_name'),
                    Domain.color.label('domain_color'),
                    db.func.sum(GradingElement.max_points).label('total_points'),
                    db.func.count(GradingElement.id).label('elements_count')
                ).select_from(GradingElement)\
                    .join(Exercise, GradingElement.exercise_id == Exercise.id)\
                    .join(Assessment, Exercise.assessment_id == Assessment.id)\
                    .outerjoin(Domain, GradingElement.domain_id == Domain.id)\
                    .filter(Assessment.class_group_id == self.id)
                # Optional trimester filter (Assessment is only joined here).
                if trimester is not None:
                    query = query.filter(Assessment.trimester == trimester)
            # Group by domain (including elements with no domain assigned).
            query = query.group_by(
                GradingElement.domain_id,
                Domain.name,
                Domain.color
            )
            results = query.all()
            domains = []
            for result in results:
                if result.domain_id is not None:
                    # Element with a defined domain.
                    domains.append({
                        'id': result.domain_id,
                        'name': result.domain_name,
                        'color': result.domain_color,
                        'total_points': float(result.total_points) if result.total_points else 0.0,
                        'elements_count': result.elements_count
                    })
                else:
                    # Elements with no domain assigned.
                    domains.append({
                        'id': None,
                        'name': 'Sans domaine',
                        'color': '#6B7280',  # neutral gray
                        'total_points': float(result.total_points) if result.total_points else 0.0,
                        'elements_count': result.elements_count
                    })
            # Sort alphabetically, with "Sans domaine" (no domain) last.
            domains.sort(key=lambda x: (x['name'] == 'Sans domaine', x['name'].lower()))
            return {
                'domains': domains,
                'trimester': trimester
            }
        except Exception as e:
            from flask import current_app
            current_app.logger.error(f"Erreur dans get_domain_analysis: {e}", exc_info=True)
            # Fail soft with an empty result.
            return {
                'domains': [],
                'trimester': trimester
            }

    def get_competence_analysis(self, trimester=None):
        """Analyze the competences evaluated in a trimester.

        Args:
            trimester: Trimester to filter on (1, 2, 3) or None for all assessments.

        Returns:
            Dict with the list of competences, total points and element count each.
        """
        try:
            # Prefer pre-filtered assessments when available.
            if hasattr(self, '_filtered_assessments'):
                assessment_ids = [a.id for a in self._filtered_assessments]
                if not assessment_ids:
                    return {'competences': [], 'trimester': trimester}
                query = db.session.query(
                    GradingElement.skill.label('skill_name'),
                    db.func.sum(GradingElement.max_points).label('total_points'),
                    db.func.count(GradingElement.id).label('elements_count')
                ).select_from(GradingElement)\
                    .join(Exercise, GradingElement.exercise_id == Exercise.id)\
                    .filter(Exercise.assessment_id.in_(assessment_ids))\
                    .filter(GradingElement.skill.isnot(None))\
                    .filter(GradingElement.skill != '')
            else:
                # Optimized query to aggregate competences.
                query = db.session.query(
                    GradingElement.skill.label('skill_name'),
                    db.func.sum(GradingElement.max_points).label('total_points'),
                    db.func.count(GradingElement.id).label('elements_count')
                ).select_from(GradingElement)\
                    .join(Exercise, GradingElement.exercise_id == Exercise.id)\
                    .join(Assessment, Exercise.assessment_id == Assessment.id)\
                    .filter(Assessment.class_group_id == self.id)\
                    .filter(GradingElement.skill.isnot(None))\
                    .filter(GradingElement.skill != '')
                # Optional trimester filter (Assessment is only joined here).
                if trimester is not None:
                    query = query.filter(Assessment.trimester == trimester)
            # Group by competence name.
            query = query.group_by(GradingElement.skill)
            results = query.all()
            # Fetch the competence configuration for colors.
            from app_config import config_manager
            competences_config = {comp['name']: comp for comp in config_manager.get_competences_list()}
            competences = []
            for result in results:
                skill_name = result.skill_name
                # Use the configured color, or a default neutral gray.
                config = competences_config.get(skill_name, {})
                color = config.get('color', '#6B7280')
                competences.append({
                    'name': skill_name,
                    'color': color,
                    'total_points': float(result.total_points) if result.total_points else 0.0,
                    'elements_count': result.elements_count
                })
            # Sort alphabetically.
            competences.sort(key=lambda x: x['name'].lower())
            return {
                'competences': competences,
                'trimester': trimester
            }
        except Exception as e:
            from flask import current_app
            current_app.logger.error(f"Erreur dans get_competence_analysis: {e}", exc_info=True)
            # Fail soft with an empty result.
            return {
                'competences': [],
                'trimester': trimester
            }

    def get_class_results(self, trimester=None):
        """Result statistics for the class over a trimester.

        Args:
            trimester: Trimester to filter on (1, 2, 3) or None for all assessments.

        Returns:
            Dict with per-assessment averages, grade distribution and
            overall statistical metrics (all scores normalized to /20).
        """
        try:
            # Prefer pre-filtered assessments when available.
            if hasattr(self, '_filtered_assessments'):
                assessments = self._filtered_assessments
            else:
                # Build the assessment query with filters.
                assessments_query = Assessment.query.filter(Assessment.class_group_id == self.id)
                if trimester is not None:
                    assessments_query = assessments_query.filter(Assessment.trimester == trimester)
                assessments = assessments_query.all()
            if not assessments:
                return {
                    'trimester': trimester,
                    'assessments_count': 0,
                    'students_count': len(self.students),
                    'class_averages': [],
                    'overall_statistics': {
                        'count': 0,
                        'mean': 0,
                        'median': 0,
                        'min': 0,
                        'max': 0,
                        'std_dev': 0
                    },
                    'distribution': []
                }
            # Per-assessment class averages.
            class_averages = []
            all_individual_scores = []  # every individual score, for global stats
            for assessment in assessments:
                # Reuse the existing calculate_student_scores method.
                students_scores, _ = assessment.calculate_student_scores()
                # Extract the individual scores.
                individual_scores = []
                for student_data in students_scores.values():
                    score = student_data['total_score']
                    max_points = student_data['total_max_points']
                    if max_points > 0:  # avoid division by zero
                        # Normalize to /20 so assessments are comparable.
                        normalized_score = (score / max_points) * 20
                        individual_scores.append(normalized_score)
                        all_individual_scores.append(normalized_score)
                # Class average for this assessment.
                if individual_scores:
                    import statistics
                    class_average = statistics.mean(individual_scores)
                    class_averages.append({
                        'assessment_id': assessment.id,
                        'assessment_title': assessment.title,
                        'date': assessment.date.isoformat() if assessment.date else None,
                        'class_average': round(class_average, 2),
                        'students_evaluated': len(individual_scores),
                        'max_possible': 20  # normalized to /20
                    })
            # Global statistics over every score of the trimester.
            overall_stats = {
                'count': 0,
                'mean': 0,
                'median': 0,
                'min': 0,
                'max': 0,
                'std_dev': 0
            }
            distribution = []
            if all_individual_scores:
                import statistics
                import math
                overall_stats = {
                    'count': len(all_individual_scores),
                    'mean': round(statistics.mean(all_individual_scores), 2),
                    'median': round(statistics.median(all_individual_scores), 2),
                    'min': round(min(all_individual_scores), 2),
                    'max': round(max(all_individual_scores), 2),
                    'std_dev': round(statistics.stdev(all_individual_scores) if len(all_individual_scores) > 1 else 0, 2)
                }
                # Build the distribution histogram (1-point bins on /20).
                bins = list(range(0, 22))  # 0-1, 1-2, ..., 19-20, 20+
                bin_counts = [0] * (len(bins) - 1)
                for score in all_individual_scores:
                    # Clamp into the last bin ("20+") when score >= 20.
                    bin_index = min(int(score), len(bin_counts) - 1)
                    bin_counts[bin_index] += 1
                # Format for Chart.js.
                for i in range(len(bin_counts)):
                    if i == len(bin_counts) - 1:
                        label = f"{bins[i]}+"
                    else:
                        label = f"{bins[i]}-{bins[i+1]}"
                    distribution.append({
                        'range': label,
                        'count': bin_counts[i]
                    })
            return {
                'trimester': trimester,
                'assessments_count': len(assessments),
                'students_count': len(self.students),
                'class_averages': class_averages,
                'overall_statistics': overall_stats,
                'distribution': distribution
            }
        except Exception as e:
            from flask import current_app
            current_app.logger.error(f"Erreur dans get_class_results: {e}", exc_info=True)
            # Fail soft with an empty result.
            return {
                'trimester': trimester,
                'assessments_count': 0,
                'students_count': len(self.students) if hasattr(self, 'students') else 0,
                'class_averages': [],
                'overall_statistics': {
                    'count': 0,
                    'mean': 0,
                    'median': 0,
                    'min': 0,
                    'max': 0,
                    'std_dev': 0
                },
                'distribution': []
            }

    def __repr__(self):
        return f'<ClassGroup {self.name}>'
class Student(db.Model):
    """A student enrolled in one class group."""
    id = db.Column(db.Integer, primary_key=True)
    last_name = db.Column(db.String(100), nullable=False)
    first_name = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(120), unique=True)
    class_group_id = db.Column(db.Integer, db.ForeignKey('class_group.id'), nullable=False)
    grades = db.relationship('Grade', backref='student', lazy=True)

    def __repr__(self):
        return f'<Student {self.first_name} {self.last_name}>'

    @property
    def full_name(self):
        """Display name: first name followed by last name."""
        return f"{self.first_name} {self.last_name}"
class Assessment(db.Model):
    """An assessment (test) taken by one class group during one trimester."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200), nullable=False)
    description = db.Column(db.Text)
    # NOTE(review): default=datetime.utcnow returns a datetime for a Date
    # column — relies on coercion; confirm date.today wasn't intended.
    date = db.Column(db.Date, nullable=False, default=datetime.utcnow)
    trimester = db.Column(db.Integer, nullable=False)  # 1, 2, or 3
    class_group_id = db.Column(db.Integer, db.ForeignKey('class_group.id'), nullable=False)
    coefficient = db.Column(db.Float, default=1.0)  # kept as Float for compatibility
    exercises = db.relationship('Exercise', backref='assessment', lazy=True, cascade='all, delete-orphan')
    __table_args__ = (
        CheckConstraint('trimester IN (1, 2, 3)', name='check_trimester_valid'),
    )

    def __repr__(self):
        return f'<Assessment {self.title}>'

    @property
    def grading_progress(self):
        """Percentage of grades entered for this assessment.

        The USE_REFACTORED_ASSESSMENT feature flag switches between the
        legacy in-model logic and the optimized AssessmentProgressService.

        Returns:
            Dict with the progress statistics (percentage, completed, total,
            status, students_count).
        """
        # Feature flag for progressive migration to AssessmentProgressService.
        from config.feature_flags import is_feature_enabled, FeatureFlag
        if is_feature_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT):
            # New implementation: AssessmentProgressService.
            return self._grading_progress_with_service()
        else:
            # Old implementation: logic inside the model.
            return self._grading_progress_legacy()

    def _grading_progress_with_service(self):
        """New implementation using AssessmentProgressService via DI.

        Optimizes the queries to avoid N+1 problems.
        """
        from providers.concrete_providers import AssessmentServicesFactory
        # Dependency injection avoids circular imports.
        services_facade = AssessmentServicesFactory.create_facade()
        progress_result = services_facade.get_grading_progress(self)
        # Convert the ProgressResult to the expected legacy dict shape.
        return {
            'percentage': progress_result.percentage,
            'completed': progress_result.completed,
            'total': progress_result.total,
            'status': progress_result.status,
            'students_count': progress_result.students_count
        }

    def _grading_progress_legacy(self):
        """Legacy implementation issuing one query per grading element."""
        # Counters over every grading element of this assessment.
        total_elements = 0
        completed_elements = 0
        total_students = len(self.class_group.students)
        if total_students == 0:
            return {
                'percentage': 0,
                'completed': 0,
                'total': 0,
                'status': 'no_students',
                'students_count': 0
            }
        # Walk every exercise and its grading elements.
        for exercise in self.exercises:
            for grading_element in exercise.grading_elements:
                total_elements += total_students
                # Count entered grades (non-null and non-empty, '.' included).
                completed_for_element = db.session.query(Grade).filter(
                    Grade.grading_element_id == grading_element.id,
                    Grade.value.isnot(None),
                    Grade.value != ''
                ).count()
                completed_elements += completed_for_element
        if total_elements == 0:
            return {
                'percentage': 0,
                'completed': 0,
                'total': 0,
                'status': 'no_elements',
                'students_count': total_students
            }
        percentage = round((completed_elements / total_elements) * 100)
        # Map the percentage to a status label.
        if percentage == 0:
            status = 'not_started'
        elif percentage == 100:
            status = 'completed'
        else:
            status = 'in_progress'
        return {
            'percentage': percentage,
            'completed': completed_elements,
            'total': total_elements,
            'status': status,
            'students_count': total_students
        }

    def calculate_student_scores(self, grade_repo=None):
        """Compute every student's scores for this assessment.

        Returns a dict of scores per student and per exercise.
        Simplified scoring logic with only 2 grading types.

        Args:
            grade_repo: Grade repository (optional, for dependency injection).
        """
        # Feature flag for progressive migration to the optimized services.
        from config.feature_flags import is_feature_enabled, FeatureFlag
        if is_feature_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT):
            return self._calculate_student_scores_optimized()
        return self._calculate_student_scores_legacy(grade_repo)

    def _calculate_student_scores_optimized(self):
        """Optimized version: decoupled services, single query."""
        from providers.concrete_providers import AssessmentServicesFactory
        services = AssessmentServicesFactory.create_facade()
        students_scores_data, exercise_scores_data = services.score_calculator.calculate_student_scores(self)
        # Convert to the legacy format for compatibility.
        students_scores = {}
        exercise_scores = {}
        for student_id, score_data in students_scores_data.items():
            # Fetch the Student object for legacy-format compatibility.
            student_obj = next(s for s in self.class_group.students if s.id == student_id)
            students_scores[student_id] = {
                'student': student_obj,
                'total_score': score_data.total_score,
                'total_max_points': score_data.total_max_points,
                'exercises': score_data.exercises
            }
        for exercise_id, student_scores in exercise_scores_data.items():
            exercise_scores[exercise_id] = dict(student_scores)
        return students_scores, exercise_scores

    def _calculate_student_scores_legacy(self, grade_repo=None):
        """Legacy version with N+1 queries — kept temporarily."""
        from collections import defaultdict
        students_scores = {}
        exercise_scores = defaultdict(lambda: defaultdict(float))
        for student in self.class_group.students:
            total_score = 0
            total_max_points = 0
            student_exercises = {}
            for exercise in self.exercises:
                exercise_score = 0
                exercise_max_points = 0
                for element in exercise.grading_elements:
                    if grade_repo:
                        grade = grade_repo.find_by_student_and_element(student.id, element.id)
                    else:
                        # Fallback: direct query (the old way).
                        grade = Grade.query.filter_by(
                            student_id=student.id,
                            grading_element_id=element.id
                        ).first()
                    # Only when a grade was entered (special values included).
                    if grade and grade.value and grade.value != '':
                        # Use the unified scoring logic.
                        calculated_score = GradingCalculator.calculate_score(
                            grade.value.strip(),
                            element.grading_type,
                            element.max_points
                        )
                        # Check whether this grade counts toward the total.
                        if GradingCalculator.is_counted_in_total(grade.value.strip(), element.grading_type):
                            if calculated_score is not None:  # not exempted
                                exercise_score += calculated_score
                                exercise_max_points += element.max_points
                        # Not counted or exempted: ignored entirely.
                student_exercises[exercise.id] = {
                    'score': exercise_score,
                    'max_points': exercise_max_points,
                    'title': exercise.title
                }
                total_score += exercise_score
                total_max_points += exercise_max_points
                exercise_scores[exercise.id][student.id] = exercise_score
            students_scores[student.id] = {
                'student': student,
                'total_score': round(total_score, 2),
                'total_max_points': total_max_points,
                'exercises': student_exercises
            }
        return students_scores, dict(exercise_scores)

    def get_assessment_statistics(self):
        """Descriptive statistics for this assessment.

        The USE_REFACTORED_ASSESSMENT feature flag switches between the
        old system and the refactored services.
        """
        from config.feature_flags import FeatureFlag, is_feature_enabled
        if is_feature_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT):
            from providers.concrete_providers import AssessmentServicesFactory
            services = AssessmentServicesFactory.create_facade()
            result = services.statistics_service.get_assessment_statistics(self)
            # Convert the StatisticsResult to the legacy dict shape.
            return {
                'count': result.count,
                'mean': result.mean,
                'median': result.median,
                'min': result.min,
                'max': result.max,
                'std_dev': result.std_dev
            }
        return self._get_assessment_statistics_legacy()

    def _get_assessment_statistics_legacy(self):
        """Legacy statistics — remove after full migration."""
        students_scores, _ = self.calculate_student_scores()
        scores = [data['total_score'] for data in students_scores.values()]
        if not scores:
            return {
                'count': 0,
                'mean': 0,
                'median': 0,
                'min': 0,
                'max': 0,
                'std_dev': 0
            }
        import statistics
        import math
        return {
            'count': len(scores),
            'mean': round(statistics.mean(scores), 2),
            'median': round(statistics.median(scores), 2),
            'min': min(scores),
            'max': max(scores),
            'std_dev': round(statistics.stdev(scores) if len(scores) > 1 else 0, 2)
        }

    def get_total_max_points(self):
        """Total maximum points across every grading element."""
        total = 0
        for exercise in self.exercises:
            for element in exercise.grading_elements:
                # Simplified logic with the 2 types: notes and score.
                total += element.max_points
        return total
class Exercise(db.Model):
    """One exercise within an assessment, holding its grading elements."""
    id = db.Column(db.Integer, primary_key=True)
    assessment_id = db.Column(db.Integer, db.ForeignKey('assessment.id'), nullable=False)
    title = db.Column(db.String(200), nullable=False)
    description = db.Column(db.Text)
    order = db.Column(db.Integer, default=1)  # display order within the assessment
    grading_elements = db.relationship('GradingElement', backref='exercise', lazy=True, cascade='all, delete-orphan')

    def __repr__(self):
        return f'<Exercise {self.title}>'
class GradingElement(db.Model):
    """A gradable item of an exercise (question, criterion, skill check)."""
    id = db.Column(db.Integer, primary_key=True)
    exercise_id = db.Column(db.Integer, db.ForeignKey('exercise.id'), nullable=False)
    label = db.Column(db.String(200), nullable=False)
    description = db.Column(db.Text)
    skill = db.Column(db.String(200))
    max_points = db.Column(db.Float, nullable=False)  # kept as Float for compatibility
    # Grading kinds as a native enum.
    grading_type = db.Column(Enum('notes', 'score', name='grading_types'), nullable=False, default='notes')
    # Optional domain tag.
    domain_id = db.Column(db.Integer, db.ForeignKey('domains.id'), nullable=True)
    grades = db.relationship('Grade', backref='grading_element', lazy=True, cascade='all, delete-orphan')

    def __repr__(self):
        return f'<GradingElement {self.label}>'
class Grade(db.Model):
    """A single grade entered for one student on one grading element."""
    id = db.Column(db.Integer, primary_key=True)
    student_id = db.Column(db.Integer, db.ForeignKey('student.id'), nullable=False)
    grading_element_id = db.Column(db.Integer, db.ForeignKey('grading_element.id'), nullable=False)
    value = db.Column(db.String(10))  # legacy string format kept for compatibility
    comment = db.Column(db.Text)

    def __repr__(self):
        return f'<Grade {self.value} for {self.student.first_name if self.student else "Unknown"}>'
# Configuration tables
class AppConfig(db.Model):
    """Simple key-value application configuration."""
    __tablename__ = 'app_config'
    key = db.Column(db.String(100), primary_key=True)
    value = db.Column(db.Text, nullable=False)
    description = db.Column(db.Text)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def __repr__(self):
        return f'<AppConfig {self.key}={self.value}>'
class CompetenceScaleValue(db.Model):
    """Values of the competence scale (0, 1, 2, 3, '.', 'd', etc.)."""
    __tablename__ = 'competence_scale_values'
    value = db.Column(db.String(10), primary_key=True)  # '0', '1', '2', '3', '.', 'd', ...
    label = db.Column(db.String(100), nullable=False)
    color = db.Column(db.String(7), nullable=False)  # #RRGGBB format
    included_in_total = db.Column(db.Boolean, default=True, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def __repr__(self):
        return f'<CompetenceScaleValue {self.value}: {self.label}>'
class Competence(db.Model):
    """List of competences (e.g. Calculate, Reason, ...)."""
    __tablename__ = 'competences'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True, nullable=False)
    color = db.Column(db.String(7), nullable=False)  # #RRGGBB format
    icon = db.Column(db.String(50), nullable=False)
    order_index = db.Column(db.Integer, default=0)  # display order
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def __repr__(self):
        return f'<Competence {self.name}>'
class Domain(db.Model):
    """Domains/tags attached to grading elements."""
    __tablename__ = 'domains'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True, nullable=False)
    color = db.Column(db.String(7), nullable=False, default='#6B7280')  # #RRGGBB format
    description = db.Column(db.Text)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    # Reverse relation to the tagged grading elements.
    grading_elements = db.relationship('GradingElement', backref='domain', lazy=True)

    def __repr__(self):
        return f'<Domain {self.name}>'