refact: clean code and update doc

2025-08-09 21:49:09 +02:00
parent ac2762218e
commit 4f8ab0925b
18 changed files with 4050 additions and 3275 deletions

View File

@@ -1,448 +0,0 @@
"""
Tests de migration pour AssessmentProgressService (JOUR 4 - Étape 2.2)
Ce module teste la migration de la propriété grading_progress du modèle Assessment
vers le nouveau AssessmentProgressService, en validant que :
1. Les deux implémentations donnent des résultats identiques
2. Le feature flag fonctionne correctement
3. Les performances sont améliorées (moins de requêtes N+1)
4. Tous les cas de bord sont couverts
Conformément au plan MIGRATION_PROGRESSIVE.md, cette migration utilise le
feature flag USE_REFACTORED_ASSESSMENT pour permettre un rollback instantané.
"""
import pytest
from unittest.mock import patch, MagicMock
from datetime import datetime, date
import time
from models import db, Assessment, ClassGroup, Student, Exercise, GradingElement, Grade
from config.feature_flags import FeatureFlag
from services.assessment_services import ProgressResult
from providers.concrete_providers import AssessmentServicesFactory
class TestAssessmentProgressMigration:
"""
Suite de tests pour valider la migration de grading_progress.
"""
def test_feature_flag_disabled_uses_legacy_implementation(self, app, sample_assessment_with_grades):
"""
RÈGLE MÉTIER : Quand le feature flag USE_REFACTORED_ASSESSMENT est désactivé,
la propriété grading_progress doit utiliser l'ancienne implémentation.
"""
assessment, _, _ = sample_assessment_with_grades
# GIVEN : Feature flag désactivé (par défaut)
from config.feature_flags import feature_flags
assert not feature_flags.is_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT)
# WHEN : On accède à grading_progress
with patch.object(assessment, '_grading_progress_legacy') as mock_legacy:
mock_legacy.return_value = {
'percentage': 50,
'completed': 10,
'total': 20,
'status': 'in_progress',
'students_count': 5
}
result = assessment.grading_progress
# THEN : La méthode legacy est appelée
mock_legacy.assert_called_once()
assert result['percentage'] == 50
def test_feature_flag_enabled_uses_new_service(self, app, sample_assessment_with_grades):
"""
RÈGLE MÉTIER : Quand le feature flag USE_REFACTORED_ASSESSMENT est activé,
la propriété grading_progress doit utiliser AssessmentProgressService.
"""
assessment, _, _ = sample_assessment_with_grades
# GIVEN : Feature flag activé
from config.feature_flags import feature_flags
feature_flags.enable(FeatureFlag.USE_REFACTORED_ASSESSMENT, "Test migration")
try:
# WHEN : On accède à grading_progress
with patch.object(assessment, '_grading_progress_with_service') as mock_service:
mock_service.return_value = {
'percentage': 50,
'completed': 10,
'total': 20,
'status': 'in_progress',
'students_count': 5
}
result = assessment.grading_progress
# THEN : La méthode service est appelée
mock_service.assert_called_once()
assert result['percentage'] == 50
finally:
# Cleanup : Réinitialiser le feature flag
feature_flags.disable(FeatureFlag.USE_REFACTORED_ASSESSMENT, "Fin de test")
def test_legacy_and_service_implementations_return_identical_results(self, app, sample_assessment_with_grades):
"""
RÈGLE CRITIQUE : Les deux implémentations doivent retourner exactement
les mêmes résultats pour éviter les régressions.
"""
assessment, students, grades = sample_assessment_with_grades
# WHEN : On calcule avec les deux implémentations
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
# THEN : Les résultats doivent être identiques
assert legacy_result == service_result, (
f"Legacy: {legacy_result} != Service: {service_result}"
)
# Vérification de tous les champs
for key in ['percentage', 'completed', 'total', 'status', 'students_count']:
assert legacy_result[key] == service_result[key], (
f"Différence sur le champ {key}: {legacy_result[key]} != {service_result[key]}"
)
def test_empty_assessment_handling_consistency(self, app):
"""
CAS DE BORD : Assessment vide (pas d'exercices) - les deux implémentations
doivent gérer ce cas identiquement.
"""
# GIVEN : Assessment sans exercices mais avec des élèves
class_group = ClassGroup(name='Test Class', year='2025')
student1 = Student(first_name='John', last_name='Doe', class_group=class_group)
student2 = Student(first_name='Jane', last_name='Smith', class_group=class_group)
assessment = Assessment(
title='Empty Assessment',
date=date.today(),
trimester=1,
class_group=class_group
)
db.session.add_all([class_group, student1, student2, assessment])
db.session.commit()
# WHEN : On calcule avec les deux implémentations
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
# THEN : Résultats identiques pour cas vide
assert legacy_result == service_result
assert legacy_result['status'] == 'no_elements'
assert legacy_result['percentage'] == 0
assert legacy_result['students_count'] == 2
def test_no_students_handling_consistency(self, app):
"""
CAS DE BORD : Assessment avec exercices mais sans élèves.
"""
# GIVEN : Assessment avec exercices mais sans élèves
class_group = ClassGroup(name='Empty Class', year='2025')
assessment = Assessment(
title='Assessment No Students',
date=date.today(),
trimester=1,
class_group=class_group
)
exercise = Exercise(title='Exercise 1', assessment=assessment)
element = GradingElement(
label='Question 1',
max_points=10,
grading_type='notes',
exercise=exercise
)
db.session.add_all([class_group, assessment, exercise, element])
db.session.commit()
# WHEN : On calcule avec les deux implémentations
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
# THEN : Résultats identiques pour classe vide
assert legacy_result == service_result
assert legacy_result['status'] == 'no_students'
assert legacy_result['percentage'] == 0
assert legacy_result['students_count'] == 0
def test_partial_grading_scenarios(self, app):
"""
CAS COMPLEXE : Différents scénarios de notation partielle.
"""
# GIVEN : Assessment avec notation partielle complexe
class_group = ClassGroup(name='Test Class', year='2025')
students = [
Student(first_name=f'Student{i}', last_name=f'Test{i}', class_group=class_group)
for i in range(3)
]
assessment = Assessment(
title='Partial Assessment',
date=date.today(),
trimester=1,
class_group=class_group
)
exercise1 = Exercise(title='Ex1', assessment=assessment)
exercise2 = Exercise(title='Ex2', assessment=assessment)
element1 = GradingElement(
label='Q1', max_points=10, grading_type='notes', exercise=exercise1
)
element2 = GradingElement(
label='Q2', max_points=5, grading_type='notes', exercise=exercise1
)
element3 = GradingElement(
label='Q3', max_points=3, grading_type='score', exercise=exercise2
)
db.session.add_all([
class_group, assessment, exercise1, exercise2,
element1, element2, element3, *students
])
db.session.commit()
# Notation partielle :
# - Student0 : toutes les notes (3/3 = 100%)
# - Student1 : 2 notes sur 3 (2/3 = 67%)
# - Student2 : 1 note sur 3 (1/3 = 33%)
# Total : 6/9 = 67%
grades = [
# Student 0 : toutes les notes
Grade(student=students[0], grading_element=element1, value='8'),
Grade(student=students[0], grading_element=element2, value='4'),
Grade(student=students[0], grading_element=element3, value='2'),
# Student 1 : 2 notes
Grade(student=students[1], grading_element=element1, value='7'),
Grade(student=students[1], grading_element=element2, value='3'),
# Student 2 : 1 note
Grade(student=students[2], grading_element=element1, value='6'),
]
db.session.add_all(grades)
db.session.commit()
# WHEN : On calcule avec les deux implémentations
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
# THEN : Résultats identiques
assert legacy_result == service_result
expected_percentage = round((6 / 9) * 100) # 67%
assert legacy_result['percentage'] == expected_percentage
assert legacy_result['completed'] == 6
assert legacy_result['total'] == 9
assert legacy_result['status'] == 'in_progress'
assert legacy_result['students_count'] == 3
def test_special_values_handling(self, app):
"""
CAS COMPLEXE : Gestion des valeurs spéciales (., d, etc.).
"""
# GIVEN : Assessment avec valeurs spéciales
class_group = ClassGroup(name='Special Class', year='2025')
student = Student(first_name='John', last_name='Doe', class_group=class_group)
assessment = Assessment(
title='Special Values Assessment',
date=date.today(),
trimester=1,
class_group=class_group
)
exercise = Exercise(title='Exercise', assessment=assessment)
element1 = GradingElement(
label='Q1', max_points=10, grading_type='notes', exercise=exercise
)
element2 = GradingElement(
label='Q2', max_points=5, grading_type='notes', exercise=exercise
)
db.session.add_all([class_group, student, assessment, exercise, element1, element2])
db.session.commit()
# Notes avec valeurs spéciales
grades = [
Grade(student=student, grading_element=element1, value='.'), # Pas de réponse
Grade(student=student, grading_element=element2, value='d'), # Dispensé
]
db.session.add_all(grades)
db.session.commit()
# WHEN : On calcule avec les deux implémentations
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
# THEN : Les valeurs spéciales sont comptées comme saisies
assert legacy_result == service_result
assert legacy_result['percentage'] == 100 # 2/2 notes saisies
assert legacy_result['completed'] == 2
assert legacy_result['total'] == 2
assert legacy_result['status'] == 'completed'
class TestPerformanceImprovement:
"""
Tests de performance pour valider les améliorations de requêtes.
"""
def test_service_makes_fewer_queries_than_legacy(self, app):
"""
PERFORMANCE : Le service optimisé doit faire moins de requêtes que l'implémentation legacy.
"""
# GIVEN : Assessment avec beaucoup d'éléments pour amplifier le problème N+1
class_group = ClassGroup(name='Big Class', year='2025')
students = [
Student(first_name=f'Student{i}', last_name='Test', class_group=class_group)
for i in range(5) # 5 étudiants
]
assessment = Assessment(
title='Big Assessment',
date=date.today(),
trimester=1,
class_group=class_group
)
exercises = []
elements = []
grades = []
# 3 exercices avec 2 éléments chacun = 6 éléments total
for ex_idx in range(3):
exercise = Exercise(title=f'Ex{ex_idx}', assessment=assessment)
exercises.append(exercise)
for elem_idx in range(2):
element = GradingElement(
label=f'Q{ex_idx}-{elem_idx}',
max_points=10,
grading_type='notes',
exercise=exercise
)
elements.append(element)
# Chaque étudiant a une note pour chaque élément
for student in students:
grade = Grade(
student=student,
grading_element=element,
value=str(8 + elem_idx) # Notes variables
)
grades.append(grade)
db.session.add_all([
class_group, assessment, *students, *exercises, *elements, *grades
])
db.session.commit()
# WHEN : On mesure les requêtes pour chaque implémentation
from sqlalchemy import event
# Compteur de requêtes pour legacy
legacy_query_count = [0]
def count_legacy_queries(conn, cursor, statement, parameters, context, executemany):
legacy_query_count[0] += 1
event.listen(db.engine, "before_cursor_execute", count_legacy_queries)
try:
legacy_result = assessment._grading_progress_legacy()
finally:
event.remove(db.engine, "before_cursor_execute", count_legacy_queries)
# Compteur de requêtes pour service
service_query_count = [0]
def count_service_queries(conn, cursor, statement, parameters, context, executemany):
service_query_count[0] += 1
event.listen(db.engine, "before_cursor_execute", count_service_queries)
try:
service_result = assessment._grading_progress_with_service()
finally:
event.remove(db.engine, "before_cursor_execute", count_service_queries)
# THEN : Le service doit faire significativement moins de requêtes
print(f"Legacy queries: {legacy_query_count[0]}")
print(f"Service queries: {service_query_count[0]}")
assert service_query_count[0] < legacy_query_count[0], (
f"Service ({service_query_count[0]} queries) devrait faire moins de requêtes "
f"que legacy ({legacy_query_count[0]} queries)"
)
# Les résultats doivent toujours être identiques
assert legacy_result == service_result
def test_service_performance_scales_better(self, app):
"""
PERFORMANCE : Le service doit avoir une complexité O(1) au lieu de O(n*m).
"""
# Ce test nécessiterait des données plus volumineuses pour être significatif
# En production, on pourrait mesurer les temps d'exécution
pass
@pytest.fixture
def sample_assessment_with_grades(app):
"""
Fixture créant un assessment avec quelques notes pour les tests.
"""
class_group = ClassGroup(name='Test Class', year='2025')
students = [
Student(first_name='Alice', last_name='Test', class_group=class_group),
Student(first_name='Bob', last_name='Test', class_group=class_group),
]
assessment = Assessment(
title='Sample Assessment',
date=date.today(),
trimester=1,
class_group=class_group
)
exercise = Exercise(title='Exercise 1', assessment=assessment)
element1 = GradingElement(
label='Question 1',
max_points=10,
grading_type='notes',
exercise=exercise
)
element2 = GradingElement(
label='Question 2',
max_points=5,
grading_type='notes',
exercise=exercise
)
db.session.add_all([
class_group, assessment, exercise, element1, element2, *students
])
db.session.commit()
# Notes partielles : Alice a 2 notes, Bob a 1 note
grades = [
Grade(student=students[0], grading_element=element1, value='8'),
Grade(student=students[0], grading_element=element2, value='4'),
Grade(student=students[1], grading_element=element1, value='7'),
# Bob n'a pas de note pour element2
]
db.session.add_all(grades)
db.session.commit()
return assessment, students, grades
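
For orientation, the dispatch these tests exercise inside Assessment.grading_progress presumably looks like the sketch below. The property body is an assumption; only the flag name and the two private method names come from this file, and the stand-in class is hypothetical.

from config.feature_flags import feature_flags, FeatureFlag

class AssessmentLike:
    """Stand-in showing how the real Assessment model presumably dispatches."""

    def _grading_progress_legacy(self):
        raise NotImplementedError  # real implementation lives in models.py

    def _grading_progress_with_service(self):
        raise NotImplementedError  # delegates to AssessmentProgressService

    @property
    def grading_progress(self):
        # Take the refactored path only while the migration flag is on,
        # so a rollback is a single flag flip (no code change, no redeploy).
        if feature_flags.is_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT):
            return self._grading_progress_with_service()
        return self._grading_progress_legacy()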

View File

@@ -1,426 +0,0 @@
"""
Tests pour la migration de get_assessment_statistics() vers AssessmentStatisticsService.
Cette étape 3.2 de migration valide que :
1. Les calculs statistiques sont identiques (legacy vs refactored)
2. Les performances sont maintenues ou améliorées
3. L'interface reste compatible (format dict inchangé)
4. Le feature flag USE_REFACTORED_ASSESSMENT contrôle la migration
"""
import pytest
from unittest.mock import patch
import time
from models import Assessment, ClassGroup, Student, Exercise, GradingElement, Grade, db
from config.feature_flags import FeatureFlag
from app_config import config_manager
class TestAssessmentStatisticsMigration:
def test_statistics_migration_flag_off_uses_legacy(self, app):
"""
RÈGLE MÉTIER : Quand le feature flag USE_REFACTORED_ASSESSMENT est désactivé,
get_assessment_statistics() doit utiliser la version legacy.
"""
with app.app_context():
# Désactiver le feature flag
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
# Créer des données de test
assessment = self._create_assessment_with_scores()
# Mock pour s'assurer que les services refactorisés ne sont pas appelés
with patch('services.assessment_services.create_assessment_services') as mock_services:
stats = assessment.get_assessment_statistics()
# Les services refactorisés ne doivent PAS être appelés
mock_services.assert_not_called()
# Vérifier le format de retour
assert isinstance(stats, dict)
assert 'count' in stats
assert 'mean' in stats
assert 'median' in stats
assert 'min' in stats
assert 'max' in stats
assert 'std_dev' in stats
def test_statistics_migration_flag_on_uses_refactored(self, app):
"""
RÈGLE MÉTIER : Quand le feature flag USE_REFACTORED_ASSESSMENT est activé,
get_assessment_statistics() doit utiliser les services refactorisés.
"""
with app.app_context():
# Activer le feature flag
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
# Créer des données de test
assessment = self._create_assessment_with_scores()
# Appeler la méthode
stats = assessment.get_assessment_statistics()
# Vérifier le format de retour (identique au legacy)
assert isinstance(stats, dict)
assert 'count' in stats
assert 'mean' in stats
assert 'median' in stats
assert 'min' in stats
assert 'max' in stats
assert 'std_dev' in stats
# Vérifier que les valeurs sont cohérentes
assert stats['count'] == 3 # 3 étudiants
assert stats['mean'] > 0
assert stats['median'] > 0
assert stats['min'] <= stats['mean'] <= stats['max']
assert stats['std_dev'] >= 0
finally:
# Remettre le flag par défaut
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
def test_statistics_results_identical_legacy_vs_refactored(self, app):
"""
RÈGLE CRITIQUE : Les résultats calculés par la version legacy et refactored
doivent être EXACTEMENT identiques.
"""
with app.app_context():
# Créer des données de test complexes
assessment = self._create_complex_assessment_with_scores()
# Test avec flag OFF (legacy)
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
legacy_stats = assessment.get_assessment_statistics()
# Test avec flag ON (refactored)
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
refactored_stats = assessment.get_assessment_statistics()
# Comparaison exacte
assert legacy_stats['count'] == refactored_stats['count']
assert legacy_stats['mean'] == refactored_stats['mean']
assert legacy_stats['median'] == refactored_stats['median']
assert legacy_stats['min'] == refactored_stats['min']
assert legacy_stats['max'] == refactored_stats['max']
assert legacy_stats['std_dev'] == refactored_stats['std_dev']
# Test d'identité complète
assert legacy_stats == refactored_stats
finally:
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
def test_statistics_empty_assessment_both_versions(self, app):
"""
Test des cas limites : évaluation sans notes.
"""
with app.app_context():
# Créer une évaluation sans notes
class_group = ClassGroup(name="Test Class", year="2025-2026")
db.session.add(class_group)
db.session.commit()
assessment = Assessment(
title="Test Assessment",
description="Test Description",
date=None,
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.commit()
# Test legacy
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
legacy_stats = assessment.get_assessment_statistics()
# Test refactored
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
refactored_stats = assessment.get_assessment_statistics()
# Vérifier que les deux versions gèrent correctement le cas vide
expected_empty = {
'count': 0,
'mean': 0,
'median': 0,
'min': 0,
'max': 0,
'std_dev': 0
}
assert legacy_stats == expected_empty
assert refactored_stats == expected_empty
finally:
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
def test_statistics_performance_comparison(self, app):
"""
PERFORMANCE : Vérifier que la version refactored n'est pas plus lente.
"""
with app.app_context():
# Créer une évaluation avec beaucoup de données
assessment = self._create_large_assessment_with_scores()
# Mesurer le temps legacy
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
start_time = time.perf_counter()
legacy_stats = assessment.get_assessment_statistics()
legacy_time = time.perf_counter() - start_time
# Mesurer le temps refactored
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
start_time = time.perf_counter()
refactored_stats = assessment.get_assessment_statistics()
refactored_time = time.perf_counter() - start_time
# Les résultats doivent être identiques
assert legacy_stats == refactored_stats
# La version refactored ne doit pas être 2x plus lente
assert refactored_time <= legacy_time * 2, (
f"Refactored trop lent: {refactored_time:.4f}s vs Legacy: {legacy_time:.4f}s"
)
print(f"Performance comparison - Legacy: {legacy_time:.4f}s, Refactored: {refactored_time:.4f}s")
finally:
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
def test_statistics_integration_with_results_page(self, app, client):
"""
Test d'intégration : la page de résultats doit fonctionner avec les deux versions.
"""
with app.app_context():
assessment = self._create_assessment_with_scores()
# Test avec legacy
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
response = client.get(f'/assessments/{assessment.id}/results')
assert response.status_code == 200
assert b'Statistiques' in response.data # Vérifier que les stats s'affichent
# Test avec refactored
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
response = client.get(f'/assessments/{assessment.id}/results')
assert response.status_code == 200
assert b'Statistiques' in response.data # Vérifier que les stats s'affichent
finally:
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
# === Méthodes utilitaires ===
def _create_assessment_with_scores(self):
"""Crée une évaluation simple avec quelques scores."""
# Classe et étudiants
class_group = ClassGroup(name="Test Class", year="2025-2026")
db.session.add(class_group)
db.session.commit()
students = [
Student(first_name="Alice", last_name="Dupont", class_group_id=class_group.id),
Student(first_name="Bob", last_name="Martin", class_group_id=class_group.id),
Student(first_name="Charlie", last_name="Durand", class_group_id=class_group.id)
]
for student in students:
db.session.add(student)
db.session.commit()
# Évaluation
assessment = Assessment(
title="Test Assessment",
description="Test Description",
date=None,
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.commit()
# Exercice
exercise = Exercise(
title="Exercise 1",
assessment_id=assessment.id,
)
db.session.add(exercise)
db.session.commit()
# Éléments de notation
element = GradingElement(
label="Question 1",
exercise_id=exercise.id,
max_points=20,
grading_type="notes",
)
db.session.add(element)
db.session.commit()
# Notes
grades = [
Grade(student_id=students[0].id, grading_element_id=element.id, value="15"),
Grade(student_id=students[1].id, grading_element_id=element.id, value="18"),
Grade(student_id=students[2].id, grading_element_id=element.id, value="12")
]
for grade in grades:
db.session.add(grade)
db.session.commit()
return assessment
def _create_complex_assessment_with_scores(self):
"""Crée une évaluation complexe avec différents types de scores."""
# Classe et étudiants
class_group = ClassGroup(name="Complex Class", year="2025-2026")
db.session.add(class_group)
db.session.commit()
students = [
Student(first_name="Alice", last_name="Dupont", class_group_id=class_group.id),
Student(first_name="Bob", last_name="Martin", class_group_id=class_group.id),
Student(first_name="Charlie", last_name="Durand", class_group_id=class_group.id),
Student(first_name="Diana", last_name="Petit", class_group_id=class_group.id)
]
for student in students:
db.session.add(student)
db.session.commit()
# Évaluation
assessment = Assessment(
title="Complex Assessment",
description="Test Description",
date=None,
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.commit()
# Exercice 1 - Notes
exercise1 = Exercise(
title="Exercise Points",
assessment_id=assessment.id,
)
db.session.add(exercise1)
db.session.commit()
element1 = GradingElement(
label="Question Points",
exercise_id=exercise1.id,
max_points=20,
grading_type="notes",
)
db.session.add(element1)
db.session.commit()
# Exercice 2 - Scores
exercise2 = Exercise(
title="Exercise Competences",
assessment_id=assessment.id,
order=2
)
db.session.add(exercise2)
db.session.commit()
element2 = GradingElement(
label="Competence",
exercise_id=exercise2.id,
max_points=3,
grading_type="score",
)
db.session.add(element2)
db.session.commit()
# Notes variées avec cas spéciaux
grades = [
# Étudiant 1 - bonnes notes
Grade(student_id=students[0].id, grading_element_id=element1.id, value="18"),
Grade(student_id=students[0].id, grading_element_id=element2.id, value="3"),
# Étudiant 2 - notes moyennes
Grade(student_id=students[1].id, grading_element_id=element1.id, value="14"),
Grade(student_id=students[1].id, grading_element_id=element2.id, value="2"),
# Étudiant 3 - notes faibles avec cas spécial
Grade(student_id=students[2].id, grading_element_id=element1.id, value="8"),
Grade(student_id=students[2].id, grading_element_id=element2.id, value="."), # Pas de réponse
# Étudiant 4 - dispensé
Grade(student_id=students[3].id, grading_element_id=element1.id, value="d"), # Dispensé
Grade(student_id=students[3].id, grading_element_id=element2.id, value="1"),
]
for grade in grades:
db.session.add(grade)
db.session.commit()
return assessment
def _create_large_assessment_with_scores(self):
"""Crée une évaluation avec beaucoup de données pour les tests de performance."""
# Classe et étudiants
class_group = ClassGroup(name="Large Class", year="2025-2026")
db.session.add(class_group)
db.session.commit()
# Créer 20 étudiants
students = []
for i in range(20):
student = Student(
first_name=f"Student{i}",
last_name=f"Test{i}",
class_group_id=class_group.id
)
students.append(student)
db.session.add(student)
db.session.commit()
# Évaluation
assessment = Assessment(
title="Large Assessment",
description="Performance test",
date=None,
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.commit()
# Créer 5 exercices avec plusieurs éléments
for ex_num in range(5):
exercise = Exercise(
title=f"Exercise {ex_num + 1}",
assessment_id=assessment.id,
)
db.session.add(exercise)
db.session.commit()
# 3 éléments par exercice
for elem_num in range(3):
element = GradingElement(
label=f"Question {elem_num + 1}",
exercise_id=exercise.id,
max_points=10,
grading_type="notes",
)
db.session.add(element)
db.session.commit()
# Notes pour tous les étudiants
for student in students:
score = 5 + (students.index(student) + ex_num + elem_num) % 6 # Scores variés entre 5 et 10 selon l'étudiant
grade = Grade(
student_id=student.id,
grading_element_id=element.id,
value=str(score)
)
db.session.add(grade)
db.session.commit()
return assessment
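
The dict shape asserted throughout this file (count, mean, median, min, max, std_dev) can be produced from a list of per-student totals roughly as below. This is a minimal standard-library sketch, not the project's AssessmentStatisticsService; in particular, whether the real service uses sample or population standard deviation is not shown here.

import statistics
from typing import Dict, List

def summarize_scores(scores: List[float]) -> Dict[str, float]:
    """Sketch of the statistics payload the tests above assert on."""
    if not scores:
        # Both legacy and refactored versions return an all-zero dict for an
        # empty assessment (see test_statistics_empty_assessment_both_versions).
        return {'count': 0, 'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'std_dev': 0}
    return {
        'count': len(scores),
        'mean': statistics.mean(scores),
        'median': statistics.median(scores),
        'min': min(scores),
        'max': max(scores),
        'std_dev': statistics.stdev(scores) if len(scores) > 1 else 0,
    }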

View File

@@ -1,408 +0,0 @@
"""
Tests pour le système de Feature Flags
Tests complets du système de feature flags utilisé pour la migration progressive.
Couvre tous les cas d'usage critiques : activation/désactivation, configuration
environnement, rollback, logging, et validation.
"""
import pytest
import os
from unittest.mock import patch
from datetime import datetime
from config.feature_flags import (
FeatureFlag,
FeatureFlagConfig,
FeatureFlagManager,
feature_flags,
is_feature_enabled
)
class TestFeatureFlagConfig:
"""Tests pour la classe de configuration FeatureFlagConfig."""
def test_feature_flag_config_creation(self):
"""Test création d'une configuration de feature flag."""
config = FeatureFlagConfig(
enabled=True,
description="Test feature flag",
migration_day=3,
rollback_safe=True
)
assert config.enabled is True
assert config.description == "Test feature flag"
assert config.migration_day == 3
assert config.rollback_safe is True
assert config.created_at is not None
assert config.updated_at is not None
assert isinstance(config.created_at, datetime)
assert isinstance(config.updated_at, datetime)
def test_feature_flag_config_defaults(self):
"""Test valeurs par défaut de FeatureFlagConfig."""
config = FeatureFlagConfig(enabled=False, description="Test")
assert config.migration_day is None
assert config.rollback_safe is True # Défaut sécurisé
assert config.created_at is not None
assert config.updated_at is not None
class TestFeatureFlagEnum:
"""Tests pour l'énumération des feature flags."""
def test_feature_flag_enum_values(self):
"""Test que tous les feature flags de migration sont définis."""
# Migration core (Jour 3-4)
assert FeatureFlag.USE_STRATEGY_PATTERN.value == "use_strategy_pattern"
assert FeatureFlag.USE_REFACTORED_ASSESSMENT.value == "use_refactored_assessment"
# Migration avancée (Jour 5-6)
assert FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR.value == "use_new_student_score_calculator"
assert FeatureFlag.USE_NEW_ASSESSMENT_STATISTICS_SERVICE.value == "use_new_assessment_statistics_service"
# Fonctionnalités avancées
assert FeatureFlag.ENABLE_PERFORMANCE_MONITORING.value == "enable_performance_monitoring"
assert FeatureFlag.ENABLE_QUERY_OPTIMIZATION.value == "enable_query_optimization"
def test_feature_flag_enum_uniqueness(self):
"""Test que toutes les valeurs de feature flags sont uniques."""
values = [flag.value for flag in FeatureFlag]
assert len(values) == len(set(values)) # Pas de doublons
class TestFeatureFlagManager:
"""Tests pour la classe FeatureFlagManager."""
def test_manager_initialization(self):
"""Test initialisation du gestionnaire."""
manager = FeatureFlagManager()
# Vérification que tous les flags sont initialisés
for flag in FeatureFlag:
config = manager.get_config(flag)
assert config is not None
assert isinstance(config, FeatureFlagConfig)
# Par défaut, tous désactivés pour sécurité
assert config.enabled is False
def test_is_enabled_default_false(self):
"""Test que tous les flags sont désactivés par défaut."""
manager = FeatureFlagManager()
for flag in FeatureFlag:
assert manager.is_enabled(flag) is False
def test_enable_flag(self):
"""Test activation d'un feature flag."""
manager = FeatureFlagManager()
flag = FeatureFlag.USE_STRATEGY_PATTERN
# Initialement désactivé
assert manager.is_enabled(flag) is False
# Activation
success = manager.enable(flag, "Test activation")
assert success is True
assert manager.is_enabled(flag) is True
# Vérification des métadonnées
config = manager.get_config(flag)
assert config.enabled is True
assert config.updated_at is not None
def test_disable_flag(self):
"""Test désactivation d'un feature flag."""
manager = FeatureFlagManager()
flag = FeatureFlag.USE_STRATEGY_PATTERN
# Activer d'abord
manager.enable(flag, "Test")
assert manager.is_enabled(flag) is True
# Désactiver
success = manager.disable(flag, "Test désactivation")
assert success is True
assert manager.is_enabled(flag) is False
# Vérification des métadonnées
config = manager.get_config(flag)
assert config.enabled is False
assert config.updated_at is not None
def test_enable_unknown_flag(self):
"""Test activation d'un flag inexistant."""
manager = FeatureFlagManager()
# Création d'un flag fictif pour le test
class FakeFlag:
value = "nonexistent_flag"
fake_flag = FakeFlag()
success = manager.enable(fake_flag, "Test")
assert success is False
def test_disable_unknown_flag(self):
"""Test désactivation d'un flag inexistant."""
manager = FeatureFlagManager()
# Création d'un flag fictif pour le test
class FakeFlag:
value = "nonexistent_flag"
fake_flag = FakeFlag()
success = manager.disable(fake_flag, "Test")
assert success is False
def test_get_status_summary(self):
"""Test du résumé des statuts."""
manager = FeatureFlagManager()
# Activer quelques flags
manager.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Test")
manager.enable(FeatureFlag.ENABLE_PERFORMANCE_MONITORING, "Test")
summary = manager.get_status_summary()
# Structure du résumé
assert 'flags' in summary
assert 'migration_status' in summary
assert 'total_enabled' in summary
assert 'last_updated' in summary
# Vérification du compte
assert summary['total_enabled'] == 2
# Vérification des flags individuels
assert summary['flags']['use_strategy_pattern']['enabled'] is True
assert summary['flags']['enable_performance_monitoring']['enabled'] is True
assert summary['flags']['use_refactored_assessment']['enabled'] is False
def test_migration_day_status(self):
"""Test du statut de migration par jour."""
manager = FeatureFlagManager()
summary = manager.get_status_summary()
# Initialement, aucun jour n'est prêt
assert summary['migration_status']['day_3_ready'] is False
assert summary['migration_status']['day_4_ready'] is False
assert summary['migration_status']['day_5_ready'] is False
assert summary['migration_status']['day_6_ready'] is False
# Activer le jour 3
manager.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Test Jour 3")
summary = manager.get_status_summary()
assert summary['migration_status']['day_3_ready'] is True
assert summary['migration_status']['day_4_ready'] is False
def test_enable_migration_day(self):
"""Test activation des flags pour un jour de migration."""
manager = FeatureFlagManager()
# Activer le jour 3
results = manager.enable_migration_day(3, "Test migration jour 3")
assert 'use_strategy_pattern' in results
assert results['use_strategy_pattern'] is True
# Vérifier que le flag est effectivement activé
assert manager.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is True
# Vérifier le statut de migration
summary = manager.get_status_summary()
assert summary['migration_status']['day_3_ready'] is True
def test_enable_migration_day_invalid(self):
"""Test activation d'un jour de migration invalide."""
manager = FeatureFlagManager()
# Jour invalide
results = manager.enable_migration_day(10, "Test invalide")
assert results == {}
# Jour 1 et 2 ne sont pas supportés (pas de flags associés)
results = manager.enable_migration_day(1, "Test invalide")
assert results == {}
class TestEnvironmentConfiguration:
"""Tests pour la configuration par variables d'environnement."""
@patch.dict(os.environ, {
'FEATURE_FLAG_USE_STRATEGY_PATTERN': 'true',
'FEATURE_FLAG_ENABLE_PERFORMANCE_MONITORING': '1',
'FEATURE_FLAG_USE_REFACTORED_ASSESSMENT': 'false'
})
def test_load_from_environment_variables(self):
"""Test chargement depuis variables d'environnement."""
manager = FeatureFlagManager()
# Vérification des flags activés par env
assert manager.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is True
assert manager.is_enabled(FeatureFlag.ENABLE_PERFORMANCE_MONITORING) is True
# Vérification du flag explicitement désactivé
assert manager.is_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT) is False
# Vérification des flags non définis (défaut: False)
assert manager.is_enabled(FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR) is False
@patch.dict(os.environ, {
'FEATURE_FLAG_USE_STRATEGY_PATTERN': 'yes',
'FEATURE_FLAG_ENABLE_QUERY_OPTIMIZATION': 'on',
'FEATURE_FLAG_ENABLE_BULK_OPERATIONS': 'enabled'
})
def test_environment_boolean_parsing(self):
"""Test parsing des valeurs booléennes de l'environnement."""
manager = FeatureFlagManager()
# Différentes formes de 'true'
assert manager.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is True # 'yes'
assert manager.is_enabled(FeatureFlag.ENABLE_QUERY_OPTIMIZATION) is True # 'on'
assert manager.is_enabled(FeatureFlag.ENABLE_BULK_OPERATIONS) is True # 'enabled'
@patch.dict(os.environ, {
'FEATURE_FLAG_USE_STRATEGY_PATTERN': 'false',
'FEATURE_FLAG_ENABLE_PERFORMANCE_MONITORING': '0',
'FEATURE_FLAG_ENABLE_QUERY_OPTIMIZATION': 'no',
'FEATURE_FLAG_ENABLE_BULK_OPERATIONS': 'disabled'
})
def test_environment_false_values(self):
"""Test parsing des valeurs 'false' de l'environnement."""
manager = FeatureFlagManager()
# Différentes formes de 'false'
assert manager.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is False # 'false'
assert manager.is_enabled(FeatureFlag.ENABLE_PERFORMANCE_MONITORING) is False # '0'
assert manager.is_enabled(FeatureFlag.ENABLE_QUERY_OPTIMIZATION) is False # 'no'
assert manager.is_enabled(FeatureFlag.ENABLE_BULK_OPERATIONS) is False # 'disabled'
class TestGlobalFunctions:
"""Tests pour les fonctions globales utilitaires."""
def test_global_is_feature_enabled(self):
"""Test fonction globale is_feature_enabled."""
# Par défaut, tous désactivés
assert is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is False
# Activer via l'instance globale
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Test global")
assert is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is True
# Nettoyage pour les autres tests
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Nettoyage test")
class TestMigrationScenarios:
"""Tests pour les scénarios de migration réels."""
def test_day_3_migration_scenario(self):
"""Test scénario complet migration Jour 3."""
manager = FeatureFlagManager()
# État initial
summary = manager.get_status_summary()
assert summary['migration_status']['day_3_ready'] is False
# Activation Jour 3
results = manager.enable_migration_day(3, "Migration Jour 3 - Grading Strategies")
assert all(results.values()) # Tous les flags activés avec succès
# Vérification post-migration
summary = manager.get_status_summary()
assert summary['migration_status']['day_3_ready'] is True
assert manager.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN) is True
def test_progressive_migration_scenario(self):
"""Test scénario de migration progressive complète."""
manager = FeatureFlagManager()
# Jour 3: Grading Strategies
manager.enable_migration_day(3, "Jour 3")
summary = manager.get_status_summary()
assert summary['migration_status']['day_3_ready'] is True
assert summary['total_enabled'] == 1
# Jour 4: Assessment Progress Service
manager.enable_migration_day(4, "Jour 4")
summary = manager.get_status_summary()
assert summary['migration_status']['day_4_ready'] is True
assert summary['total_enabled'] == 2
# Jour 5: Student Score Calculator
manager.enable_migration_day(5, "Jour 5")
summary = manager.get_status_summary()
assert summary['migration_status']['day_5_ready'] is True
assert summary['total_enabled'] == 3
# Jour 6: Assessment Statistics Service
manager.enable_migration_day(6, "Jour 6")
summary = manager.get_status_summary()
assert summary['migration_status']['day_6_ready'] is True
assert summary['total_enabled'] == 4
def test_rollback_scenario(self):
"""Test scénario de rollback complet."""
manager = FeatureFlagManager()
# Activer plusieurs jours
manager.enable_migration_day(3, "Migration")
manager.enable_migration_day(4, "Migration")
summary = manager.get_status_summary()
assert summary['total_enabled'] == 2
# Rollback du Jour 4 seulement
manager.disable(FeatureFlag.USE_REFACTORED_ASSESSMENT, "Rollback Jour 4")
summary = manager.get_status_summary()
assert summary['migration_status']['day_3_ready'] is True
assert summary['migration_status']['day_4_ready'] is False
assert summary['total_enabled'] == 1
class TestSafety:
"""Tests de sécurité et validation."""
def test_all_flags_rollback_safe_by_default(self):
"""Test que tous les flags sont rollback-safe par défaut."""
manager = FeatureFlagManager()
for flag in FeatureFlag:
config = manager.get_config(flag)
assert config.rollback_safe is True, f"Flag {flag.value} n'est pas rollback-safe"
def test_migration_flags_have_correct_days(self):
"""Test que les flags de migration ont les bons jours assignés."""
manager = FeatureFlagManager()
# Jour 3
config = manager.get_config(FeatureFlag.USE_STRATEGY_PATTERN)
assert config.migration_day == 3
# Jour 4
config = manager.get_config(FeatureFlag.USE_REFACTORED_ASSESSMENT)
assert config.migration_day == 4
# Jour 5
config = manager.get_config(FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR)
assert config.migration_day == 5
# Jour 6
config = manager.get_config(FeatureFlag.USE_NEW_ASSESSMENT_STATISTICS_SERVICE)
assert config.migration_day == 6
def test_flag_descriptions_exist(self):
"""Test que tous les flags ont des descriptions significatives."""
manager = FeatureFlagManager()
for flag in FeatureFlag:
config = manager.get_config(flag)
assert config.description, f"Flag {flag.value} n'a pas de description"
assert len(config.description) > 10, f"Description trop courte pour {flag.value}"
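
The environment tests above imply a FEATURE_FLAG_<NAME> naming scheme and a small set of truthy strings ('true', '1', 'yes', 'on', 'enabled'). A minimal sketch of that parsing, assuming config/feature_flags.py does something equivalent (the function name here is hypothetical):

import os
from typing import Optional

TRUTHY_VALUES = {'true', '1', 'yes', 'on', 'enabled'}

def flag_override_from_env(flag_value: str) -> Optional[bool]:
    """Return the FEATURE_FLAG_<NAME> override, or None to keep the default (False)."""
    raw = os.environ.get(f'FEATURE_FLAG_{flag_value.upper()}')
    if raw is None:
        return None
    return raw.strip().lower() in TRUTHY_VALUES

Under the first patch.dict above, flag_override_from_env('use_strategy_pattern') would return True and flag_override_from_env('use_refactored_assessment') would return False.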

View File

@@ -1,237 +0,0 @@
"""
Tests de validation pour la migration Pattern Strategy (JOUR 3-4).
Ce module teste que l'implémentation avec Pattern Strategy donne
exactement les mêmes résultats que l'implémentation legacy, garantissant
ainsi une migration sans régression.
"""
import pytest
from decimal import Decimal
from config.feature_flags import feature_flags, FeatureFlag
from models import GradingCalculator
class TestPatternStrategyMigration:
"""
Tests de validation pour s'assurer que la migration vers le Pattern Strategy
ne change aucun comportement existant.
"""
def setup_method(self):
"""Préparation avant chaque test."""
# S'assurer que le flag est désactivé au début
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Test setup")
def teardown_method(self):
"""Nettoyage après chaque test."""
# Remettre le flag à l'état désactivé
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Test teardown")
def test_calculate_score_notes_identical_results(self):
"""
Test que les calculs de notes donnent des résultats identiques
entre l'implémentation legacy et la nouvelle.
"""
test_cases = [
("15.5", "notes", 20.0, 15.5),
("0", "notes", 20.0, 0.0),
("20", "notes", 20.0, 20.0),
("10.25", "notes", 20.0, 10.25),
("invalid", "notes", 20.0, 0.0),
]
for grade_value, grading_type, max_points, expected in test_cases:
# Test avec implémentation legacy
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing legacy")
legacy_result = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Test avec nouvelle implémentation
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing new strategy")
strategy_result = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Les résultats doivent être identiques
assert legacy_result == strategy_result, (
f"Résultats différents pour {grade_value}: "
f"legacy={legacy_result}, strategy={strategy_result}"
)
assert legacy_result == expected
def test_calculate_score_score_identical_results(self):
"""
Test que les calculs de scores (0-3) donnent des résultats identiques.
"""
test_cases = [
("0", "score", 12.0, 0.0),
("1", "score", 12.0, 4.0), # (1/3) * 12 = 4
("2", "score", 12.0, 8.0), # (2/3) * 12 = 8
("3", "score", 12.0, 12.0), # (3/3) * 12 = 12
("invalid", "score", 12.0, 0.0),
("4", "score", 12.0, 0.0), # Invalide, hors limite
]
for grade_value, grading_type, max_points, expected in test_cases:
# Test avec implémentation legacy
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing legacy")
legacy_result = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Test avec nouvelle implémentation
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing new strategy")
strategy_result = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Les résultats doivent être identiques
assert legacy_result == strategy_result, (
f"Résultats différents pour {grade_value}: "
f"legacy={legacy_result}, strategy={strategy_result}"
)
assert abs(legacy_result - expected) < 0.001 # Tolérance pour les floats
def test_special_values_identical_results(self, app):
"""
Test que les valeurs spéciales sont traitées identiquement.
Nécessite l'application Flask pour l'accès à la configuration.
"""
with app.app_context():
# Valeurs spéciales courantes
special_cases = [
(".", "notes", 20.0), # Pas de réponse -> 0
("d", "notes", 20.0), # Dispensé -> None
(".", "score", 12.0), # Pas de réponse -> 0
("d", "score", 12.0), # Dispensé -> None
]
for grade_value, grading_type, max_points in special_cases:
# Test avec implémentation legacy
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing legacy")
legacy_result = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Test avec nouvelle implémentation
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing new strategy")
strategy_result = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Les résultats doivent être identiques
assert legacy_result == strategy_result, (
f"Résultats différents pour valeur spéciale {grade_value}: "
f"legacy={legacy_result}, strategy={strategy_result}"
)
def test_is_counted_in_total_identical_results(self, app):
"""
Test que is_counted_in_total donne des résultats identiques.
"""
with app.app_context():
test_cases = [
("15.5", "notes", True), # Valeur normale
(".", "notes", True), # Pas de réponse compte dans le total
("d", "notes", False), # Dispensé ne compte pas
("0", "score", True), # Valeur normale
(".", "score", True), # Pas de réponse compte dans le total
("d", "score", False), # Dispensé ne compte pas
]
for grade_value, grading_type, expected in test_cases:
# Test avec implémentation legacy
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing legacy")
legacy_result = GradingCalculator.is_counted_in_total(grade_value, grading_type)
# Test avec nouvelle implémentation
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Testing new strategy")
strategy_result = GradingCalculator.is_counted_in_total(grade_value, grading_type)
# Les résultats doivent être identiques
assert legacy_result == strategy_result, (
f"Résultats différents pour is_counted_in_total {grade_value}: "
f"legacy={legacy_result}, strategy={strategy_result}"
)
assert legacy_result == expected
def test_feature_flag_toggle_works_correctly(self):
"""
Test que le basculement du feature flag fonctionne correctement.
"""
grade_value, grading_type, max_points = "15.5", "notes", 20.0
# Vérifier état initial (désactivé)
assert not feature_flags.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN)
result_disabled = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Activer le flag
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Test toggle")
assert feature_flags.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN)
result_enabled = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Désactiver le flag
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Test toggle back")
assert not feature_flags.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN)
result_disabled_again = GradingCalculator.calculate_score(grade_value, grading_type, max_points)
# Tous les résultats doivent être identiques
assert result_disabled == result_enabled == result_disabled_again
assert result_disabled == 15.5
def test_strategy_pattern_performance_acceptable(self):
"""
Test que la nouvelle implémentation n'a pas de dégradation majeure de performance.
"""
import time
grade_value, grading_type, max_points = "15.5", "notes", 20.0
iterations = 1000
# Mesure performance legacy
feature_flags.disable(FeatureFlag.USE_STRATEGY_PATTERN, "Performance test legacy")
start_legacy = time.time()
for _ in range(iterations):
GradingCalculator.calculate_score(grade_value, grading_type, max_points)
time_legacy = time.time() - start_legacy
# Mesure performance strategy
feature_flags.enable(FeatureFlag.USE_STRATEGY_PATTERN, "Performance test strategy")
start_strategy = time.time()
for _ in range(iterations):
GradingCalculator.calculate_score(grade_value, grading_type, max_points)
time_strategy = time.time() - start_strategy
# La nouvelle implémentation ne doit pas être plus de 3x plus lente
performance_ratio = time_strategy / time_legacy
assert performance_ratio < 3.0, (
f"Performance dégradée: strategy={time_strategy:.4f}s, "
f"legacy={time_legacy:.4f}s, ratio={performance_ratio:.2f}"
)
class TestPatternStrategyFactoryValidation:
"""Tests de validation de la factory des strategies."""
def test_strategy_factory_creates_correct_strategies(self):
"""Test que la factory crée les bonnes strategies."""
from services.assessment_services import GradingStrategyFactory
# Strategy pour notes
notes_strategy = GradingStrategyFactory.create('notes')
assert notes_strategy.get_grading_type() == 'notes'
# Strategy pour scores
score_strategy = GradingStrategyFactory.create('score')
assert score_strategy.get_grading_type() == 'score'
# Type invalide
with pytest.raises(ValueError, match="Type de notation non supporté"):
GradingStrategyFactory.create('invalid_type')
def test_strategy_patterns_work_correctly(self):
"""Test que les strategies individuelles fonctionnent correctement."""
from services.assessment_services import GradingStrategyFactory
# Test NotesStrategy
notes_strategy = GradingStrategyFactory.create('notes')
assert notes_strategy.calculate_score("15.5", 20.0) == 15.5
assert notes_strategy.calculate_score("invalid", 20.0) == 0.0
# Test ScoreStrategy
score_strategy = GradingStrategyFactory.create('score')
assert score_strategy.calculate_score("2", 12.0) == 8.0 # (2/3) * 12
assert score_strategy.calculate_score("invalid", 12.0) == 0.0
if __name__ == "__main__":
pytest.main([__file__, "-v"])
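
The factory tests above pin down the observable behaviour of the strategies. A hedged sketch consistent with those expectations follows; the real classes in services.assessment_services may be structured differently, and the handling of special values like '.' and 'd' is done elsewhere.

class NotesStrategy:
    """Raw points: the stored value is already the score."""

    def get_grading_type(self) -> str:
        return 'notes'

    def calculate_score(self, value: str, max_points: float) -> float:
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

class ScoreStrategy:
    """Competence levels 0-3 scaled onto max_points."""

    def get_grading_type(self) -> str:
        return 'score'

    def calculate_score(self, value: str, max_points: float) -> float:
        try:
            level = int(value)
        except (TypeError, ValueError):
            return 0.0
        if not 0 <= level <= 3:
            return 0.0
        return (level / 3) * max_points

class GradingStrategyFactory:
    _strategies = {'notes': NotesStrategy, 'score': ScoreStrategy}

    @classmethod
    def create(cls, grading_type: str):
        if grading_type not in cls._strategies:
            raise ValueError(f"Type de notation non supporté : {grading_type}")
        return cls._strategies[grading_type]()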

View File

@@ -1,452 +0,0 @@
"""
Tests de performance spécialisés pour AssessmentProgressService (JOUR 4 - Étape 2.2)
Ce module teste spécifiquement les améliorations de performance apportées par
AssessmentProgressService en remplaçant les requêtes N+1 par des requêtes optimisées.
Métriques mesurées :
- Nombre de requêtes SQL exécutées
- Temps d'exécution
- Utilisation mémoire
- Scalabilité avec le volume de données
Ces tests permettent de quantifier l'amélioration avant/après migration.
"""
import pytest
import time
import statistics
from contextlib import contextmanager
from typing import List, Dict, Any
from unittest.mock import patch
from datetime import date
from sqlalchemy import event
from models import db, Assessment, ClassGroup, Student, Exercise, GradingElement, Grade
from config.feature_flags import FeatureFlag
class QueryCounter:
"""Utilitaire pour compter les requêtes SQL."""
def __init__(self):
self.query_count = 0
self.queries = []
def count_query(self, conn, cursor, statement, parameters, context, executemany):
"""Callback pour compter les requêtes."""
self.query_count += 1
self.queries.append({
'statement': statement,
'parameters': parameters,
'executemany': executemany
})
@contextmanager
def measure(self):
"""Context manager pour mesurer les requêtes."""
self.query_count = 0
self.queries = []
event.listen(db.engine, "before_cursor_execute", self.count_query)
try:
yield self
finally:
event.remove(db.engine, "before_cursor_execute", self.count_query)
class PerformanceBenchmark:
"""Classe pour mesurer les performances."""
@staticmethod
def measure_execution_time(func, *args, **kwargs) -> Dict[str, Any]:
"""Mesure le temps d'exécution d'une fonction."""
start_time = time.perf_counter()
result = func(*args, **kwargs)
end_time = time.perf_counter()
return {
'result': result,
'execution_time': end_time - start_time,
'execution_time_ms': (end_time - start_time) * 1000
}
@staticmethod
def compare_implementations(assessment, iterations: int = 5) -> Dict[str, Any]:
"""
Compare les performances entre legacy et service.
Args:
assessment: L'assessment à tester
iterations: Nombre d'itérations pour la moyenne
Returns:
Dict avec les statistiques de comparaison
"""
legacy_times = []
service_times = []
legacy_queries = []
service_queries = []
counter = QueryCounter()
# Mesure des performances legacy
for _ in range(iterations):
with counter.measure():
benchmark_result = PerformanceBenchmark.measure_execution_time(
assessment._grading_progress_legacy
)
legacy_times.append(benchmark_result['execution_time_ms'])
legacy_queries.append(counter.query_count)
# Mesure des performances service
for _ in range(iterations):
with counter.measure():
benchmark_result = PerformanceBenchmark.measure_execution_time(
assessment._grading_progress_with_service
)
service_times.append(benchmark_result['execution_time_ms'])
service_queries.append(counter.query_count)
return {
'legacy': {
'avg_time_ms': statistics.mean(legacy_times),
'median_time_ms': statistics.median(legacy_times),
'min_time_ms': min(legacy_times),
'max_time_ms': max(legacy_times),
'std_dev_time_ms': statistics.stdev(legacy_times) if len(legacy_times) > 1 else 0,
'avg_queries': statistics.mean(legacy_queries),
'max_queries': max(legacy_queries),
'all_times': legacy_times,
'all_queries': legacy_queries
},
'service': {
'avg_time_ms': statistics.mean(service_times),
'median_time_ms': statistics.median(service_times),
'min_time_ms': min(service_times),
'max_time_ms': max(service_times),
'std_dev_time_ms': statistics.stdev(service_times) if len(service_times) > 1 else 0,
'avg_queries': statistics.mean(service_queries),
'max_queries': max(service_queries),
'all_times': service_times,
'all_queries': service_queries
},
'improvement': {
'time_ratio': statistics.mean(legacy_times) / statistics.mean(service_times) if statistics.mean(service_times) > 0 else float('inf'),
'queries_saved': statistics.mean(legacy_queries) - statistics.mean(service_queries),
'queries_ratio': statistics.mean(legacy_queries) / statistics.mean(service_queries) if statistics.mean(service_queries) > 0 else float('inf')
}
}
class TestGradingProgressPerformance:
"""
Suite de tests de performance pour grading_progress.
"""
def test_small_dataset_performance(self, app):
"""
PERFORMANCE : Test sur un petit dataset (2 étudiants, 2 exercices, 4 éléments).
"""
assessment = self._create_assessment_with_data(
students_count=2,
exercises_count=2,
elements_per_exercise=2
)
comparison = PerformanceBenchmark.compare_implementations(assessment)
# ASSERTIONS
print(f"\n=== SMALL DATASET PERFORMANCE ===")
print(f"Legacy: {comparison['legacy']['avg_time_ms']:.2f}ms avg, {comparison['legacy']['avg_queries']:.1f} queries avg")
print(f"Service: {comparison['service']['avg_time_ms']:.2f}ms avg, {comparison['service']['avg_queries']:.1f} queries avg")
print(f"Improvement: {comparison['improvement']['time_ratio']:.2f}x faster, {comparison['improvement']['queries_saved']:.1f} queries saved")
# Le service doit faire moins de requêtes
assert comparison['service']['avg_queries'] < comparison['legacy']['avg_queries'], (
f"Service devrait faire moins de requêtes: {comparison['service']['avg_queries']} vs {comparison['legacy']['avg_queries']}"
)
# Les résultats doivent être identiques
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
assert legacy_result == service_result
def test_medium_dataset_performance(self, app):
"""
PERFORMANCE : Test sur un dataset moyen (5 étudiants, 3 exercices, 6 éléments).
"""
assessment = self._create_assessment_with_data(
students_count=5,
exercises_count=3,
elements_per_exercise=2
)
comparison = PerformanceBenchmark.compare_implementations(assessment)
print(f"\n=== MEDIUM DATASET PERFORMANCE ===")
print(f"Legacy: {comparison['legacy']['avg_time_ms']:.2f}ms avg, {comparison['legacy']['avg_queries']:.1f} queries avg")
print(f"Service: {comparison['service']['avg_time_ms']:.2f}ms avg, {comparison['service']['avg_queries']:.1f} queries avg")
print(f"Improvement: {comparison['improvement']['time_ratio']:.2f}x faster, {comparison['improvement']['queries_saved']:.1f} queries saved")
# Le service doit faire significativement moins de requêtes avec plus de données
queries_improvement = comparison['improvement']['queries_ratio']
assert queries_improvement > 1.5, (
f"Avec plus de données, l'amélioration devrait être plus significative: {queries_improvement:.2f}x"
)
# Les résultats doivent être identiques
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
assert legacy_result == service_result
def test_large_dataset_performance(self, app):
"""
PERFORMANCE : Test sur un grand dataset (10 étudiants, 4 exercices, 12 éléments).
"""
assessment = self._create_assessment_with_data(
students_count=10,
exercises_count=4,
elements_per_exercise=3
)
comparison = PerformanceBenchmark.compare_implementations(assessment)
print(f"\n=== LARGE DATASET PERFORMANCE ===")
print(f"Legacy: {comparison['legacy']['avg_time_ms']:.2f}ms avg, {comparison['legacy']['avg_queries']:.1f} queries avg")
print(f"Service: {comparison['service']['avg_time_ms']:.2f}ms avg, {comparison['service']['avg_queries']:.1f} queries avg")
print(f"Improvement: {comparison['improvement']['time_ratio']:.2f}x faster, {comparison['improvement']['queries_saved']:.1f} queries saved")
# Avec beaucoup de données, l'amélioration doit être dramatique
queries_improvement = comparison['improvement']['queries_ratio']
assert queries_improvement > 2.0, (
f"Avec beaucoup de données, l'amélioration devrait être dramatique: {queries_improvement:.2f}x"
)
# Le service ne doit jamais dépasser un certain nombre de requêtes (peu importe la taille)
max_service_queries = comparison['service']['max_queries']
assert max_service_queries <= 5, (
f"Le service optimisé ne devrait jamais dépasser 5 requêtes, trouvé: {max_service_queries}"
)
# Les résultats doivent être identiques
legacy_result = assessment._grading_progress_legacy()
service_result = assessment._grading_progress_with_service()
assert legacy_result == service_result
def test_scalability_analysis(self, app):
"""
ANALYSE : Teste la scalabilité avec différentes tailles de datasets.
"""
dataset_configs = [
(2, 2, 1), # Petit : 2 étudiants, 2 exercices, 1 élément/ex
(5, 3, 2), # Moyen : 5 étudiants, 3 exercices, 2 éléments/ex
(8, 4, 2), # Grand : 8 étudiants, 4 exercices, 2 éléments/ex
]
scalability_results = []
for students_count, exercises_count, elements_per_exercise in dataset_configs:
assessment = self._create_assessment_with_data(
students_count, exercises_count, elements_per_exercise
)
comparison = PerformanceBenchmark.compare_implementations(assessment, iterations=3)
total_elements = exercises_count * elements_per_exercise
total_grades = students_count * total_elements
scalability_results.append({
'dataset_size': f"{students_count}s-{exercises_count}e-{total_elements}el",
'total_grades': total_grades,
'legacy_queries': comparison['legacy']['avg_queries'],
'service_queries': comparison['service']['avg_queries'],
'queries_ratio': comparison['improvement']['queries_ratio'],
'time_ratio': comparison['improvement']['time_ratio']
})
print(f"\n=== SCALABILITY ANALYSIS ===")
for result in scalability_results:
print(f"Dataset {result['dataset_size']}: "
f"Legacy={result['legacy_queries']:.1f}q, "
f"Service={result['service_queries']:.1f}q, "
f"Improvement={result['queries_ratio']:.1f}x queries")
# The service must show constant or sub-linear query complexity
service_queries = [r['service_queries'] for r in scalability_results]
legacy_queries = [r['legacy_queries'] for r in scalability_results]
# Service queries must not grow linearly with dataset size
service_growth = service_queries[-1] / service_queries[0] if service_queries[0] > 0 else 1
legacy_growth = legacy_queries[-1] / legacy_queries[0] if legacy_queries[0] > 0 else 1
print(f"Service queries growth: {service_growth:.2f}x")
print(f"Legacy queries growth: {legacy_growth:.2f}x")
assert service_growth < legacy_growth, (
f"The service must grow more slowly than legacy: {service_growth:.2f} vs {legacy_growth:.2f}"
)
def test_query_patterns_analysis(self, app):
"""
ANALYSIS: Inspects query patterns to understand where the optimisations come from.
"""
assessment = self._create_assessment_with_data(
students_count=3,
exercises_count=2,
elements_per_exercise=2
)
counter = QueryCounter()
# Capture the queries issued by the legacy implementation
with counter.measure():
assessment._grading_progress_legacy()
legacy_queries = counter.queries.copy()
# Capture the queries issued by the service implementation
with counter.measure():
assessment._grading_progress_with_service()
service_queries = counter.queries.copy()
print(f"\n=== QUERY PATTERNS ANALYSIS ===")
print(f"Legacy executed {len(legacy_queries)} queries:")
for i, query in enumerate(legacy_queries[:5]): # show only the first 5
print(f" {i+1}: {query['statement'][:100]}...")
print(f"\nService executed {len(service_queries)} queries:")
for i, query in enumerate(service_queries):
print(f" {i+1}: {query['statement'][:100]}...")
# The service must not issue queries inside loops
# (heuristic: no identical statements repeated)
legacy_statements = [q['statement'] for q in legacy_queries]
service_statements = [q['statement'] for q in service_queries]
legacy_duplicates = len(legacy_statements) - len(set(legacy_statements))
service_duplicates = len(service_statements) - len(set(service_statements))
print(f"Legacy duplicate queries: {legacy_duplicates}")
print(f"Service duplicate queries: {service_duplicates}")
# The service must have fewer duplicated queries (i.e. fewer per-row loops)
assert service_duplicates < legacy_duplicates, (
f"The service should have fewer duplicated queries: {service_duplicates} vs {legacy_duplicates}"
)
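# --- Illustrative sketch (assumption, not part of the original suite) ---
# QueryCounter used above is assumed to be a small helper that records every SQL
# statement SQLAlchemy sends to the database. A minimal version could hook the
# `before_cursor_execute` engine event roughly as below; the class name and
# structure are assumptions for illustration only.
#
# from contextlib import contextmanager
# from sqlalchemy import event
#
# class QueryCounter:
#     def __init__(self):
#         self.queries = []
#
#     @contextmanager
#     def measure(self):
#         self.queries = []
#
#         def _record(conn, cursor, statement, parameters, context, executemany):
#             # One entry per statement sent to the driver
#             self.queries.append({'statement': statement, 'parameters': parameters})
#
#         event.listen(db.engine, 'before_cursor_execute', _record)
#         try:
#             yield self
#         finally:
#             event.remove(db.engine, 'before_cursor_execute', _record)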
def _create_assessment_with_data(self, students_count: int, exercises_count: int, elements_per_exercise: int) -> Assessment:
"""
Helper that creates an assessment populated with test data.
Args:
students_count: Number of students
exercises_count: Number of exercises
elements_per_exercise: Number of grading elements per exercise
Returns:
The created Assessment with all associated data
"""
# Create the class group and its students
class_group = ClassGroup(name=f'Perf Test Class {students_count}', year='2025')
students = [
Student(
first_name=f'Student{i}',
last_name=f'Test{i}',
class_group=class_group
)
for i in range(students_count)
]
# Create the assessment
assessment = Assessment(
title=f'Performance Test {students_count}s-{exercises_count}e',
date=date.today(),
trimester=1,
class_group=class_group
)
db.session.add_all([class_group, assessment, *students])
db.session.commit()
# Create the exercises and grading elements
exercises = []
elements = []
grades = []
for ex_idx in range(exercises_count):
exercise = Exercise(
title=f'Exercise {ex_idx+1}',
assessment=assessment,
order=ex_idx+1
)
exercises.append(exercise)
for elem_idx in range(elements_per_exercise):
element = GradingElement(
label=f'Question {ex_idx+1}.{elem_idx+1}',
max_points=10,
grading_type='notes',
exercise=exercise
)
elements.append(element)
db.session.add_all(exercises + elements)
db.session.commit()
# Create partial grades (roughly 70% completion)
import random
grade_probability = 0.7
for student in students:
for element in elements:
# Each (student, element) pair has a 70% chance of having a grade
if random.random() < grade_probability:
grade = Grade(
student=student,
grading_element=element,
value=str(random.randint(5, 10)) # grade between 5 and 10
)
grades.append(grade)
db.session.add_all(grades)
db.session.commit()
return assessment
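# --- Illustrative note (assumption, not part of the original helper) ---
# Because grade creation above is probabilistic, consecutive runs produce
# different completion rates. Seeding the generator would make benchmark runs
# reproducible; whether the suite wants that determinism is a judgement call.
#
# import random
# random.seed(20250115)  # any fixed seed gives stable test data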
def test_memory_usage_comparison(self, app):
"""
MEMORY: Compare memory usage between the two implementations.
"""
import tracemalloc
assessment = self._create_assessment_with_data(
students_count=8,
exercises_count=4,
elements_per_exercise=3
)
# Measure legacy memory usage
tracemalloc.start()
legacy_result = assessment._grading_progress_legacy()
_, legacy_peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
# Measure service memory usage
tracemalloc.start()
service_result = assessment._grading_progress_with_service()
_, service_peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
print(f"\n=== MEMORY USAGE COMPARISON ===")
print(f"Legacy peak memory: {legacy_peak / 1024:.1f} KB")
print(f"Service peak memory: {service_peak / 1024:.1f} KB")
print(f"Memory improvement: {legacy_peak / service_peak:.2f}x")
# Both implementations must return identical results
assert legacy_result == service_result
# Note: it is hard to guarantee that the service uses less memory,
# since the difference can be tiny and influenced by unrelated factors.
# We only check that usage stays reasonable.
assert service_peak < 1024 * 1024, "Peak memory usage should not exceed 1 MB"
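# --- Illustrative sketch (assumption, not part of the original helpers) ---
# PerformanceBenchmark.compare_implementations used throughout this class is
# assumed to run both implementations several times, averaging wall-clock time
# and query counts and deriving the ratios consumed by the assertions. A minimal
# version might look like this; all names are assumptions for illustration only.
#
# import time
#
# class PerformanceBenchmark:
#     @staticmethod
#     def compare_implementations(assessment, iterations=5):
#         runs = {'legacy': [], 'service': []}
#         counter = QueryCounter()
#         for _ in range(iterations):
#             for key, method in (('legacy', assessment._grading_progress_legacy),
#                                 ('service', assessment._grading_progress_with_service)):
#                 start = time.perf_counter()
#                 with counter.measure():
#                     method()
#                 runs[key].append({'time_ms': (time.perf_counter() - start) * 1000,
#                                   'queries': len(counter.queries)})
#         summary = {
#             key: {
#                 'avg_time_ms': sum(r['time_ms'] for r in rs) / len(rs),
#                 'avg_queries': sum(r['queries'] for r in rs) / len(rs),
#                 'max_queries': max(r['queries'] for r in rs),
#             }
#             for key, rs in runs.items()
#         }
#         summary['improvement'] = {
#             'time_ratio': summary['legacy']['avg_time_ms'] / (summary['service']['avg_time_ms'] or 1),
#             'queries_ratio': summary['legacy']['avg_queries'] / (summary['service']['avg_queries'] or 1),
#             'queries_saved': summary['legacy']['avg_queries'] - summary['service']['avg_queries'],
#         }
#         return summary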

View File

@@ -179,7 +179,7 @@ class TestClassesRoutes:
"""Test class details route with non-existent class uses repository correctly"""
with app.app_context():
with patch('routes.classes.ClassRepository') as mock_repo_class:
with patch('flask.abort') as mock_abort:
with patch('routes.classes.abort') as mock_abort:
mock_repo = MagicMock()
mock_repo_class.return_value = mock_repo
mock_repo.find_with_full_details.return_value = None

View File

@@ -1,453 +0,0 @@
"""
Detailed benchmark validating the migration of get_assessment_statistics().
Checks both the performance and the correctness of migration step 3.2.
"""
import pytest
import random
import time
from datetime import date
from models import Assessment, ClassGroup, Student, Exercise, GradingElement, Grade, db
from config.feature_flags import FeatureFlag
from app_config import config_manager
class TestAssessmentStatisticsMigrationBenchmark:
"""Benchmark avancé de la migration des statistiques."""
def test_statistics_migration_correctness_complex_scenario(self, app):
"""
Validation test against a realistic, complex scenario:
- assessment with 2 exercises
- mix of grading types (points and competency scores)
- 15 students with varied scores and special cases
"""
with app.app_context():
# Create realistic test data
assessment = self._create_realistic_assessment()
# Run with the flag OFF (legacy)
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
start_time = time.perf_counter()
legacy_stats = assessment.get_assessment_statistics()
legacy_duration = time.perf_counter() - start_time
# Run with the flag ON (refactored)
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
start_time = time.perf_counter()
refactored_stats = assessment.get_assessment_statistics()
refactored_duration = time.perf_counter() - start_time
# Exact verifications
print(f"\n📊 Complex-scenario statistics:")
print(f" Legacy: {legacy_stats}")
print(f" Refactored: {refactored_stats}")
print(f"⏱️ Performance:")
print(f" Legacy: {legacy_duration:.6f}s")
print(f" Refactored: {refactored_duration:.6f}s")
print(f" Ratio: {refactored_duration/legacy_duration:.2f}x")
# Results must be exactly identical
assert legacy_stats == refactored_stats, (
f"Mismatch detected!\nLegacy: {legacy_stats}\nRefactored: {refactored_stats}"
)
# The statistics must be internally consistent
assert legacy_stats['count'] == 15 # 15 students
assert legacy_stats['mean'] > 0
assert legacy_stats['min'] <= legacy_stats['mean'] <= legacy_stats['max']
assert legacy_stats['std_dev'] >= 0
# The refactored version must not be more than 3x slower
assert refactored_duration <= legacy_duration * 3, (
f"Performance regression! Refactored: {refactored_duration:.6f}s vs Legacy: {legacy_duration:.6f}s"
)
finally:
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
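# --- Illustrative sketch (assumption, not part of the original suite) ---
# The set/try/finally dance around USE_REFACTORED_ASSESSMENT is repeated in
# several tests. A small autouse fixture could guarantee the flag is restored
# even when an assertion fails; the `config_manager.get` accessor used below is
# an assumption, this is only a sketch of one possible approach.
#
# import pytest
#
# @pytest.fixture(autouse=True)
# def restore_refactored_flag():
#     original = config_manager.get('feature_flags.USE_REFACTORED_ASSESSMENT')
#     yield
#     config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', original)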
def test_statistics_edge_cases_consistency(self, app):
"""Test des cas limites pour s'assurer de la cohérence."""
with app.app_context():
test_cases = [
self._create_assessment_all_zeros(), # Toutes les notes à 0
self._create_assessment_all_max(), # Toutes les notes maximales
self._create_assessment_single_student(), # Un seul étudiant
self._create_assessment_all_dispensed(), # Tous dispensés
]
for i, assessment in enumerate(test_cases):
print(f"\n🧪 Test case {i+1}: {assessment.title}")
# Test legacy
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
legacy_stats = assessment.get_assessment_statistics()
# Test refactored
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
try:
refactored_stats = assessment.get_assessment_statistics()
print(f" Legacy: {legacy_stats}")
print(f" Refactored: {refactored_stats}")
# Exact comparison
assert legacy_stats == refactored_stats, (
f"Case {i+1} failed: Legacy={legacy_stats}, Refactored={refactored_stats}"
)
finally:
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
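# --- Illustrative sketch (assumption, not part of the original suite) ---
# The edge cases above rely on two special grade values: "." marks an absent
# student and "d" a dispensed one. A statistics implementation would typically
# skip those markers before aggregating; how the real service does this is not
# shown here, so the helper below is only an illustration of the convention.
#
# def numeric_grades(grades):
#     """Keep only grades that carry a numeric value."""
#     values = []
#     for grade in grades:
#         raw = (grade.value or "").strip().lower()
#         if raw in (".", "d", ""):
#             continue  # absent, dispensed or empty: excluded from statistics
#         values.append(float(raw))
#     return values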
def test_statistics_performance_scaling(self, app):
"""Test de performance avec différentes tailles d'évaluations."""
with app.app_context():
sizes = [5, 10, 25] # Différentes tailles d'évaluations
for size in sizes:
print(f"\n⚡ Test performance avec {size} étudiants")
assessment = self._create_assessment_with_n_students(size)
# Mesures de performance
legacy_times = []
refactored_times = []
# 3 measurements for each version
for _ in range(3):
# Legacy
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
start = time.perf_counter()
legacy_stats = assessment.get_assessment_statistics()
legacy_times.append(time.perf_counter() - start)
# Refactored
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
start = time.perf_counter()
refactored_stats = assessment.get_assessment_statistics()
refactored_times.append(time.perf_counter() - start)
# Results must always be identical
assert legacy_stats == refactored_stats
# Average timings
avg_legacy = sum(legacy_times) / len(legacy_times)
avg_refactored = sum(refactored_times) / len(refactored_times)
print(f" Legacy average: {avg_legacy:.6f}s")
print(f" Refactored average: {avg_refactored:.6f}s")
print(f" Improvement: {avg_legacy/avg_refactored:.2f}x")
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
# === Data-creation helpers ===
def _create_realistic_assessment(self):
"""Creates a realistic, complex assessment."""
# Class group with 15 students
class_group = ClassGroup(name="6ème A", year="2025-2026")
db.session.add(class_group)
db.session.flush()
students = []
for i in range(15):
student = Student(
first_name=f"Étudiant{i+1}",
last_name=f"Test{i+1}",
class_group_id=class_group.id
)
students.append(student)
db.session.add(student)
db.session.flush()
# Assessment
assessment = Assessment(
title="Contrôle Complexe",
description="Évaluation avec différents types de notation",
date=date(2025, 1, 15),
class_group_id=class_group.id,
trimester=2,
coefficient=2.0
)
db.session.add(assessment)
db.session.flush()
# Exercise 1: point-based questions
ex1 = Exercise(title="Calculs", assessment_id=assessment.id)
db.session.add(ex1)
db.session.flush()
elem1 = GradingElement(
label="Question 1a",
exercise_id=ex1.id,
max_points=8,
grading_type="notes"
)
db.session.add(elem1)
db.session.flush()
elem2 = GradingElement(
label="Question 1b",
exercise_id=ex1.id,
max_points=12,
grading_type="notes"
)
db.session.add(elem2)
db.session.flush()
# Exercise 2: competencies
ex2 = Exercise(title="Raisonnement", assessment_id=assessment.id)
db.session.add(ex2)
db.session.flush()
elem3 = GradingElement(
label="Raisonner",
exercise_id=ex2.id,
max_points=3,
grading_type="score"
)
db.session.add(elem3)
db.session.flush()
elem4 = GradingElement(
label="Communiquer",
exercise_id=ex2.id,
max_points=3,
grading_type="score"
)
db.session.add(elem4)
db.session.flush()
# Varied grades with a realistic distribution
grades_to_add = []
for i, student in enumerate(students):
# Question 1a: normal distribution centred around 6/8
score1a = max(0, min(8, random.gauss(6, 1.5)))
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem1.id, value=str(round(score1a, 1))))
# Question 1b: normal distribution centred around 9/12
score1b = max(0, min(12, random.gauss(9, 2)))
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem2.id, value=str(round(score1b, 1))))
# Competencies: distribution skewed towards mid-to-high levels
comp1 = random.choices([0, 1, 2, 3], weights=[1, 2, 4, 3])[0]
comp2 = random.choices([0, 1, 2, 3], weights=[1, 3, 3, 2])[0]
# A few special cases
if i == 0: # first student is absent
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem3.id, value="."))
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem4.id, value="."))
elif i == 1: # second student is dispensed
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem3.id, value="d"))
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem4.id, value=str(comp2)))
else: # regular grades
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem3.id, value=str(comp1)))
grades_to_add.append(Grade(student_id=student.id, grading_element_id=elem4.id, value=str(comp2)))
# Add all grades in a single batch
for grade in grades_to_add:
db.session.add(grade)
db.session.commit()
return assessment
def _create_assessment_all_zeros(self):
"""Évaluation avec toutes les notes à 0."""
class_group = ClassGroup(name="Test Zeros", year="2025-2026")
db.session.add(class_group)
db.session.flush()
students = [Student(first_name=f"S{i}", last_name="Zero", class_group_id=class_group.id)
for i in range(5)]
db.session.add_all(students)
db.session.flush()
assessment = Assessment(
title="All Zeros Test",
date=date(2025, 1, 15),
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.flush()
ex = Exercise(title="Ex1", assessment_id=assessment.id)
db.session.add(ex)
db.session.flush()
elem = GradingElement(
label="Q1", exercise_id=ex.id, max_points=20, grading_type="notes"
)
db.session.add(elem)
db.session.flush()
for student in students:
grade = Grade(student_id=student.id, grading_element_id=elem.id, value="0")
db.session.add(grade)
db.session.commit()
return assessment
def _create_assessment_all_max(self):
"""Évaluation avec toutes les notes maximales."""
class_group = ClassGroup(name="Test Max", year="2025-2026")
db.session.add(class_group)
db.session.flush()
students = [Student(first_name=f"S{i}", last_name="Max", class_group_id=class_group.id)
for i in range(5)]
db.session.add_all(students)
db.session.flush()
assessment = Assessment(
title="All Max Test",
date=date(2025, 1, 15),
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.flush()
ex = Exercise(title="Ex1", assessment_id=assessment.id)
db.session.add(ex)
db.session.flush()
elem1 = GradingElement(
label="Q1", exercise_id=ex.id, max_points=20, grading_type="notes"
)
elem2 = GradingElement(
label="C1", exercise_id=ex.id, max_points=3, grading_type="score"
)
db.session.add_all([elem1, elem2])
db.session.flush()
for student in students:
grade1 = Grade(student_id=student.id, grading_element_id=elem1.id, value="20")
grade2 = Grade(student_id=student.id, grading_element_id=elem2.id, value="3")
db.session.add_all([grade1, grade2])
db.session.commit()
return assessment
def _create_assessment_single_student(self):
"""Évaluation avec un seul étudiant."""
class_group = ClassGroup(name="Test Single", year="2025-2026")
db.session.add(class_group)
db.session.flush()
student = Student(first_name="Solo", last_name="Student", class_group_id=class_group.id)
db.session.add(student)
db.session.flush()
assessment = Assessment(
title="Single Student Test",
date=date(2025, 1, 15),
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.flush()
ex = Exercise(title="Ex1", assessment_id=assessment.id)
db.session.add(ex)
db.session.flush()
elem = GradingElement(
label="Q1", exercise_id=ex.id, max_points=10, grading_type="notes"
)
db.session.add(elem)
db.session.flush()
grade = Grade(student_id=student.id, grading_element_id=elem.id, value="7.5")
db.session.add(grade)
db.session.commit()
return assessment
def _create_assessment_all_dispensed(self):
"""Évaluation avec tous les étudiants dispensés."""
class_group = ClassGroup(name="Test Dispensed", year="2025-2026")
db.session.add(class_group)
db.session.flush()
students = [Student(first_name=f"S{i}", last_name="Dispensed", class_group_id=class_group.id)
for i in range(3)]
db.session.add_all(students)
db.session.flush()
assessment = Assessment(
title="All Dispensed Test",
date=date(2025, 1, 15),
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.flush()
ex = Exercise(title="Ex1", assessment_id=assessment.id)
db.session.add(ex)
db.session.flush()
elem = GradingElement(
label="Q1", exercise_id=ex.id, max_points=15, grading_type="notes"
)
db.session.add(elem)
db.session.flush()
for student in students:
grade = Grade(student_id=student.id, grading_element_id=elem.id, value="d")
db.session.add(grade)
db.session.commit()
return assessment
def _create_assessment_with_n_students(self, n):
"""Crée une évaluation avec n étudiants."""
class_group = ClassGroup(name=f"Test {n}S", year="2025-2026")
db.session.add(class_group)
db.session.flush()
students = []
for i in range(n):
student = Student(first_name=f"S{i}", last_name=f"Test{i}", class_group_id=class_group.id)
students.append(student)
db.session.add(student)
db.session.flush()
assessment = Assessment(
title=f"Performance Test {n}",
date=date(2025, 1, 15),
class_group_id=class_group.id,
trimester=1
)
db.session.add(assessment)
db.session.flush()
# 2 exercises with several grading elements each
for ex_num in range(2):
ex = Exercise(title=f"Ex{ex_num+1}", assessment_id=assessment.id)
db.session.add(ex)
db.session.flush()
for elem_num in range(3):
elem = GradingElement(
label=f"Q{elem_num+1}",
exercise_id=ex.id,
max_points=5 + elem_num * 2,
grading_type="notes"
)
db.session.add(elem)
db.session.flush()
# Random grades for every student
for student in students:
score = random.uniform(0.5, elem.max_points)
grade = Grade(
student_id=student.id,
grading_element_id=elem.id,
value=str(round(score, 1))
)
db.session.add(grade)
db.session.commit()
return assessment

View File

@@ -1,105 +0,0 @@
"""
Tests validating the migration of the StudentScoreCalculator.
Checks full compatibility between the legacy and the optimised version.
"""
import pytest
from datetime import date
from app_config import config_manager
from config.feature_flags import is_feature_enabled, FeatureFlag
from models import Assessment, ClassGroup, Student, Exercise, GradingElement, Grade, db
class TestStudentScoreCalculatorMigration:
"""Tests de migration progressive du StudentScoreCalculator."""
def test_feature_flag_toggle_compatibility(self, app):
"""Test que les deux versions (legacy/optimisée) donnent les mêmes résultats."""
with app.app_context():
# Créer des données de test dans le même contexte
class_group = ClassGroup(name="Test Class", year="2025")
db.session.add(class_group)
db.session.flush()
student1 = Student(first_name="Alice", last_name="Test", class_group_id=class_group.id)
student2 = Student(first_name="Bob", last_name="Test", class_group_id=class_group.id)
db.session.add_all([student1, student2])
db.session.flush()
assessment = Assessment(
title="Test Assessment",
date=date(2025, 1, 15),
trimester=1,
class_group_id=class_group.id
)
db.session.add(assessment)
db.session.flush()
exercise1 = Exercise(title="Exercice 1", assessment_id=assessment.id)
db.session.add(exercise1)
db.session.flush()
element1 = GradingElement(exercise_id=exercise1.id, label="Q1", grading_type="notes", max_points=10)
element2 = GradingElement(exercise_id=exercise1.id, label="Q2", grading_type="score", max_points=3)
db.session.add_all([element1, element2])
db.session.flush()
# Grades
grades = [
Grade(student_id=student1.id, grading_element_id=element1.id, value="8.5"),
Grade(student_id=student1.id, grading_element_id=element2.id, value="2"),
Grade(student_id=student2.id, grading_element_id=element1.id, value="7"),
Grade(student_id=student2.id, grading_element_id=element2.id, value="1"),
]
db.session.add_all(grades)
db.session.commit()
# Legacy version
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
config_manager.save()
legacy_results = assessment.calculate_student_scores()
# Optimised version
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
config_manager.save()
optimized_results = assessment.calculate_student_scores()
# Basic validation that both versions work
assert len(legacy_results) == 2 # (students_scores, exercise_scores)
assert len(optimized_results) == 2
legacy_students, legacy_exercises = legacy_results
optimized_students, optimized_exercises = optimized_results
# Same number of students
assert len(legacy_students) == len(optimized_students) == 2
print("Legacy results:", legacy_students.keys())
print("Optimized results:", optimized_students.keys())
def test_optimized_version_performance(self, app):
"""Test que la version optimisée utilise moins de requêtes SQL."""
with app.app_context():
# Créer données basiques
class_group = ClassGroup(name="Test Class", year="2025")
db.session.add(class_group)
db.session.flush()
assessment = Assessment(
title="Test Assessment",
date=date(2025, 1, 15),
trimester=1,
class_group_id=class_group.id
)
db.session.add(assessment)
db.session.commit()
# Enable the optimised version
config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
config_manager.save()
results = assessment.calculate_student_scores()
# Basic sanity check that the call succeeds
students_scores, exercise_scores = results
assert len(students_scores) >= 0 # may be empty
assert len(exercise_scores) >= 0
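# --- Illustrative sketch (assumption, not part of the original test) ---
# The docstring promises "fewer SQL queries", but the body never counts them.
# One way to actually assert it would be to record statements via SQLAlchemy's
# `before_cursor_execute` event around each call and compare the two counts;
# the helper name and the comparison below are assumptions for illustration.
#
# from sqlalchemy import event
#
# def count_queries(fn):
#     statements = []
#     listener = lambda conn, cursor, stmt, params, ctx, many: statements.append(stmt)
#     event.listen(db.engine, 'before_cursor_execute', listener)
#     try:
#         fn()
#     finally:
#         event.remove(db.engine, 'before_cursor_execute', listener)
#     return len(statements)
#
# config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', False)
# legacy_count = count_queries(assessment.calculate_student_scores)
# config_manager.set('feature_flags.USE_REFACTORED_ASSESSMENT', True)
# optimized_count = count_queries(assessment.calculate_student_scores)
# assert optimized_count <= legacy_count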