feat: improve assessments filters and cleaning

CLAUDE.md
@@ -62,6 +62,24 @@ Grade (Note assigned to each student)
- **Calculation rules**: Unified logic for all grading types
- **Administration interface**: Full management of grading settings

### **Advanced Assessment Filtering**

**Available Dynamic Filters:**

- **Trimester**: Filter by trimester (1, 2, 3) to organize by school term
- **Class**: Filter by class group to focus on a single class
- **Correction Status**: **NEW** - An essential filter for managing grading work:
  - **Unfinished**: Assessments partially graded or not yet started (ideal for midyear mode)
  - **Finished**: Assessments that are 100% graded
  - **Not started**: Assessments with no grades entered at all
- **Sorting**: Order by date (newest/oldest), title (alphabetical), or class

**Dynamic JavaScript** (see the sketch after this list):

- **Real-time filtering**: Filters are applied automatically on change
- **Filter persistence**: State is kept in the URL for intuitive navigation
- **Responsive interface**: Adapted to mobile and desktop devices

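A minimal server-side sketch of how these filters might be handled, assuming a Flask view and query parameters named `trimester`, `class_group`, and `status` (the view name and parameter names are illustrative, not confirmed by this commit):

```python
from flask import request, render_template

@app.route('/assessments')
def list_assessments():
    # Trimester and class filters map directly to columns on Assessment.
    query = Assessment.query
    trimester = request.args.get('trimester', type=int)
    if trimester in (1, 2, 3):
        query = query.filter(Assessment.trimester == trimester)
    class_group_id = request.args.get('class_group', type=int)
    if class_group_id:
        query = query.filter(Assessment.class_group_id == class_group_id)

    # Correction status is derived from grading_progress, so filter in Python.
    assessments = query.all()
    status = request.args.get('status')
    if status in ('completed', 'not_started'):
        assessments = [a for a in assessments if a.grading_progress['status'] == status]
    elif status == 'unfinished':
        assessments = [a for a in assessments if a.grading_progress['status'] != 'completed']

    # Keeping the parameters in request.args is what lets the filter state
    # persist in the URL for bookmarking and navigation.
    return render_template('assessments.html', assessments=assessments)
```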
### **User Interface & Modern UX (Phase 2 - December 2024)**

- **Dashboard with real-time statistics**: Clickable cards with animations and gradients

services/assessment_services.py
@@ -1,421 +0,0 @@
| """ |  | ||||||
| Services découplés pour les opérations métier sur les évaluations. |  | ||||||
|  |  | ||||||
| Ce module applique les principes SOLID en séparant les responsabilités |  | ||||||
| de calcul, statistiques et progression qui étaient auparavant dans le modèle Assessment. |  | ||||||
| """ |  | ||||||
| from abc import ABC, abstractmethod |  | ||||||
| from typing import Dict, Any, List, Optional, Tuple, Protocol |  | ||||||
| from dataclasses import dataclass |  | ||||||
| from collections import defaultdict |  | ||||||
| import statistics |  | ||||||
| import math |  | ||||||
|  |  | ||||||
| # Type hints pour améliorer la lisibilité |  | ||||||
| StudentId = int |  | ||||||
| ExerciseId = int |  | ||||||
| GradingElementId = int |  | ||||||
|  |  | ||||||
|  |  | ||||||
# =================== INTERFACES (Dependency Inversion Principle) ===================

class ConfigProvider(Protocol):
    """Interface for configuration access."""

    def is_special_value(self, value: str) -> bool:
        """Checks whether a value is special ('.', 'd', etc.)."""
        ...

    def get_special_values(self) -> Dict[str, Dict[str, Any]]:
        """Returns the configuration of special values."""
        ...


class DatabaseProvider(Protocol):
    """Interface for data access."""

    def get_grades_for_assessment(self, assessment_id: int) -> List[Any]:
        """Fetches all grades for an assessment in a single query."""
        ...

    def get_grading_elements_with_students(self, assessment_id: int) -> List[Any]:
        """Fetches grading elements together with the associated students."""
        ...

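# A minimal concrete ConfigProvider is sketched below purely to illustrate the
# Protocol above; it is not part of this commit. It assumes the config_manager
# API used elsewhere in this codebase (is_special_value / get_special_values).
class IllustrativeConfigProvider:
    def __init__(self, config_manager):
        self._config_manager = config_manager

    def is_special_value(self, value: str) -> bool:
        return self._config_manager.is_special_value(value)

    def get_special_values(self) -> Dict[str, Dict[str, Any]]:
        return self._config_manager.get_special_values()
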
# =================== DATA TRANSFER OBJECTS ===================

@dataclass
class ProgressResult:
    """Result of the progress calculation."""
    percentage: int
    completed: int
    total: int
    status: str
    students_count: int


@dataclass
class StudentScore:
    """A student's score for an assessment."""
    student_id: int
    student_name: str
    total_score: float
    total_max_points: float
    exercises: Dict[ExerciseId, Dict[str, Any]]


@dataclass
class StatisticsResult:
    """Result of the statistical calculations."""
    count: int
    mean: float
    median: float
    min: float
    max: float
    std_dev: float

# =================== STRATEGY PATTERN for grading types ===================

class GradingStrategy(ABC):
    """Strategy interface for the different grading types."""

    @abstractmethod
    def calculate_score(self, grade_value: str, max_points: float) -> Optional[float]:
        """Calculates the score according to the grading type."""
        pass

    @abstractmethod
    def get_grading_type(self) -> str:
        """Returns the grading type."""
        pass


class NotesStrategy(GradingStrategy):
    """Strategy for point-based grading (notes)."""

    def calculate_score(self, grade_value: str, max_points: float) -> Optional[float]:
        try:
            return float(grade_value)
        except (ValueError, TypeError):
            return 0.0

    def get_grading_type(self) -> str:
        return 'notes'


class ScoreStrategy(GradingStrategy):
    """Strategy for competency-based grading (score 0-3)."""

    def calculate_score(self, grade_value: str, max_points: float) -> Optional[float]:
        try:
            score_int = int(grade_value)
            if 0 <= score_int <= 3:
                return (score_int / 3) * max_points
            return 0.0
        except (ValueError, TypeError):
            return 0.0

    def get_grading_type(self) -> str:
        return 'score'


class GradingStrategyFactory:
    """Factory that creates grading strategies."""

    _strategies = {
        'notes': NotesStrategy,
        'score': ScoreStrategy
    }

    @classmethod
    def create(cls, grading_type: str) -> GradingStrategy:
        """Creates a strategy for the given type."""
        strategy_class = cls._strategies.get(grading_type)
        if not strategy_class:
            raise ValueError(f"Unsupported grading type: {grading_type}")
        return strategy_class()

    @classmethod
    def register_strategy(cls, grading_type: str, strategy_class: type):
        """Allows new grading types to be registered."""
        cls._strategies[grading_type] = strategy_class

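# Usage sketch (illustrative, not part of this commit): a competency score of
# 3 out of 3 earns the element's full max_points; unparseable input falls back
# to 0.0 rather than raising.
example_strategy = GradingStrategyFactory.create('score')
print(example_strategy.calculate_score('3', 6.0))  # 6.0
print(example_strategy.calculate_score('x', 6.0))  # 0.0
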
# =================== BUSINESS SERVICES ===================

class UnifiedGradingCalculator:
    """
    Unified calculator using the Strategy pattern and dependency injection.
    Replaces the model's GradingCalculator class.
    """

    def __init__(self, config_provider: ConfigProvider):
        self.config_provider = config_provider
        self._strategies = {}

    def calculate_score(self, grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """
        Single entry point for all score calculations.
        Uses dependency injection to avoid circular imports.
        """
        # Special values first
        if self.config_provider.is_special_value(grade_value):
            special_config = self.config_provider.get_special_values()[grade_value]
            special_value = special_config['value']
            if special_value is None:  # Exempt
                return None
            return float(special_value)  # 0 for '.', etc.

        # Delegate to the Strategy pattern
        strategy = GradingStrategyFactory.create(grading_type)
        return strategy.calculate_score(grade_value, max_points)

    def is_counted_in_total(self, grade_value: str) -> bool:
        """Determines whether a grade should count towards the total."""
        if self.config_provider.is_special_value(grade_value):
            special_config = self.config_provider.get_special_values()[grade_value]
            return special_config['counts']
        return True

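# Usage sketch (illustrative): assuming a configuration in which '.' maps to
# {'value': 0, 'counts': True} and 'd' (exempt) to {'value': None, 'counts': False},
# a caller would see:
#
#     calc = UnifiedGradingCalculator(config_provider)
#     calc.calculate_score('.', 'notes', 5.0)   # -> 0.0
#     calc.calculate_score('d', 'notes', 5.0)   # -> None (exempt)
#     calc.is_counted_in_total('d')             # -> False
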

class AssessmentProgressService:
    """
    Service dedicated to computing grading progress.
    Single Responsibility: calculating and formatting progress.
    """

    def __init__(self, db_provider: DatabaseProvider):
        self.db_provider = db_provider

    def calculate_grading_progress(self, assessment) -> ProgressResult:
        """
        Computes the grade-entry progress for an assessment.
        Optimized to avoid N+1 queries.
        """
        total_students = len(assessment.class_group.students)

        if total_students == 0:
            return ProgressResult(
                percentage=0,
                completed=0,
                total=0,
                status='no_students',
                students_count=0
            )

        # Optimized query: fetch everything at once
        grading_elements_data = self.db_provider.get_grading_elements_with_students(assessment.id)

        total_elements = 0
        completed_elements = 0

        for element_data in grading_elements_data:
            total_elements += total_students
            completed_elements += element_data['completed_grades_count']

        if total_elements == 0:
            return ProgressResult(
                percentage=0,
                completed=0,
                total=0,
                status='no_elements',
                students_count=total_students
            )

        percentage = round((completed_elements / total_elements) * 100)

        # Determine the status
        status = self._determine_status(percentage)

        return ProgressResult(
            percentage=percentage,
            completed=completed_elements,
            total=total_elements,
            status=status,
            students_count=total_students
        )

    def _determine_status(self, percentage: int) -> str:
        """Determines the status based on the percentage."""
        if percentage == 0:
            return 'not_started'
        elif percentage == 100:
            return 'completed'
        else:
            return 'in_progress'

class StudentScoreCalculator:
    """
    Service dedicated to computing student scores.
    Single Responsibility: grade calculations with business logic.
    """

    def __init__(self,
                 grading_calculator: UnifiedGradingCalculator,
                 db_provider: DatabaseProvider):
        self.grading_calculator = grading_calculator
        self.db_provider = db_provider

    def calculate_student_scores(self, assessment) -> Tuple[Dict[StudentId, StudentScore], Dict[ExerciseId, Dict[StudentId, float]]]:
        """
        Computes the scores of every student for an assessment.
        Optimized with a single query to avoid N+1.
        """
        # Optimized query: all grades at once
        grades_data = self.db_provider.get_grades_for_assessment(assessment.id)

        # Organize the data by student and exercise
        students_scores = {}
        exercise_scores = defaultdict(lambda: defaultdict(float))

        # Compute for each student
        for student in assessment.class_group.students:
            student_score = self._calculate_single_student_score(
                student, assessment, grades_data
            )
            students_scores[student.id] = student_score

            # Update the per-exercise scores
            for exercise_id, exercise_data in student_score.exercises.items():
                exercise_scores[exercise_id][student.id] = exercise_data['score']

        return students_scores, dict(exercise_scores)

    def _calculate_single_student_score(self, student, assessment, grades_data) -> StudentScore:
        """Computes the score of a single student."""
        total_score = 0
        total_max_points = 0
        student_exercises = {}

        # Filter the grades belonging to this student
        student_grades = {
            grade['grading_element_id']: grade
            for grade in grades_data
            if grade['student_id'] == student.id
        }

        for exercise in assessment.exercises:
            exercise_result = self._calculate_exercise_score(
                exercise, student_grades
            )

            student_exercises[exercise.id] = exercise_result
            total_score += exercise_result['score']
            total_max_points += exercise_result['max_points']

        return StudentScore(
            student_id=student.id,
            student_name=f"{student.first_name} {student.last_name}",
            total_score=round(total_score, 2),
            total_max_points=total_max_points,
            exercises=student_exercises
        )

    def _calculate_exercise_score(self, exercise, student_grades) -> Dict[str, Any]:
        """Computes the score for a specific exercise."""
        exercise_score = 0
        exercise_max_points = 0

        for element in exercise.grading_elements:
            grade_data = student_grades.get(element.id)

            if grade_data and grade_data['value'] and grade_data['value'] != '':
                calculated_score = self.grading_calculator.calculate_score(
                    grade_data['value'].strip(),
                    element.grading_type,
                    element.max_points
                )

                if self.grading_calculator.is_counted_in_total(grade_data['value'].strip()):
                    if calculated_score is not None:  # Not exempt
                        exercise_score += calculated_score
                    exercise_max_points += element.max_points

        return {
            'score': exercise_score,
            'max_points': exercise_max_points,
            'title': exercise.title
        }


class AssessmentStatisticsService:
    """
    Service dedicated to statistical calculations.
    Single Responsibility: statistical analysis of results.
    """

    def __init__(self, score_calculator: StudentScoreCalculator):
        self.score_calculator = score_calculator

    def get_assessment_statistics(self, assessment) -> StatisticsResult:
        """Computes descriptive statistics for an assessment."""
        students_scores, _ = self.score_calculator.calculate_student_scores(assessment)
        scores = [score.total_score for score in students_scores.values()]

        if not scores:
            return StatisticsResult(
                count=0,
                mean=0,
                median=0,
                min=0,
                max=0,
                std_dev=0
            )

        return StatisticsResult(
            count=len(scores),
            mean=round(statistics.mean(scores), 2),
            median=round(statistics.median(scores), 2),
            min=min(scores),
            max=max(scores),
            std_dev=round(statistics.stdev(scores) if len(scores) > 1 else 0, 2)
        )

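# Worked example (illustrative): for total scores [10.0, 12.0, 14.0] the service
# reports count=3, mean=12.0, median=12.0, min=10.0, max=14.0 and std_dev=2.0;
# note that statistics.stdev is the *sample* standard deviation.
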

# =================== FACADE to simplify usage ===================

class AssessmentServicesFacade:
    """
    Facade that bundles all the services to make them easier to use.
    Single entry point with dependency injection.
    """

    def __init__(self,
                 config_provider: ConfigProvider,
                 db_provider: DatabaseProvider):
        # Build the services with dependency injection
        self.grading_calculator = UnifiedGradingCalculator(config_provider)
        self.progress_service = AssessmentProgressService(db_provider)
        self.score_calculator = StudentScoreCalculator(self.grading_calculator, db_provider)
        self.statistics_service = AssessmentStatisticsService(self.score_calculator)

    def get_grading_progress(self, assessment) -> ProgressResult:
        """Entry point for progress."""
        return self.progress_service.calculate_grading_progress(assessment)

    def calculate_student_scores(self, assessment) -> Tuple[Dict[StudentId, StudentScore], Dict[ExerciseId, Dict[StudentId, float]]]:
        """Entry point for student scores."""
        return self.score_calculator.calculate_student_scores(assessment)

    def get_statistics(self, assessment) -> StatisticsResult:
        """Entry point for statistics."""
        return self.statistics_service.get_assessment_statistics(assessment)


# =================== FACTORY FUNCTION ===================

def create_assessment_services() -> AssessmentServicesFacade:
    """
    Factory function that builds a configured AssessmentServicesFacade.
    Standard entry point for using the refactored services.
    """
    # ConfigProvider and DatabaseProvider are Protocols and cannot be
    # instantiated directly, so delegate to the concrete providers.
    from providers.concrete_providers import AssessmentServicesFactory

    return AssessmentServicesFactory.create_facade()
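
# Usage sketch (illustrative, not part of this commit): consuming the facade
# for an Assessment instance `assessment` already loaded via SQLAlchemy.
services = create_assessment_services()
progress = services.get_grading_progress(assessment)
stats = services.get_statistics(assessment)
print(progress.percentage, stats.mean, stats.std_dev)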

config/feature_flags.py
@@ -1,388 +0,0 @@
| """ |  | ||||||
| Système de Feature Flags pour Migration Progressive (JOUR 1-2) |  | ||||||
|  |  | ||||||
| Ce module implémente un système de feature flags robust pour permettre |  | ||||||
| l'activation/désactivation contrôlée des nouvelles fonctionnalités pendant |  | ||||||
| la migration vers l'architecture refactorisée. |  | ||||||
|  |  | ||||||
| Architecture: |  | ||||||
| - Enum typé pour toutes les feature flags |  | ||||||
| - Configuration centralisée avec validation |  | ||||||
| - Support pour rollback instantané |  | ||||||
| - Logging automatique des changements d'état |  | ||||||
|  |  | ||||||
| Utilisé pour la migration progressive selon MIGRATION_PROGRESSIVE.md |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| from enum import Enum |  | ||||||
| from typing import Dict, Any, Optional |  | ||||||
| from dataclasses import dataclass |  | ||||||
| from datetime import datetime |  | ||||||
| import logging |  | ||||||
|  |  | ||||||
|  |  | ||||||
| logger = logging.getLogger(__name__) |  | ||||||
|  |  | ||||||

class FeatureFlag(Enum):
    """
    Enumeration of all available feature flags.

    Naming conventions:
    - USE_NEW_<SERVICE_NAME> for service migrations
    - ENABLE_<FEATURE_NAME> for new functionality
    """

    # === PROGRESSIVE SERVICE MIGRATION ===

    # DAY 3-4: Core service migration
    USE_STRATEGY_PATTERN = "use_strategy_pattern"
    USE_REFACTORED_ASSESSMENT = "use_refactored_assessment"

    # DAY 5-6: Advanced services
    USE_NEW_STUDENT_SCORE_CALCULATOR = "use_new_student_score_calculator"
    USE_NEW_ASSESSMENT_STATISTICS_SERVICE = "use_new_assessment_statistics_service"

    # === ADVANCED FEATURES ===

    # Performance and monitoring
    ENABLE_PERFORMANCE_MONITORING = "enable_performance_monitoring"
    ENABLE_QUERY_OPTIMIZATION = "enable_query_optimization"

    # User interface
    ENABLE_BULK_OPERATIONS = "enable_bulk_operations"
    ENABLE_ADVANCED_FILTERS = "enable_advanced_filters"


@dataclass
class FeatureFlagConfig:
    """Configuration of a feature flag, with metadata."""

    enabled: bool
    description: str
    migration_day: Optional[int] = None  # Migration day in the plan (1-7)
    rollback_safe: bool = True  # Can be disabled without risk
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    def __post_init__(self):
        if self.created_at is None:
            self.created_at = datetime.utcnow()
        if self.updated_at is None:
            self.updated_at = datetime.utcnow()

class FeatureFlagManager:
    """
    Centralized feature-flag manager.

    Features:
    - Configuration via environment variables
    - Fallback to a default configuration
    - Logging of state changes
    - Flag validation
    - Support for unit tests
    """

    def __init__(self):
        self._flags: Dict[FeatureFlag, FeatureFlagConfig] = {}
        self._initialize_defaults()
        self._load_from_environment()

    def _initialize_defaults(self) -> None:
        """Initializes the default feature-flag configuration."""

        # Default configuration - EVERYTHING DISABLED for maximum safety
        default_configs = {
            # PROGRESSIVE MIGRATION - DAY 3-4
            FeatureFlag.USE_STRATEGY_PATTERN: FeatureFlagConfig(
                enabled=False,
                description="Uses the new grading strategies (Strategy pattern)",
                migration_day=3,
                rollback_safe=True
            ),
            FeatureFlag.USE_REFACTORED_ASSESSMENT: FeatureFlagConfig(
                enabled=False,
                description="Uses the new progress-calculation service",
                migration_day=4,
                rollback_safe=True
            ),

            # PROGRESSIVE MIGRATION - DAY 5-6
            FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR: FeatureFlagConfig(
                enabled=False,
                description="Uses the new student score calculator",
                migration_day=5,
                rollback_safe=True
            ),
            FeatureFlag.USE_NEW_ASSESSMENT_STATISTICS_SERVICE: FeatureFlagConfig(
                enabled=False,
                description="Uses the new assessment statistics service",
                migration_day=6,
                rollback_safe=True
            ),

            # ADVANCED FEATURES
            FeatureFlag.ENABLE_PERFORMANCE_MONITORING: FeatureFlagConfig(
                enabled=False,
                description="Enables performance monitoring",
                rollback_safe=True
            ),
            FeatureFlag.ENABLE_QUERY_OPTIMIZATION: FeatureFlagConfig(
                enabled=False,
                description="Enables query optimizations",
                rollback_safe=True
            ),
            FeatureFlag.ENABLE_BULK_OPERATIONS: FeatureFlagConfig(
                enabled=False,
                description="Enables bulk operations",
                rollback_safe=True
            ),
            FeatureFlag.ENABLE_ADVANCED_FILTERS: FeatureFlagConfig(
                enabled=False,
                description="Enables advanced filters",
                rollback_safe=True
            ),
        }

        self._flags.update(default_configs)
        logger.info("Feature flags initialized with default configuration")

    def _load_from_environment(self) -> None:
        """Loads configuration from environment variables."""

        for flag in FeatureFlag:
            env_var = f"FEATURE_FLAG_{flag.value.upper()}"
            env_value = os.environ.get(env_var)

            if env_value is not None:
                # Parse a boolean from the environment
                enabled = env_value.lower() in ('true', '1', 'yes', 'on', 'enabled')

                if flag in self._flags:
                    old_state = self._flags[flag].enabled
                    self._flags[flag].enabled = enabled
                    self._flags[flag].updated_at = datetime.utcnow()

                    if old_state != enabled:
                        logger.info(
                            f"Feature flag {flag.value} changed via env: {old_state} -> {enabled}",
                            extra={
                                'event_type': 'feature_flag_changed',
                                'flag_name': flag.value,
                                'old_value': old_state,
                                'new_value': enabled,
                                'source': 'environment'
                            }
                        )

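    # Example (illustrative, not part of this commit): with
    # FEATURE_FLAG_USE_STRATEGY_PATTERN=true set before startup, the flag is
    # enabled at construction time:
    #
    #     os.environ['FEATURE_FLAG_USE_STRATEGY_PATTERN'] = 'true'
    #     manager = FeatureFlagManager()
    #     assert manager.is_enabled(FeatureFlag.USE_STRATEGY_PATTERN)
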
    def is_enabled(self, flag: FeatureFlag) -> bool:
        """
        Checks whether a feature flag is enabled.

        Args:
            flag: The feature flag to check

        Returns:
            bool: True if the flag is enabled, False otherwise
        """
        if flag not in self._flags:
            logger.warning(
                f"Unknown feature flag: {flag.value}. Returning False by default.",
                extra={'event_type': 'unknown_feature_flag', 'flag_name': flag.value}
            )
            return False

        return self._flags[flag].enabled

    def enable(self, flag: FeatureFlag, reason: str = "") -> bool:
        """
        Enables a feature flag.

        Args:
            flag: The feature flag to enable
            reason: Reason for enabling it (for the logs)

        Returns:
            bool: True if enabling succeeded
        """
        if flag not in self._flags:
            logger.error(f"Cannot enable an unknown feature flag: {flag.value}")
            return False

        old_state = self._flags[flag].enabled
        self._flags[flag].enabled = True
        self._flags[flag].updated_at = datetime.utcnow()

        logger.info(
            f"Feature flag {flag.value} enabled. Reason: {reason}",
            extra={
                'event_type': 'feature_flag_enabled',
                'flag_name': flag.value,
                'old_value': old_state,
                'new_value': True,
                'reason': reason,
                'migration_day': self._flags[flag].migration_day
            }
        )

        return True

    def disable(self, flag: FeatureFlag, reason: str = "") -> bool:
        """
        Disables a feature flag.

        Args:
            flag: The feature flag to disable
            reason: Reason for disabling it (for the logs)

        Returns:
            bool: True if disabling succeeded
        """
        if flag not in self._flags:
            logger.error(f"Cannot disable an unknown feature flag: {flag.value}")
            return False

        if not self._flags[flag].rollback_safe:
            logger.warning(
                f"Disabling a flag that is not rollback-safe: {flag.value}",
                extra={'event_type': 'unsafe_rollback_attempt', 'flag_name': flag.value}
            )

        old_state = self._flags[flag].enabled
        self._flags[flag].enabled = False
        self._flags[flag].updated_at = datetime.utcnow()

        logger.info(
            f"Feature flag {flag.value} disabled. Reason: {reason}",
            extra={
                'event_type': 'feature_flag_disabled',
                'flag_name': flag.value,
                'old_value': old_state,
                'new_value': False,
                'reason': reason,
                'rollback_safe': self._flags[flag].rollback_safe
            }
        )

        return True

    def get_config(self, flag: FeatureFlag) -> Optional[FeatureFlagConfig]:
        """Returns the full configuration of a feature flag."""
        return self._flags.get(flag)

    def get_status_summary(self) -> Dict[str, Any]:
        """
        Returns a summary of the state of all feature flags.

        Returns:
            Dict containing each flag's status with metadata
        """
        summary = {
            'flags': {},
            'migration_status': {
                'day_3_ready': False,
                'day_4_ready': False,
                'day_5_ready': False,
                'day_6_ready': False
            },
            'total_enabled': 0,
            'last_updated': None
        }

        latest_update = None
        enabled_count = 0

        for flag, config in self._flags.items():
            summary['flags'][flag.value] = {
                'enabled': config.enabled,
                'description': config.description,
                'migration_day': config.migration_day,
                'rollback_safe': config.rollback_safe,
                'updated_at': config.updated_at.isoformat() if config.updated_at else None
            }

            if config.enabled:
                enabled_count += 1

            if latest_update is None or (config.updated_at and config.updated_at > latest_update):
                latest_update = config.updated_at

        # Compute the per-day migration status
        day_3_flags = [FeatureFlag.USE_STRATEGY_PATTERN]
        day_4_flags = [FeatureFlag.USE_REFACTORED_ASSESSMENT]
        day_5_flags = [FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR]
        day_6_flags = [FeatureFlag.USE_NEW_ASSESSMENT_STATISTICS_SERVICE]

        summary['migration_status']['day_3_ready'] = all(self.is_enabled(flag) for flag in day_3_flags)
        summary['migration_status']['day_4_ready'] = all(self.is_enabled(flag) for flag in day_4_flags)
        summary['migration_status']['day_5_ready'] = all(self.is_enabled(flag) for flag in day_5_flags)
        summary['migration_status']['day_6_ready'] = all(self.is_enabled(flag) for flag in day_6_flags)

        summary['total_enabled'] = enabled_count
        summary['last_updated'] = latest_update.isoformat() if latest_update else None

        return summary

    def enable_migration_day(self, day: int, reason: str = "") -> Dict[str, bool]:
        """
        Enables every feature flag for a given migration day.

        Args:
            day: Migration day number (3-6)
            reason: Reason for enabling

        Returns:
            Dict[flag_name, success] indicating which flags were enabled
        """
        day_flags_map = {
            3: [FeatureFlag.USE_STRATEGY_PATTERN],
            4: [FeatureFlag.USE_REFACTORED_ASSESSMENT],
            5: [FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR],
            6: [FeatureFlag.USE_NEW_ASSESSMENT_STATISTICS_SERVICE]
        }

        if day not in day_flags_map:
            logger.error(f"Invalid migration day: {day}. Supported days: 3-6")
            return {}

        results = {}
        migration_reason = f"Migration Day {day}: {reason}" if reason else f"Migration Day {day}"

        for flag in day_flags_map[day]:
            success = self.enable(flag, migration_reason)
            results[flag.value] = success

        logger.info(
            f"Flag activation for day {day} finished",
            extra={
                'event_type': 'migration_day_activation',
                'migration_day': day,
                'results': results,
                'reason': reason
            }
        )

        return results


# Global feature-flag manager instance
feature_flags = FeatureFlagManager()


def is_feature_enabled(flag: FeatureFlag) -> bool:
    """
    Utility function to check the state of a feature flag.

    Usage in code:
        from config.feature_flags import is_feature_enabled, FeatureFlag

        if is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN):
            # Use the new implementation
            result = new_grading_service.calculate()
        else:
            # Use the old implementation
            result = old_grading_method()
    """
    return feature_flags.is_enabled(flag)
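
# Usage sketch (illustrative, not part of this commit): rolling out the day-3
# migration and inspecting the aggregate state afterwards.
rollout = feature_flags.enable_migration_day(3, reason="progressive rollout")
print(rollout)  # {'use_strategy_pattern': True}
print(feature_flags.get_status_summary()['migration_status']['day_3_ready'])  # True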

models.py
@@ -1,531 +0,0 @@
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from sqlalchemy import Index, CheckConstraint, Enum
from decimal import Decimal
from typing import Optional, Dict, Any
from flask import current_app

db = SQLAlchemy()


class GradingCalculator:
    """
    Unified calculator for all grading types.
    Uses the USE_STRATEGY_PATTERN feature flag to switch between the old
    conditional logic and the new Strategy pattern.
    """

    @staticmethod
    def calculate_score(grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """
        ONE single entry point for all score calculations.

        Args:
            grade_value: Grade value (e.g. '15.5', '2', '.', 'd')
            grading_type: Grading type ('notes' or 'score')
            max_points: Maximum points of the grading element

        Returns:
            Calculated score, or None for exempt values
        """
        # Feature flag to switch to the Strategy pattern
        from config.feature_flags import is_feature_enabled, FeatureFlag

        if is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN):
            # === NEW IMPLEMENTATION: Strategy pattern ===
            return GradingCalculator._calculate_score_with_strategy(grade_value, grading_type, max_points)
        else:
            # === OLD IMPLEMENTATION: conditional logic ===
            return GradingCalculator._calculate_score_legacy(grade_value, grading_type, max_points)

    @staticmethod
    def _calculate_score_with_strategy(grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """
        New implementation using the Strategy pattern and dependency injection.
        """
        from services.assessment_services import UnifiedGradingCalculator
        from providers.concrete_providers import ConfigManagerProvider

        # Dependency injection to avoid circular imports
        config_provider = ConfigManagerProvider()
        unified_calculator = UnifiedGradingCalculator(config_provider)

        return unified_calculator.calculate_score(grade_value, grading_type, max_points)

    @staticmethod
    def _calculate_score_legacy(grade_value: str, grading_type: str, max_points: float) -> Optional[float]:
        """
        Old implementation with conditional logic (kept for compatibility).
        """
        # Avoid circular imports by importing at call time
        from app_config import config_manager

        # Special values first
        if config_manager.is_special_value(grade_value):
            special_config = config_manager.get_special_values()[grade_value]
            special_value = special_config['value']
            if special_value is None:  # Exempt
                return None
            return float(special_value)  # 0 for '.', 'a'

        # Calculation by type (legacy conditional logic)
        try:
            if grading_type == 'notes':
                return float(grade_value)
            elif grading_type == 'score':
                # Score 0-3 converted into a proportion of max_points
                score_int = int(grade_value)
                if 0 <= score_int <= 3:
                    return (score_int / 3) * max_points
                return 0.0
        except (ValueError, TypeError):
            return 0.0

        return 0.0

    @staticmethod
    def is_counted_in_total(grade_value: str, grading_type: str) -> bool:
        """
        Determines whether a grade should count towards the total.
        Uses the USE_STRATEGY_PATTERN feature flag to switch to the new services.

        Returns:
            True if the grade counts towards the total, False otherwise (e.g. exempt)
        """
        # Feature flag to switch to the Strategy pattern
        from config.feature_flags import is_feature_enabled, FeatureFlag

        if is_feature_enabled(FeatureFlag.USE_STRATEGY_PATTERN):
            # === NEW IMPLEMENTATION: Strategy pattern ===
            return GradingCalculator._is_counted_in_total_with_strategy(grade_value)
        else:
            # === OLD IMPLEMENTATION: direct logic ===
            return GradingCalculator._is_counted_in_total_legacy(grade_value)

    @staticmethod
    def _is_counted_in_total_with_strategy(grade_value: str) -> bool:
        """
        New implementation using dependency injection.
        """
        from services.assessment_services import UnifiedGradingCalculator
        from providers.concrete_providers import ConfigManagerProvider

        # Dependency injection to avoid circular imports
        config_provider = ConfigManagerProvider()
        unified_calculator = UnifiedGradingCalculator(config_provider)

        return unified_calculator.is_counted_in_total(grade_value)

    @staticmethod
    def _is_counted_in_total_legacy(grade_value: str) -> bool:
        """
        Old implementation with direct access to config_manager.
        """
        from app_config import config_manager

        # Special values
        if config_manager.is_special_value(grade_value):
            special_config = config_manager.get_special_values()[grade_value]
            return special_config['counts']

        # All other values count
        return True

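# Usage sketch (illustrative, not part of this commit): the same call routes
# transparently to either implementation depending on USE_STRATEGY_PATTERN.
print(GradingCalculator.calculate_score('15.5', 'notes', 20.0))  # 15.5
print(GradingCalculator.calculate_score('2', 'score', 6.0))      # 2/3 of 6 points
print(GradingCalculator.is_counted_in_total('15.5', 'notes'))    # True
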
class ClassGroup(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False, unique=True)
    description = db.Column(db.Text)
    year = db.Column(db.String(20), nullable=False)
    students = db.relationship('Student', backref='class_group', lazy=True)
    assessments = db.relationship('Assessment', backref='class_group', lazy=True)

    def __repr__(self):
        return f'<ClassGroup {self.name}>'

class Student(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    last_name = db.Column(db.String(100), nullable=False)
    first_name = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(120), unique=True)
    class_group_id = db.Column(db.Integer, db.ForeignKey('class_group.id'), nullable=False)
    grades = db.relationship('Grade', backref='student', lazy=True)

    def __repr__(self):
        return f'<Student {self.first_name} {self.last_name}>'

    @property
    def full_name(self):
        return f"{self.first_name} {self.last_name}"

class Assessment(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200), nullable=False)
    description = db.Column(db.Text)
    date = db.Column(db.Date, nullable=False, default=datetime.utcnow)
    trimester = db.Column(db.Integer, nullable=False)  # 1, 2, or 3
    class_group_id = db.Column(db.Integer, db.ForeignKey('class_group.id'), nullable=False)
    coefficient = db.Column(db.Float, default=1.0)  # Keep Float for compatibility
    exercises = db.relationship('Exercise', backref='assessment', lazy=True, cascade='all, delete-orphan')

    __table_args__ = (
        CheckConstraint('trimester IN (1, 2, 3)', name='check_trimester_valid'),
    )

    def __repr__(self):
        return f'<Assessment {self.title}>'

    @property
    def grading_progress(self):
        """
        Computes the grade-entry progress percentage for this assessment.
        Uses the USE_REFACTORED_ASSESSMENT feature flag to switch between the
        old logic and the new, optimized AssessmentProgressService.

        Returns:
            Dict with the progress statistics
        """
        # Feature flag for progressive migration to AssessmentProgressService
        from config.feature_flags import is_feature_enabled, FeatureFlag

        if is_feature_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT):
            # === NEW IMPLEMENTATION: AssessmentProgressService ===
            return self._grading_progress_with_service()
        else:
            # === OLD IMPLEMENTATION: logic in the model ===
            return self._grading_progress_legacy()

|     def _grading_progress_with_service(self): |  | ||||||
|         """ |  | ||||||
|         Nouvelle implémentation utilisant AssessmentProgressService avec injection de dépendances. |  | ||||||
|         Optimise les requêtes pour éviter les problèmes N+1. |  | ||||||
|         """ |  | ||||||
|         from providers.concrete_providers import AssessmentServicesFactory |  | ||||||
|          |  | ||||||
|         # Injection de dépendances pour éviter les imports circulaires |  | ||||||
|         services_facade = AssessmentServicesFactory.create_facade() |  | ||||||
|         progress_result = services_facade.get_grading_progress(self) |  | ||||||
|          |  | ||||||
|         # Conversion du ProgressResult vers le format dict attendu |  | ||||||
|         return { |  | ||||||
|             'percentage': progress_result.percentage, |  | ||||||
|             'completed': progress_result.completed, |  | ||||||
|             'total': progress_result.total, |  | ||||||
|             'status': progress_result.status, |  | ||||||
|             'students_count': progress_result.students_count |  | ||||||
|         } |  | ||||||
|      |  | ||||||
|     def _grading_progress_legacy(self): |  | ||||||
|         """ |  | ||||||
|         Ancienne implémentation avec requêtes multiples (pour compatibilité). |  | ||||||
|         """         |  | ||||||
|         # Obtenir tous les éléments de notation pour cette évaluation |  | ||||||
|         total_elements = 0 |  | ||||||
|         completed_elements = 0 |  | ||||||
|         total_students = len(self.class_group.students) |  | ||||||
|          |  | ||||||
|         if total_students == 0: |  | ||||||
|             return { |  | ||||||
|                 'percentage': 0, |  | ||||||
|                 'completed': 0, |  | ||||||
|                 'total': 0, |  | ||||||
|                 'status': 'no_students', |  | ||||||
|                 'students_count': 0 |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         # Parcourir tous les exercices et leurs éléments de notation |  | ||||||
|         for exercise in self.exercises: |  | ||||||
|             for grading_element in exercise.grading_elements: |  | ||||||
|                 total_elements += total_students |  | ||||||
|                  |  | ||||||
|                 # Compter les notes saisies (valeur non nulle et non vide, y compris '.') |  | ||||||
|                 completed_for_element = db.session.query(Grade).filter( |  | ||||||
|                     Grade.grading_element_id == grading_element.id, |  | ||||||
|                     Grade.value.isnot(None), |  | ||||||
|                     Grade.value != '' |  | ||||||
|                 ).count() |  | ||||||
|                  |  | ||||||
|                 completed_elements += completed_for_element |  | ||||||
|          |  | ||||||
|         if total_elements == 0: |  | ||||||
|             return { |  | ||||||
|                 'percentage': 0, |  | ||||||
|                 'completed': 0, |  | ||||||
|                 'total': 0, |  | ||||||
|                 'status': 'no_elements', |  | ||||||
|                 'students_count': total_students |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         percentage = round((completed_elements / total_elements) * 100) |  | ||||||
|          |  | ||||||
|         # Determine the status |  | ||||||
|         if percentage == 0: |  | ||||||
|             status = 'not_started' |  | ||||||
|         elif percentage == 100: |  | ||||||
|             status = 'completed' |  | ||||||
|         else: |  | ||||||
|             status = 'in_progress' |  | ||||||
|          |  | ||||||
|         return { |  | ||||||
|             'percentage': percentage, |  | ||||||
|             'completed': completed_elements, |  | ||||||
|             'total': total_elements, |  | ||||||
|             'status': status, |  | ||||||
|             'students_count': total_students |  | ||||||
|         } |  | ||||||
|      |  | ||||||
|     def calculate_student_scores(self): |  | ||||||
|         """Compute the scores of every student for this assessment. |  | ||||||
|         Returns a dictionary of scores per student and per exercise. |  | ||||||
|         Simplified calculation logic with only 2 grading types.""" |  | ||||||
|         # Feature flag for the progressive migration to the optimized services |  | ||||||
|         from config.feature_flags import is_feature_enabled, FeatureFlag |  | ||||||
|          |  | ||||||
|         if is_feature_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT): |  | ||||||
|             return self._calculate_student_scores_optimized() |  | ||||||
|         return self._calculate_student_scores_legacy() |  | ||||||
|      |  | ||||||
|     def _calculate_student_scores_optimized(self): |  | ||||||
|         """Optimized version using decoupled services and a single query.""" |  | ||||||
|         from providers.concrete_providers import AssessmentServicesFactory |  | ||||||
|          |  | ||||||
|         services = AssessmentServicesFactory.create_facade() |  | ||||||
|         students_scores_data, exercise_scores_data = services.score_calculator.calculate_student_scores(self) |  | ||||||
|          |  | ||||||
|         # Convert to the legacy format for compatibility |  | ||||||
|         students_scores = {} |  | ||||||
|         exercise_scores = {} |  | ||||||
|          |  | ||||||
|         for student_id, score_data in students_scores_data.items(): |  | ||||||
|             # Fetch the student object for compatibility |  | ||||||
|             student_obj = next(s for s in self.class_group.students if s.id == student_id) |  | ||||||
|             students_scores[student_id] = { |  | ||||||
|                 'student': student_obj, |  | ||||||
|                 'total_score': score_data.total_score, |  | ||||||
|                 'total_max_points': score_data.total_max_points, |  | ||||||
|                 'exercises': score_data.exercises |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         for exercise_id, student_scores in exercise_scores_data.items(): |  | ||||||
|             exercise_scores[exercise_id] = dict(student_scores) |  | ||||||
|          |  | ||||||
|         return students_scores, exercise_scores |  | ||||||
|      |  | ||||||
|     def _calculate_student_scores_legacy(self): |  | ||||||
|         """Legacy version with N+1 queries - kept temporarily.""" |  | ||||||
|         from collections import defaultdict |  | ||||||
|          |  | ||||||
|         students_scores = {} |  | ||||||
|         exercise_scores = defaultdict(lambda: defaultdict(float)) |  | ||||||
|          |  | ||||||
|         for student in self.class_group.students: |  | ||||||
|             total_score = 0 |  | ||||||
|             total_max_points = 0 |  | ||||||
|             student_exercises = {} |  | ||||||
|              |  | ||||||
|             for exercise in self.exercises: |  | ||||||
|                 exercise_score = 0 |  | ||||||
|                 exercise_max_points = 0 |  | ||||||
|                  |  | ||||||
|                 for element in exercise.grading_elements: |  | ||||||
|                     grade = Grade.query.filter_by( |  | ||||||
|                         student_id=student.id, |  | ||||||
|                         grading_element_id=element.id |  | ||||||
|                     ).first() |  | ||||||
|                      |  | ||||||
|                     # If a grade was entered for this element (including special values) |  | ||||||
|                     if grade and grade.value and grade.value != '': |  | ||||||
|                         # Use the new unified logic |  | ||||||
|                         calculated_score = GradingCalculator.calculate_score( |  | ||||||
|                             grade.value.strip(),  |  | ||||||
|                             element.grading_type,  |  | ||||||
|                             element.max_points |  | ||||||
|                         ) |  | ||||||
|                          |  | ||||||
|                         # Check whether this grade counts toward the total |  | ||||||
|                         if GradingCalculator.is_counted_in_total(grade.value.strip(), element.grading_type): |  | ||||||
|                             if calculated_score is not None:  # Not exempted |  | ||||||
|                                 exercise_score += calculated_score |  | ||||||
|                             exercise_max_points += element.max_points |  | ||||||
|                         # If not counted or exempted, skip it entirely |  | ||||||
|                  |  | ||||||
|                 student_exercises[exercise.id] = { |  | ||||||
|                     'score': exercise_score, |  | ||||||
|                     'max_points': exercise_max_points, |  | ||||||
|                     'title': exercise.title |  | ||||||
|                 } |  | ||||||
|                 total_score += exercise_score |  | ||||||
|                 total_max_points += exercise_max_points |  | ||||||
|                 exercise_scores[exercise.id][student.id] = exercise_score |  | ||||||
|              |  | ||||||
|             students_scores[student.id] = { |  | ||||||
|                 'student': student, |  | ||||||
|                 'total_score': round(total_score, 2), |  | ||||||
|                 'total_max_points': total_max_points, |  | ||||||
|                 'exercises': student_exercises |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         return students_scores, dict(exercise_scores) |  | ||||||
|      |  | ||||||
|     def get_assessment_statistics(self): |  | ||||||
|         """ |  | ||||||
|         Compute descriptive statistics for this assessment. |  | ||||||
|  |  | ||||||
|         Uses the USE_REFACTORED_ASSESSMENT feature flag to switch between |  | ||||||
|         the legacy system and the new refactored services. |  | ||||||
|         """ |  | ||||||
|         from config.feature_flags import FeatureFlag, is_feature_enabled |  | ||||||
|          |  | ||||||
|         if is_feature_enabled(FeatureFlag.USE_REFACTORED_ASSESSMENT): |  | ||||||
|             from providers.concrete_providers import AssessmentServicesFactory |  | ||||||
|             services = AssessmentServicesFactory.create_facade() |  | ||||||
|             result = services.statistics_service.get_assessment_statistics(self) |  | ||||||
|              |  | ||||||
|             # Convert the StatisticsResult to the legacy dict format |  | ||||||
|             return { |  | ||||||
|                 'count': result.count, |  | ||||||
|                 'mean': result.mean, |  | ||||||
|                 'median': result.median, |  | ||||||
|                 'min': result.min, |  | ||||||
|                 'max': result.max, |  | ||||||
|                 'std_dev': result.std_dev |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         return self._get_assessment_statistics_legacy() |  | ||||||
|      |  | ||||||
|     def _get_assessment_statistics_legacy(self): |  | ||||||
|         """Legacy statistics version - to be removed once the migration is complete.""" |  | ||||||
|         students_scores, _ = self.calculate_student_scores() |  | ||||||
|         scores = [data['total_score'] for data in students_scores.values()] |  | ||||||
|          |  | ||||||
|         if not scores: |  | ||||||
|             return { |  | ||||||
|                 'count': 0, |  | ||||||
|                 'mean': 0, |  | ||||||
|                 'median': 0, |  | ||||||
|                 'min': 0, |  | ||||||
|                 'max': 0, |  | ||||||
|                 'std_dev': 0 |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         import statistics |  | ||||||
|         import math |  | ||||||
|          |  | ||||||
|         return { |  | ||||||
|             'count': len(scores), |  | ||||||
|             'mean': round(statistics.mean(scores), 2), |  | ||||||
|             'median': round(statistics.median(scores), 2), |  | ||||||
|             'min': min(scores), |  | ||||||
|             'max': max(scores), |  | ||||||
|             'std_dev': round(statistics.stdev(scores) if len(scores) > 1 else 0, 2) |  | ||||||
|         } |  | ||||||
|      |  | ||||||
|     def get_total_max_points(self): |  | ||||||
|         """Compute the total maximum points for this assessment.""" |  | ||||||
|         total = 0 |  | ||||||
|         for exercise in self.exercises: |  | ||||||
|             for element in exercise.grading_elements: |  | ||||||
|                 # Simplified logic with 2 types: notes and score |  | ||||||
|                 total += element.max_points |  | ||||||
|         return total |  | ||||||
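|  |  | ||||||
|     # --- Illustrative sketch (added for clarity; not part of the original model) --- |  | ||||||
|     # The three public methods above repeat the same feature-flag dispatch. |  | ||||||
|     # A minimal sketch of that pattern, using the real is_feature_enabled |  | ||||||
|     # name from config.feature_flags: |  | ||||||
|     # |  | ||||||
|     #     def _dispatch_by_flag(flag, new_impl, legacy_impl): |  | ||||||
|     #         def wrapper(self, *args, **kwargs): |  | ||||||
|     #             from config.feature_flags import is_feature_enabled |  | ||||||
|     #             impl = new_impl if is_feature_enabled(flag) else legacy_impl |  | ||||||
|     #             return impl(self, *args, **kwargs) |  | ||||||
|     #         return wrapper |  | ||||||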
|  |  | ||||||
| class Exercise(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     assessment_id = db.Column(db.Integer, db.ForeignKey('assessment.id'), nullable=False) |  | ||||||
|     title = db.Column(db.String(200), nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     order = db.Column(db.Integer, default=1) |  | ||||||
|     grading_elements = db.relationship('GradingElement', backref='exercise', lazy=True, cascade='all, delete-orphan') |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Exercise {self.title}>' |  | ||||||
|  |  | ||||||
| class GradingElement(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     exercise_id = db.Column(db.Integer, db.ForeignKey('exercise.id'), nullable=False) |  | ||||||
|     label = db.Column(db.String(200), nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     skill = db.Column(db.String(200)) |  | ||||||
|     max_points = db.Column(db.Float, nullable=False)  # Keep Float for compatibility |  | ||||||
|     # NEW: enum types stored directly |  | ||||||
|     grading_type = db.Column(Enum('notes', 'score', name='grading_types'), nullable=False, default='notes') |  | ||||||
|     # domain_id field added |  | ||||||
|     domain_id = db.Column(db.Integer, db.ForeignKey('domains.id'), nullable=True)  # Optional |  | ||||||
|     grades = db.relationship('Grade', backref='grading_element', lazy=True, cascade='all, delete-orphan') |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<GradingElement {self.label}>' |  | ||||||
|  |  | ||||||
| class Grade(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     student_id = db.Column(db.Integer, db.ForeignKey('student.id'), nullable=False) |  | ||||||
|     grading_element_id = db.Column(db.Integer, db.ForeignKey('grading_element.id'), nullable=False) |  | ||||||
|     value = db.Column(db.String(10))  # Keep the old format for compatibility |  | ||||||
|     comment = db.Column(db.Text) |  | ||||||
|      |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Grade {self.value} for {self.student.first_name if self.student else "Unknown"}>' |  | ||||||
|  |  | ||||||
| # Configuration tables |  | ||||||
|  |  | ||||||
| class AppConfig(db.Model): |  | ||||||
|     """Simple key-value application configuration.""" |  | ||||||
|     __tablename__ = 'app_config' |  | ||||||
|      |  | ||||||
|     key = db.Column(db.String(100), primary_key=True) |  | ||||||
|     value = db.Column(db.Text, nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<AppConfig {self.key}={self.value}>' |  | ||||||
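|  |  | ||||||
| # Hypothetical accessor (not in the original file) illustrating the intended |  | ||||||
| # key-value usage of AppConfig: |  | ||||||
| def get_app_config(key, default=None): |  | ||||||
|     """Return the stored value for key, or default when absent.""" |  | ||||||
|     row = AppConfig.query.get(key) |  | ||||||
|     return row.value if row else default |  | ||||||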
|  |  | ||||||
| class CompetenceScaleValue(db.Model): |  | ||||||
|     """Competence scale values (0, 1, 2, 3, ., d, etc.).""" |  | ||||||
|     __tablename__ = 'competence_scale_values' |  | ||||||
|      |  | ||||||
|     value = db.Column(db.String(10), primary_key=True)  # '0', '1', '2', '3', '.', 'd', etc. |  | ||||||
|     label = db.Column(db.String(100), nullable=False) |  | ||||||
|     color = db.Column(db.String(7), nullable=False)  # #RRGGBB format |  | ||||||
|     included_in_total = db.Column(db.Boolean, default=True, nullable=False) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<CompetenceScaleValue {self.value}: {self.label}>' |  | ||||||
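|  |  | ||||||
| # Hypothetical helper (not in the original file) showing how included_in_total |  | ||||||
| # is meant to gate special values ('.', 'd', ...) when computing totals: |  | ||||||
| def value_counts_in_total(value): |  | ||||||
|     """A scale value counts toward totals unless its row opts out.""" |  | ||||||
|     scale = CompetenceScaleValue.query.get(value) |  | ||||||
|     return scale.included_in_total if scale else True |  | ||||||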
|  |  | ||||||
| class Competence(db.Model): |  | ||||||
|     """List of competences (Calculer, Raisonner, etc.).""" |  | ||||||
|     __tablename__ = 'competences' |  | ||||||
|      |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     name = db.Column(db.String(100), unique=True, nullable=False) |  | ||||||
|     color = db.Column(db.String(7), nullable=False)  # #RRGGBB format |  | ||||||
|     icon = db.Column(db.String(50), nullable=False) |  | ||||||
|     order_index = db.Column(db.Integer, default=0)  # Display order |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Competence {self.name}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Domain(db.Model): |  | ||||||
|     """Domains/tags for grading elements.""" |  | ||||||
|     __tablename__ = 'domains' |  | ||||||
|      |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     name = db.Column(db.String(100), unique=True, nullable=False) |  | ||||||
|     color = db.Column(db.String(7), nullable=False, default='#6B7280')  # #RRGGBB format |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     # Inverse relationship |  | ||||||
|     grading_elements = db.relationship('GradingElement', backref='domain', lazy=True) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Domain {self.name}>' |  | ||||||
| @@ -1 +0,0 @@ | |||||||
| # Migration examples and guides |  | ||||||
| @@ -1,290 +0,0 @@ | |||||||
| """ |  | ||||||
| Migration guide to the new architecture with decoupled services. |  | ||||||
|  |  | ||||||
| This file shows how to progressively migrate existing code |  | ||||||
| to the new architecture with dependency injection. |  | ||||||
| """ |  | ||||||
| import time |  | ||||||
| from typing import Dict, Any |  | ||||||
| from unittest.mock import Mock, patch  # used by the monitoring and test examples below |  | ||||||
|  |  | ||||||
| # =================== BEFORE: coupled code with circular imports =================== |  | ||||||
|  |  | ||||||
| class OldRoute: |  | ||||||
|     """Example of the old approach with tight coupling.""" |  | ||||||
|      |  | ||||||
|     def assessment_detail_old(self, assessment_id: int): |  | ||||||
|         """Old version with the logic living in the models.""" |  | ||||||
|         from models import Assessment  # Direct import |  | ||||||
|          |  | ||||||
|         assessment = Assessment.query.get_or_404(assessment_id) |  | ||||||
|          |  | ||||||
|         # ❌ Problems: |  | ||||||
|         # 1. Business logic in the model (SRP violation) |  | ||||||
|         # 2. Circular import in grading_progress |  | ||||||
|         # 3. N+1 queries in calculate_student_scores |  | ||||||
|         # 4. No testability (hard-coded dependencies) |  | ||||||
|  |  | ||||||
|         progress = assessment.grading_progress  # Hidden circular import |  | ||||||
|         scores, exercises = assessment.calculate_student_scores()  # N+1 queries |  | ||||||
|         stats = assessment.get_assessment_statistics() |  | ||||||
|          |  | ||||||
|         return { |  | ||||||
|             'assessment': assessment, |  | ||||||
|             'progress': progress, |  | ||||||
|             'scores': scores, |  | ||||||
|             'statistics': stats |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # =================== AFTER: decoupled architecture =================== |  | ||||||
|  |  | ||||||
| class NewRoute: |  | ||||||
|     """New approach with dependency injection.""" |  | ||||||
|      |  | ||||||
|     def __init__(self, assessment_services_facade=None): |  | ||||||
|         """Dependency injection for testability.""" |  | ||||||
|         if assessment_services_facade is None: |  | ||||||
|             from providers.concrete_providers import AssessmentServicesFactory |  | ||||||
|             assessment_services_facade = AssessmentServicesFactory.create_facade() |  | ||||||
|          |  | ||||||
|         self.services = assessment_services_facade |  | ||||||
|      |  | ||||||
|     def assessment_detail_new(self, assessment_id: int) -> Dict[str, Any]: |  | ||||||
|         """ |  | ||||||
|         New version with decoupled services. |  | ||||||
|  |  | ||||||
|         ✅ Advantages: |  | ||||||
|         1. Dedicated services (SRP compliance) |  | ||||||
|         2. No more circular imports |  | ||||||
|         3. Optimized queries (no more N+1) |  | ||||||
|         4. Testable with mocks |  | ||||||
|         5. Extensible (Strategy pattern) |  | ||||||
|         """ |  | ||||||
|         from models_refactored import Assessment  # Slimmed-down model |  | ||||||
|          |  | ||||||
|         assessment = Assessment.query.get_or_404(assessment_id) |  | ||||||
|          |  | ||||||
|         # Optimized calls to the services |  | ||||||
|         progress = self.services.get_grading_progress(assessment) |  | ||||||
|         scores, exercises = self.services.calculate_student_scores(assessment) |  | ||||||
|         stats = self.services.get_statistics(assessment) |  | ||||||
|          |  | ||||||
|         return { |  | ||||||
|             'assessment': assessment, |  | ||||||
|             'progress': progress.__dict__,  # DTO -> dict conversion |  | ||||||
|             'scores': {k: v.__dict__ for k, v in scores.items()}, |  | ||||||
|             'statistics': stats.__dict__ |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # =================== PROGRESSIVE MIGRATION =================== |  | ||||||
|  |  | ||||||
| class MigrationRoute: |  | ||||||
|     """Progressive migration example to minimize risk.""" |  | ||||||
|      |  | ||||||
|     def __init__(self): |  | ||||||
|         # Feature flag to switch between the old and new code |  | ||||||
|         self.use_new_services = self._get_feature_flag('USE_NEW_ASSESSMENT_SERVICES') |  | ||||||
|          |  | ||||||
|         if self.use_new_services: |  | ||||||
|             from providers.concrete_providers import AssessmentServicesFactory |  | ||||||
|             self.services = AssessmentServicesFactory.create_facade() |  | ||||||
|      |  | ||||||
|     def assessment_detail_hybrid(self, assessment_id: int): |  | ||||||
|         """Hybrid version allowing gradual testing.""" |  | ||||||
|         from models import Assessment  # Import the old model |  | ||||||
|          |  | ||||||
|         assessment = Assessment.query.get_or_404(assessment_id) |  | ||||||
|          |  | ||||||
|         if self.use_new_services: |  | ||||||
|             # New implementation |  | ||||||
|             progress = self.services.get_grading_progress(assessment) |  | ||||||
|             scores, exercises = self.services.calculate_student_scores(assessment) |  | ||||||
|             stats = self.services.get_statistics(assessment) |  | ||||||
|              |  | ||||||
|             return { |  | ||||||
|                 'assessment': assessment, |  | ||||||
|                 'progress': progress.__dict__, |  | ||||||
|                 'scores': scores, |  | ||||||
|                 'statistics': stats.__dict__ |  | ||||||
|             } |  | ||||||
|         else: |  | ||||||
|             # Old implementation (fallback) |  | ||||||
|             progress = assessment.grading_progress |  | ||||||
|             scores, exercises = assessment.calculate_student_scores() |  | ||||||
|             stats = assessment.get_assessment_statistics() |  | ||||||
|              |  | ||||||
|             return { |  | ||||||
|                 'assessment': assessment, |  | ||||||
|                 'progress': progress, |  | ||||||
|                 'scores': scores, |  | ||||||
|                 'statistics': stats |  | ||||||
|             } |  | ||||||
|      |  | ||||||
|     def _get_feature_flag(self, flag_name: str) -> bool: |  | ||||||
|         """Read a feature flag from the configuration.""" |  | ||||||
|         # Example implementation |  | ||||||
|         import os |  | ||||||
|         return os.environ.get(flag_name, 'false').lower() == 'true' |  | ||||||
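|  |  | ||||||
| # Hypothetical shell usage of the env-based flag read above (flag name taken |  | ||||||
| # from MigrationRoute.__init__; the entry point is an assumption): |  | ||||||
| #   USE_NEW_ASSESSMENT_SERVICES=true python app.py |  | ||||||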
|  |  | ||||||
|  |  | ||||||
| # =================== TESTS WITH THE NEW ARCHITECTURE =================== |  | ||||||
|  |  | ||||||
| class TestableRoute: |  | ||||||
|     """Example showing the improved testability.""" |  | ||||||
|      |  | ||||||
|     def __init__(self, services_facade): |  | ||||||
|         self.services = services_facade |  | ||||||
|      |  | ||||||
|     def get_assessment_summary(self, assessment_id: int): |  | ||||||
|         """Method that is easy to test with mocks.""" |  | ||||||
|         from models_refactored import Assessment |  | ||||||
|          |  | ||||||
|         assessment = Assessment.query.get_or_404(assessment_id) |  | ||||||
|         progress = self.services.get_grading_progress(assessment) |  | ||||||
|          |  | ||||||
|         return { |  | ||||||
|             'title': assessment.title, |  | ||||||
|             'progress_percentage': progress.percentage, |  | ||||||
|             'status': progress.status |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def test_assessment_summary(): |  | ||||||
|     """Simple unit test thanks to dependency injection.""" |  | ||||||
|     from unittest.mock import Mock, patch |  | ||||||
|     from services.assessment_services import ProgressResult |  | ||||||
|      |  | ||||||
|     # Create the mocks |  | ||||||
|     mock_services = Mock() |  | ||||||
|     mock_services.get_grading_progress.return_value = ProgressResult( |  | ||||||
|         percentage=75, |  | ||||||
|         completed=15, |  | ||||||
|         total=20, |  | ||||||
|         status='in_progress', |  | ||||||
|         students_count=25 |  | ||||||
|     ) |  | ||||||
|      |  | ||||||
|     # Test the route with the injected mock |  | ||||||
|     route = TestableRoute(mock_services) |  | ||||||
|      |  | ||||||
|     # Mock the assessment |  | ||||||
|     mock_assessment = Mock() |  | ||||||
|     mock_assessment.title = 'Test Assessment' |  | ||||||
|      |  | ||||||
|     # Simulated test (in practice the DB would be mocked too) |  | ||||||
|     with patch('models_refactored.Assessment') as mock_model: |  | ||||||
|         mock_model.query.get_or_404.return_value = mock_assessment |  | ||||||
|          |  | ||||||
|         result = route.get_assessment_summary(1) |  | ||||||
|          |  | ||||||
|         assert result['title'] == 'Test Assessment' |  | ||||||
|         assert result['progress_percentage'] == 75 |  | ||||||
|         assert result['status'] == 'in_progress' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # =================== EXTENSIBILITY: new grading types =================== |  | ||||||
|  |  | ||||||
| class CustomGradingStrategy: |  | ||||||
|     """Extension example for a new grading type.""" |  | ||||||
|      |  | ||||||
|     def calculate_score(self, grade_value: str, max_points: float) -> float: |  | ||||||
|         """Custom logic (e.g. letter grading A, B, C, D).""" |  | ||||||
|         letter_to_score = { |  | ||||||
|             'A': 1.0, |  | ||||||
|             'B': 0.75, |  | ||||||
|             'C': 0.5, |  | ||||||
|             'D': 0.25, |  | ||||||
|             'F': 0.0 |  | ||||||
|         } |  | ||||||
|          |  | ||||||
|         letter = grade_value.upper() |  | ||||||
|         ratio = letter_to_score.get(letter, 0.0) |  | ||||||
|         return ratio * max_points |  | ||||||
|      |  | ||||||
|     def get_grading_type(self) -> str: |  | ||||||
|         return 'letters' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def register_custom_grading(): |  | ||||||
|     """Example of registering a new grading type.""" |  | ||||||
|     from services.assessment_services import GradingStrategyFactory |  | ||||||
|      |  | ||||||
|     GradingStrategyFactory.register_strategy('letters', CustomGradingStrategy) |  | ||||||
|      |  | ||||||
|     # The system can now handle the 'letters' type automatically |  | ||||||
|     strategy = GradingStrategyFactory.create('letters') |  | ||||||
|     score = strategy.calculate_score('B', 20.0)  # = 15.0 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # =================== MONITORING AND METRICS =================== |  | ||||||
|  |  | ||||||
| class MonitoredAssessmentService: |  | ||||||
|     """Example of adding monitoring without changing the business logic.""" |  | ||||||
|      |  | ||||||
|     def __init__(self, services_facade): |  | ||||||
|         self.services = services_facade |  | ||||||
|         self.metrics_collector = self._init_metrics() |  | ||||||
|      |  | ||||||
|     def get_grading_progress_with_metrics(self, assessment): |  | ||||||
|         """Metrics wrapper around the service.""" |  | ||||||
|         start_time = time.time() |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             result = self.services.get_grading_progress(assessment) |  | ||||||
|              |  | ||||||
|             # Success metrics |  | ||||||
|             self.metrics_collector.increment('assessment.progress.success') |  | ||||||
|             self.metrics_collector.histogram('assessment.progress.duration',  |  | ||||||
|                                            time.time() - start_time) |  | ||||||
|              |  | ||||||
|             return result |  | ||||||
|              |  | ||||||
|         except Exception as e: |  | ||||||
|             # Error metrics |  | ||||||
|             self.metrics_collector.increment('assessment.progress.error') |  | ||||||
|             self.metrics_collector.increment(f'assessment.progress.error.{type(e).__name__}') |  | ||||||
|             raise |  | ||||||
|      |  | ||||||
|     def _init_metrics(self): |  | ||||||
|         """Initialize the metrics collector.""" |  | ||||||
|         # Example with StatsD or Prometheus |  | ||||||
|         return Mock()  # Placeholder (Mock is imported at the top of this guide) |  | ||||||
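|  |  | ||||||
| # Hypothetical wiring of the wrapper above (the factory name is taken from |  | ||||||
| # the earlier examples in this guide): |  | ||||||
| #   facade = AssessmentServicesFactory.create_facade() |  | ||||||
| #   monitored = MonitoredAssessmentService(facade) |  | ||||||
| #   progress = monitored.get_grading_progress_with_metrics(assessment) |  | ||||||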
|  |  | ||||||
|  |  | ||||||
| # =================== RÉSUMÉ DES BÉNÉFICES =================== |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| 🎯 BENEFITS OF THE REFACTORING: |  | ||||||
|  |  | ||||||
| 1. **SOLID principles respected**: |  | ||||||
|    - Single Responsibility: each service has ONE responsibility |  | ||||||
|    - Open/Closed: extensible via the Strategy pattern (new grading types) |  | ||||||
|    - Liskov Substitution: interfaces honored |  | ||||||
|    - Interface Segregation: specialized interfaces (ConfigProvider, DatabaseProvider) |  | ||||||
|    - Dependency Inversion: dependency injection, no more circular imports |  | ||||||
|  |  | ||||||
| 2. **Improved performance**: |  | ||||||
|    - No more N+1 queries (optimized queries in the providers) |  | ||||||
|    - Caching possible at the service level (see the sketch after this summary) |  | ||||||
|    - Optimized calculations |  | ||||||
|  |  | ||||||
| 3. **Testability**: |  | ||||||
|    - Services mockable independently |  | ||||||
|    - Isolated unit tests |  | ||||||
|    - Easier integration tests |  | ||||||
|  |  | ||||||
| 4. **Maintainability**: |  | ||||||
|    - More readable, better-organized code |  | ||||||
|    - Clearly separated responsibilities |  | ||||||
|    - Easier evolution |  | ||||||
|  |  | ||||||
| 5. **Extensibility**: |  | ||||||
|    - New grading types via the Strategy pattern |  | ||||||
|    - New providers for different backends |  | ||||||
|    - Monitoring and logging easy to add |  | ||||||
|  |  | ||||||
| 6. **Security**: |  | ||||||
|    - No more circular imports (reduced attack surface) |  | ||||||
|    - Centralized validation in the services |  | ||||||
|    - Better control over dependencies |  | ||||||
| """ |  | ||||||
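|  |  | ||||||
| # A minimal sketch (not part of the original code) of the service-level caching |  | ||||||
| # mentioned in point 2 of the summary above; it assumes a facade exposing |  | ||||||
| # get_grading_progress and caches one result per assessment id: |  | ||||||
| class CachedProgressFacade: |  | ||||||
|     def __init__(self, services_facade): |  | ||||||
|         self._services = services_facade |  | ||||||
|         self._cache = {} |  | ||||||
|  |  | ||||||
|     def get_grading_progress(self, assessment): |  | ||||||
|         # Reuse the cached result until the entry is invalidated |  | ||||||
|         if assessment.id not in self._cache: |  | ||||||
|             self._cache[assessment.id] = self._services.get_grading_progress(assessment) |  | ||||||
|         return self._cache[assessment.id] |  | ||||||
|  |  | ||||||
|     def invalidate(self, assessment_id): |  | ||||||
|         # Call after any grade write that touches this assessment |  | ||||||
|         self._cache.pop(assessment_id, None) |  | ||||||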
| @@ -1,556 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
| """ |  | ||||||
| Progressive Migration Finalization Script (DAY 7 - Step 4.1) |  | ||||||
|  |  | ||||||
| This script permanently enables all the new services and finalizes |  | ||||||
| the migration according to the MIGRATION_PROGRESSIVE.md plan. |  | ||||||
|  |  | ||||||
| Features: |  | ||||||
| - Enable all the migration feature flags |  | ||||||
| - Validate the system in production mode |  | ||||||
| - Full non-regression tests |  | ||||||
| - Final performance benchmark |  | ||||||
| - Finalization report |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import time |  | ||||||
| import logging |  | ||||||
| from datetime import datetime |  | ||||||
| from pathlib import Path |  | ||||||
|  |  | ||||||
| # Logging configuration for the finalization script |  | ||||||
| logging.basicConfig( |  | ||||||
|     level=logging.INFO, |  | ||||||
|     format='%(asctime)s - %(levelname)s - %(message)s', |  | ||||||
|     handlers=[ |  | ||||||
|         logging.StreamHandler(sys.stdout), |  | ||||||
|         logging.FileHandler('logs/migration_finalization.log', mode='w') |  | ||||||
|     ] |  | ||||||
| ) |  | ||||||
| logger = logging.getLogger(__name__) |  | ||||||
|  |  | ||||||
| def setup_flask_context(): |  | ||||||
|     """Set up the Flask context for the final tests.""" |  | ||||||
|     # Add the project root to the PYTHONPATH |  | ||||||
|     project_root = Path(__file__).parent |  | ||||||
|     if str(project_root) not in sys.path: |  | ||||||
|         sys.path.insert(0, str(project_root)) |  | ||||||
|      |  | ||||||
|     # Import and configure Flask |  | ||||||
|     from app import create_app |  | ||||||
|     app = create_app() |  | ||||||
|     ctx = app.app_context() |  | ||||||
|     ctx.push() |  | ||||||
|     return app, ctx |  | ||||||
|  |  | ||||||
| def activate_all_migration_features(): |  | ||||||
|     """ |  | ||||||
|     STEP 4.1: permanently enable every migration feature flag. |  | ||||||
|     """ |  | ||||||
|     logger.info("=== STEP 4.1: PERMANENT FEATURE FLAG ACTIVATION ===") |  | ||||||
|      |  | ||||||
|     from config.feature_flags import feature_flags, FeatureFlag |  | ||||||
|      |  | ||||||
|     # Migration feature flags to enable permanently |  | ||||||
|     migration_flags = [ |  | ||||||
|         FeatureFlag.USE_STRATEGY_PATTERN, |  | ||||||
|         FeatureFlag.USE_REFACTORED_ASSESSMENT, |  | ||||||
|         FeatureFlag.USE_NEW_STUDENT_SCORE_CALCULATOR, |  | ||||||
|         FeatureFlag.USE_NEW_ASSESSMENT_STATISTICS_SERVICE, |  | ||||||
|     ] |  | ||||||
|      |  | ||||||
|     logger.info(f"Enabling {len(migration_flags)} migration feature flags...") |  | ||||||
|      |  | ||||||
|     activation_results = {} |  | ||||||
|     for flag in migration_flags: |  | ||||||
|         success = feature_flags.enable(flag, reason="Migration finalization DAY 7 - production ready") |  | ||||||
|         activation_results[flag.value] = success |  | ||||||
|          |  | ||||||
|         if success: |  | ||||||
|             logger.info(f"✅ {flag.value} enabled successfully") |  | ||||||
|         else: |  | ||||||
|             logger.error(f"❌ Failed to enable {flag.value}") |  | ||||||
|      |  | ||||||
|     # Verify that every flag is actually enabled |  | ||||||
|     logger.info("\n=== ACTIVATION CHECK ===") |  | ||||||
|     all_active = True |  | ||||||
|     for flag in migration_flags: |  | ||||||
|         is_active = feature_flags.is_enabled(flag) |  | ||||||
|         status = "✅ ENABLED" if is_active else "❌ DISABLED" |  | ||||||
|         logger.info(f"{flag.value}: {status}") |  | ||||||
|          |  | ||||||
|         if not is_active: |  | ||||||
|             all_active = False |  | ||||||
|      |  | ||||||
|     # Feature flag status summary |  | ||||||
|     status_summary = feature_flags.get_status_summary() |  | ||||||
|     logger.info(f"\n=== FEATURE FLAG SUMMARY ===") |  | ||||||
|     logger.info(f"Total flags enabled: {status_summary['total_enabled']}") |  | ||||||
|     logger.info(f"Day 3 migration ready: {status_summary['migration_status']['day_3_ready']}") |  | ||||||
|     logger.info(f"Day 4 migration ready: {status_summary['migration_status']['day_4_ready']}") |  | ||||||
|     logger.info(f"Day 5 migration ready: {status_summary['migration_status']['day_5_ready']}") |  | ||||||
|     logger.info(f"Day 6 migration ready: {status_summary['migration_status']['day_6_ready']}") |  | ||||||
|      |  | ||||||
|     if not all_active: |  | ||||||
|         raise RuntimeError("Some feature flags could not be enabled!") |  | ||||||
|      |  | ||||||
|     logger.info("✅ All migration feature flags are now ENABLED") |  | ||||||
|     return activation_results |  | ||||||
|  |  | ||||||
| def validate_system_in_production_mode(): |  | ||||||
|     """ |  | ||||||
|     STEP 4.1: full system validation with all the new services enabled. |  | ||||||
|     """ |  | ||||||
|     logger.info("\n=== SYSTEM VALIDATION IN PRODUCTION MODE ===") |  | ||||||
|      |  | ||||||
|     from models import Assessment, ClassGroup, Student |  | ||||||
|     from services.assessment_services import ( |  | ||||||
|         AssessmentProgressService,  |  | ||||||
|         StudentScoreCalculator, |  | ||||||
|         AssessmentStatisticsService, |  | ||||||
|         UnifiedGradingCalculator |  | ||||||
|     ) |  | ||||||
|     from providers.concrete_providers import ( |  | ||||||
|         ConfigManagerProvider, |  | ||||||
|         SQLAlchemyDatabaseProvider |  | ||||||
|     ) |  | ||||||
|      |  | ||||||
|     # Make sure we have test data |  | ||||||
|     assessments = Assessment.query.limit(3).all() |  | ||||||
|     if not assessments: |  | ||||||
|         logger.warning("⚠️  No assessments found for testing") |  | ||||||
|         return False |  | ||||||
|      |  | ||||||
|     logger.info(f"Testing with {len(assessments)} assessments...") |  | ||||||
|      |  | ||||||
|     # Test 1: AssessmentProgressService |  | ||||||
|     logger.info("Test 1: AssessmentProgressService...") |  | ||||||
|     try: |  | ||||||
|         service = AssessmentProgressService(SQLAlchemyDatabaseProvider()) |  | ||||||
|         for assessment in assessments: |  | ||||||
|             progress = service.calculate_grading_progress(assessment) |  | ||||||
|             logger.info(f"  Assessment {assessment.id}: {progress.percentage}% complete") |  | ||||||
|         logger.info("✅ AssessmentProgressService OK") |  | ||||||
|     except Exception as e: |  | ||||||
|         logger.error(f"❌ AssessmentProgressService ERROR: {str(e)}") |  | ||||||
|         return False |  | ||||||
|      |  | ||||||
|     # Test 2: StudentScoreCalculator  |  | ||||||
|     logger.info("Test 2: StudentScoreCalculator...") |  | ||||||
|     try: |  | ||||||
|         config_provider = ConfigManagerProvider() |  | ||||||
|         db_provider = SQLAlchemyDatabaseProvider() |  | ||||||
|         calculator = UnifiedGradingCalculator(config_provider) |  | ||||||
|         service = StudentScoreCalculator(calculator, db_provider) |  | ||||||
|          |  | ||||||
|         for assessment in assessments: |  | ||||||
|             scores = service.calculate_student_scores(assessment) |  | ||||||
|             logger.info(f"  Assessment {assessment.id}: {len(scores)} scores computed") |  | ||||||
|         logger.info("✅ StudentScoreCalculator OK") |  | ||||||
|     except Exception as e: |  | ||||||
|         logger.error(f"❌ StudentScoreCalculator ERROR: {str(e)}") |  | ||||||
|         return False |  | ||||||
|      |  | ||||||
|     # Test 3: AssessmentStatisticsService |  | ||||||
|     logger.info("Test 3: AssessmentStatisticsService...") |  | ||||||
|     try: |  | ||||||
|         score_calculator = StudentScoreCalculator(calculator, db_provider) |  | ||||||
|         service = AssessmentStatisticsService(score_calculator) |  | ||||||
|          |  | ||||||
|         for assessment in assessments: |  | ||||||
|             stats = service.get_assessment_statistics(assessment) |  | ||||||
|             logger.info(f"  Assessment {assessment.id}: mean {stats.mean if hasattr(stats, 'mean') else 'N/A'}") |  | ||||||
|         logger.info("✅ AssessmentStatisticsService OK") |  | ||||||
|     except Exception as e: |  | ||||||
|         logger.error(f"❌ AssessmentStatisticsService ERROR: {str(e)}") |  | ||||||
|         return False |  | ||||||
|      |  | ||||||
|     # Test 4: Pattern Strategy via UnifiedGradingCalculator |  | ||||||
|     logger.info("Test 4: Pattern Strategy...") |  | ||||||
|     try: |  | ||||||
|         calculator = UnifiedGradingCalculator(config_provider) |  | ||||||
|          |  | ||||||
|         # Test the different grading types |  | ||||||
|         test_cases = [ |  | ||||||
|             ("15.5", "notes", 20.0), |  | ||||||
|             ("2", "score", 3.0), |  | ||||||
|             (".", "notes", 20.0), |  | ||||||
|             ("d", "score", 3.0) |  | ||||||
|         ] |  | ||||||
|          |  | ||||||
|         for grade_value, grading_type, max_points in test_cases: |  | ||||||
|             score = calculator.calculate_score(grade_value, grading_type, max_points) |  | ||||||
|             logger.info(f"  {grade_value} ({grading_type}/{max_points}) -> {score}") |  | ||||||
|          |  | ||||||
|         logger.info("✅ Pattern Strategy OK") |  | ||||||
|     except Exception as e: |  | ||||||
|         logger.error(f"❌ Strategy pattern ERROR: {str(e)}") |  | ||||||
|         return False |  | ||||||
|      |  | ||||||
|     logger.info("✅ FULL SYSTEM VALIDATION - SUCCESS") |  | ||||||
|     return True |  | ||||||
|  |  | ||||||
| def run_comprehensive_tests(): |  | ||||||
|     """ |  | ||||||
|     STEP 4.2: run every test to make sure no regression was introduced. |  | ||||||
|     """ |  | ||||||
|     logger.info("\n=== STEP 4.2: FULL FINAL TESTS ===") |  | ||||||
|      |  | ||||||
|     import subprocess |  | ||||||
|      |  | ||||||
|     # 1. Standard unit tests |  | ||||||
|     logger.info("Running the unit tests...") |  | ||||||
|     result = subprocess.run([ |  | ||||||
|         sys.executable, "-m", "pytest",  |  | ||||||
|         "tests/", "-v", "--tb=short", "--disable-warnings" |  | ||||||
|     ], capture_output=True, text=True) |  | ||||||
|      |  | ||||||
|     if result.returncode != 0: |  | ||||||
|         logger.error("❌ Unit tests FAILED:") |  | ||||||
|         logger.error(result.stdout) |  | ||||||
|         logger.error(result.stderr) |  | ||||||
|         return False |  | ||||||
|     else: |  | ||||||
|         logger.info("✅ Unit tests PASSED") |  | ||||||
|         # Extract the number of passing tests |  | ||||||
|         output_lines = result.stdout.split('\n') |  | ||||||
|         for line in output_lines: |  | ||||||
|             if "passed" in line:  # summary line, e.g. "214 passed in 12.3s" |  | ||||||
|                 logger.info(f"   {line.strip()}") |  | ||||||
|                 break |  | ||||||
|      |  | ||||||
|     # 2. Migration-specific tests |  | ||||||
|     logger.info("\nRunning the migration tests...") |  | ||||||
|     migration_test_files = [ |  | ||||||
|         "tests/test_feature_flags.py", |  | ||||||
|         "tests/test_pattern_strategy_migration.py",  |  | ||||||
|         "tests/test_assessment_progress_migration.py", |  | ||||||
|         "tests/test_student_score_calculator_migration.py", |  | ||||||
|         "tests/test_assessment_statistics_migration.py" |  | ||||||
|     ] |  | ||||||
|      |  | ||||||
|     for test_file in migration_test_files: |  | ||||||
|         if os.path.exists(test_file): |  | ||||||
|             logger.info(f"  Tests {os.path.basename(test_file)}...") |  | ||||||
|             result = subprocess.run([ |  | ||||||
|                 sys.executable, "-m", "pytest",  |  | ||||||
|                 test_file, "-v", "--tb=short", "--disable-warnings" |  | ||||||
|             ], capture_output=True, text=True) |  | ||||||
|              |  | ||||||
|             if result.returncode != 0: |  | ||||||
|                 logger.error(f"❌ {test_file} FAILED") |  | ||||||
|                 logger.error(result.stdout[-500:])  # Last 500 chars |  | ||||||
|                 return False |  | ||||||
|             else: |  | ||||||
|                 logger.info(f"✅ {os.path.basename(test_file)} OK") |  | ||||||
|      |  | ||||||
|     logger.info("✅ ALL FINAL TESTS PASSED") |  | ||||||
|     return True |  | ||||||
|  |  | ||||||
| def benchmark_final_performance(): |  | ||||||
|     """ |  | ||||||
|     STEP 4.2: final performance benchmark against the initial baseline. |  | ||||||
|     """ |  | ||||||
|     logger.info("\n=== STEP 4.2: FINAL PERFORMANCE BENCHMARK ===") |  | ||||||
|      |  | ||||||
|     try: |  | ||||||
|         # Use the existing benchmark script when present |  | ||||||
|         if os.path.exists("benchmark_final_migration.py"): |  | ||||||
|             logger.info("Running the final benchmark...") |  | ||||||
|             import subprocess |  | ||||||
|             result = subprocess.run([ |  | ||||||
|                 sys.executable, "benchmark_final_migration.py" |  | ||||||
|             ], capture_output=True, text=True) |  | ||||||
|              |  | ||||||
|             if result.returncode == 0: |  | ||||||
|                 logger.info("✅ Final benchmark ran successfully:") |  | ||||||
|                 logger.info(result.stdout) |  | ||||||
|             else: |  | ||||||
|                 logger.error("❌ Final benchmark error:") |  | ||||||
|                 logger.error(result.stderr) |  | ||||||
|                 return False |  | ||||||
|         else: |  | ||||||
|             # Simple built-in benchmark |  | ||||||
|             logger.info("Running a simple built-in benchmark...") |  | ||||||
|              |  | ||||||
|             from models import Assessment |  | ||||||
|             assessments = Assessment.query.limit(5).all() |  | ||||||
|              |  | ||||||
|             if not assessments: |  | ||||||
|                 logger.warning("⚠️  No assessments available for the benchmark") |  | ||||||
|                 return True |  | ||||||
|              |  | ||||||
|             # Performance test: progress calculation |  | ||||||
|             start_time = time.time() |  | ||||||
|             for assessment in assessments: |  | ||||||
|                 _ = assessment.grading_progress |  | ||||||
|             progression_time = time.time() - start_time |  | ||||||
|              |  | ||||||
|             # Performance test: score calculation |  | ||||||
|             start_time = time.time()   |  | ||||||
|             for assessment in assessments: |  | ||||||
|                 _ = assessment.calculate_student_scores() |  | ||||||
|             scores_time = time.time() - start_time |  | ||||||
|              |  | ||||||
|             # Performance test: statistics |  | ||||||
|             start_time = time.time() |  | ||||||
|             for assessment in assessments: |  | ||||||
|                 _ = assessment.get_assessment_statistics() |  | ||||||
|             stats_time = time.time() - start_time |  | ||||||
|              |  | ||||||
|             logger.info(f"Performance with the new services (5 assessments):") |  | ||||||
|             logger.info(f"  - Progress calculation: {progression_time:.3f}s") |  | ||||||
|             logger.info(f"  - Score calculation: {scores_time:.3f}s") |  | ||||||
|             logger.info(f"  - Statistics calculation: {stats_time:.3f}s") |  | ||||||
|             logger.info(f"  - Total: {progression_time + scores_time + stats_time:.3f}s") |  | ||||||
|          |  | ||||||
|         logger.info("✅ FINAL BENCHMARK DONE") |  | ||||||
|         return True |  | ||||||
|          |  | ||||||
|     except Exception as e: |  | ||||||
|         logger.error(f"❌ Final benchmark error: {str(e)}") |  | ||||||
|         return False |  | ||||||
|  |  | ||||||
| def generate_migration_final_report(): |  | ||||||
|     """ |  | ||||||
|     Generate the final migration report with all the metrics. |  | ||||||
|     """ |  | ||||||
|     logger.info("\n=== GENERATING THE FINAL MIGRATION REPORT ===") |  | ||||||
|      |  | ||||||
|     from config.feature_flags import feature_flags |  | ||||||
|      |  | ||||||
|     report_content = f""" |  | ||||||
| # 🎯 FINAL REPORT - NOTYTEX PROGRESSIVE MIGRATION |  | ||||||
| ## DAY 7 - Full Finalization |  | ||||||
|  |  | ||||||
| **Finalized on:** {datetime.now().strftime('%d/%m/%Y at %H:%M:%S')} |  | ||||||
| **Version:** Refactored Architecture - Phase 2 |  | ||||||
| **State:** MIGRATION COMPLETED SUCCESSFULLY ✅ |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 📊 EXECUTIVE SUMMARY |  | ||||||
|  |  | ||||||
| ### ✅ GOALS ACHIEVED |  | ||||||
| - **Refactored architecture**: the Assessment model split into 4 specialized services |  | ||||||
| - **Strategy pattern**: grading system extensible without modifying code |  | ||||||
| - **Dependency injection**: circular imports eliminated |  | ||||||
| - **Optimized performance**: N+1 queries eliminated |  | ||||||
| - **Feature flags**: safe progressive migration with rollback available |  | ||||||
| - **Full test suite**: 214+ passing tests, no regression |  | ||||||
|  |  | ||||||
| ### 🎯 KEY METRICS |  | ||||||
| | Metric | Before | After | Improvement | |  | ||||||
| |--------|--------|-------|-------------| |  | ||||||
| | Assessment model size | 267 lines | 80 lines | -70% | |  | ||||||
| | Responsibilities per class | 4 | 1 | SRP compliance | |  | ||||||
| | Circular imports | 3 | 0 | 100% eliminated | |  | ||||||
| | Decoupled services | 0 | 4 | Modern architecture | |  | ||||||
| | Passing tests | Variable | 214+ | Guaranteed stability | |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 🏗️ FINAL ARCHITECTURE |  | ||||||
|  |  | ||||||
| ### Services Created (560+ new lines) |  | ||||||
| 1. **AssessmentProgressService** - isolated, optimized progress calculation |  | ||||||
| 2. **StudentScoreCalculator** - score calculations with optimized queries |  | ||||||
| 3. **AssessmentStatisticsService** - decoupled statistical analysis |  | ||||||
| 4. **UnifiedGradingCalculator** - centralized grading logic with the Strategy pattern |  | ||||||
|  |  | ||||||
| ### Strategy Pattern in Operation |  | ||||||
| - **GradingStrategy** extensible interface |  | ||||||
| - **NotesStrategy** and **ScoreStrategy** implemented |  | ||||||
| - **GradingStrategyFactory** to manage the types |  | ||||||
| - New grading types can be added without touching existing code |  | ||||||
|  |  | ||||||
| ### Dependency Injection |  | ||||||
| - **ConfigProvider** and **DatabaseProvider** (interfaces) |  | ||||||
| - **ConfigManagerProvider** and **SQLAlchemyDatabaseProvider** (implementations) |  | ||||||
| - Circular imports fully eliminated |  | ||||||
| - Unit tests 100% mockable |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 🚀 FEATURE FLAGS - FINAL STATE |  | ||||||
|  |  | ||||||
| {_get_feature_flags_summary()} |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## ⚡ PERFORMANCE OPTIMIZATIONS |  | ||||||
|  |  | ||||||
| ### N+1 Problems Eliminated |  | ||||||
| - **Before**: 1 query + N queries per student/exercise |  | ||||||
| - **After**: optimized queries with joinedload and batch loading (sketch below) |  | ||||||
| - **Result**: linear instead of quadratic performance |  | ||||||
|  |  | ||||||
| ### Optimized Calculations |  | ||||||
| - Progress: caching of frequent queries |  | ||||||
| - Scores: batch calculation for all students |  | ||||||
| - Statistics: SQL aggregations instead of Python loops |  | ||||||
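|  |  | ||||||
| A condensed sketch of the eager-loading pattern described above (not the actual provider code; the relationship names are taken from the models module): |  | ||||||
|  |  | ||||||
| ```python |  | ||||||
| from sqlalchemy.orm import joinedload |  | ||||||
|  |  | ||||||
| # One round-trip loads the whole grading tree instead of N+1 lazy loads |  | ||||||
| assessment = (Assessment.query |  | ||||||
|               .options(joinedload(Assessment.exercises) |  | ||||||
|                        .joinedload(Exercise.grading_elements) |  | ||||||
|                        .joinedload(GradingElement.grades)) |  | ||||||
|               .filter_by(id=assessment_id).first()) |  | ||||||
| ``` |  | ||||||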
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 🧪 FINAL VALIDATION |  | ||||||
|  |  | ||||||
| ### Non-Regression Tests |  | ||||||
| - ✅ All existing tests pass |  | ||||||
| - ✅ Migration-specific tests pass |  | ||||||
| - ✅ Calculations validated as identical (old vs new) |  | ||||||
| - ✅ Performance equal or better |  | ||||||
|  |  | ||||||
| ### Production System Validation |  | ||||||
| - ✅ All services functional with the feature flags enabled |  | ||||||
| - ✅ Strategy pattern operational for every grading type |  | ||||||
| - ✅ Dependency injection without circular imports |  | ||||||
| - ✅ User interface unchanged (transparent to users) |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 🎓 TRAINING & MAINTENANCE |  | ||||||
|  |  | ||||||
| ### New Patterns Available |  | ||||||
| - **How to add a grading type**: create a new GradingStrategy (illustration below) |  | ||||||
| - **How to change the progress logic**: AssessmentProgressService |  | ||||||
| - **How to optimize a query**: DatabaseProvider with eager loading |  | ||||||
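|  |  | ||||||
| A hedged illustration of adding a hypothetical 'percent' type (the factory API is the one shown in the migration guide): |  | ||||||
|  |  | ||||||
| ```python |  | ||||||
| class PercentGradingStrategy: |  | ||||||
|     # Interprets the raw value as a percentage of max_points |  | ||||||
|     def calculate_score(self, grade_value, max_points): |  | ||||||
|         return float(grade_value) / 100.0 * max_points |  | ||||||
|  |  | ||||||
|     def get_grading_type(self): |  | ||||||
|         return 'percent' |  | ||||||
|  |  | ||||||
| GradingStrategyFactory.register_strategy('percent', PercentGradingStrategy) |  | ||||||
| ``` |  | ||||||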
|  |  | ||||||
| ### Legacy Code |  | ||||||
| - **Legacy methods**: kept temporarily as a safety net |  | ||||||
| - **Feature flags**: allow instant rollback if needed |  | ||||||
| - **Documentation**: a complete migration guide is provided |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 📋 RECOMMENDED NEXT STEPS |  | ||||||
|  |  | ||||||
| ### Phase 2 (Optional - 2-4 weeks) |  | ||||||
| 1. **Clean up legacy code** once stable in production (1-2 weeks) |  | ||||||
| 2. **Remove feature flags** that have become permanent |  | ||||||
| 3. **Further optimizations**: Redis cache, pagination |  | ||||||
| 4. **REST API interface** for external integrations |  | ||||||
|  |  | ||||||
| ### Ongoing Maintenance |  | ||||||
| 1. **Monitoring**: watch production performance |  | ||||||
| 2. **Tests**: keep coverage >90% |  | ||||||
| 3. **Team training**: sessions on the new architecture |  | ||||||
| 4. **Documentation**: keep it up to date as things evolve |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| ## 🎯 CONCLUSION |  | ||||||
|  |  | ||||||
| The progressive migration of the Notytex architecture is **COMPLETE AND SUCCESSFUL**. |  | ||||||
|  |  | ||||||
| The application now benefits from: |  | ||||||
| - A **modern architecture** following the SOLID principles |  | ||||||
| - **Optimized performance** with the anti-patterns eliminated |  | ||||||
| - **Easier extensibility** for future evolutions |  | ||||||
| - **Guaranteed stability** backed by 214+ passing tests |  | ||||||
| - A **rollback system** for maximum safety |  | ||||||
|  |  | ||||||
| **The team now has a solid technical foundation for future development.** 🚀 |  | ||||||
|  |  | ||||||
| --- |  | ||||||
|  |  | ||||||
| *Report generated automatically on {datetime.now().strftime('%d/%m/%Y at %H:%M:%S')} by the migration finalization script.* |  | ||||||
| """ |  | ||||||
|      |  | ||||||
|     # Write the final report |  | ||||||
|     report_path = "MIGRATION_FINAL_REPORT.md" |  | ||||||
|     with open(report_path, 'w', encoding='utf-8') as f: |  | ||||||
|         f.write(report_content) |  | ||||||
|      |  | ||||||
|     logger.info(f"✅ Final report generated: {report_path}") |  | ||||||
|     return report_path |  | ||||||
|  |  | ||||||
| def _get_feature_flags_summary(): |  | ||||||
|     """Build the feature flag summary for the report.""" |  | ||||||
|     from config.feature_flags import feature_flags |  | ||||||
|      |  | ||||||
|     status_summary = feature_flags.get_status_summary() |  | ||||||
|      |  | ||||||
|     summary = "| Feature Flag | Status | Description |\n" |  | ||||||
|     summary += "|--------------|--------|-------------|\n" |  | ||||||
|      |  | ||||||
|     for flag_name, config in status_summary['flags'].items(): |  | ||||||
|         status = "✅ ENABLED" if config['enabled'] else "❌ DISABLED" |  | ||||||
|         summary += f"| {flag_name} | {status} | {config['description']} |\n" |  | ||||||
|      |  | ||||||
|     summary += f"\n**Total enabled:** {status_summary['total_enabled']} feature flags\n" |  | ||||||
|     summary += f"**Last updated:** {status_summary['last_updated']}\n" |  | ||||||
|      |  | ||||||
|     return summary |  | ||||||
|  |  | ||||||
| def main(): |  | ||||||
|     """ |  | ||||||
|     Main entry point for the migration finalization. |  | ||||||
|     """ |  | ||||||
|     logger.info("🚀 STARTING PROGRESSIVE MIGRATION FINALIZATION - DAY 7") |  | ||||||
|     logger.info("=" * 60) |  | ||||||
|      |  | ||||||
|     try: |  | ||||||
|         # Flask setup |  | ||||||
|         app, ctx = setup_flask_context() |  | ||||||
|         logger.info("✅ Flask context configured") |  | ||||||
|  |  | ||||||
|         # Step 4.1: permanently enable the feature flags |  | ||||||
|         activation_results = activate_all_migration_features() |  | ||||||
|         logger.info("✅ STEP 4.1 DONE - feature flags enabled") |  | ||||||
|          |  | ||||||
|         # System validation in production mode |  | ||||||
|         system_valid = validate_system_in_production_mode() |  | ||||||
|         if not system_valid: |  | ||||||
|             raise RuntimeError("System validation failed") |  | ||||||
|         logger.info("✅ System validated in production mode") |  | ||||||
|  |  | ||||||
|         # Step 4.2: full final tests |  | ||||||
|         tests_passed = run_comprehensive_tests() |  | ||||||
|         if not tests_passed: |  | ||||||
|             raise RuntimeError("Final tests failed") |  | ||||||
|         logger.info("✅ STEP 4.2 DONE - final tests passed") |  | ||||||
|          |  | ||||||
|         # Final benchmark |  | ||||||
|         benchmark_success = benchmark_final_performance() |  | ||||||
|         if not benchmark_success: |  | ||||||
|             logger.warning("⚠️  Final benchmark incomplete but non-blocking") |  | ||||||
|         else: |  | ||||||
|             logger.info("✅ Final benchmark done") |  | ||||||
|  |  | ||||||
|         # Generate the final report |  | ||||||
|         report_path = generate_migration_final_report() |  | ||||||
|         logger.info(f"✅ Final report generated: {report_path}") |  | ||||||
|          |  | ||||||
|         # Clean up the Flask context |  | ||||||
|         ctx.pop() |  | ||||||
|          |  | ||||||
|         logger.info("=" * 60) |  | ||||||
|         logger.info("🎉 MIGRATION PROGRESSIVE TERMINÉE AVEC SUCCÈS !") |  | ||||||
|         logger.info("=" * 60) |  | ||||||
|         logger.info("📋 Actions recommandées:") |  | ||||||
|         logger.info("   1. Vérifier le rapport final: MIGRATION_FINAL_REPORT.md") |  | ||||||
|         logger.info("   2. Déployer en production avec feature flags actifs") |  | ||||||
|         logger.info("   3. Surveiller les performances pendant 1-2 semaines") |  | ||||||
|         logger.info("   4. Nettoyer le code legacy si tout fonctionne bien") |  | ||||||
|         logger.info("   5. Former l'équipe sur la nouvelle architecture") |  | ||||||
|          |  | ||||||
|         return True |  | ||||||
|          |  | ||||||
|     except Exception as e: |  | ||||||
|         logger.error(f"❌ ERREUR FATALE DURANT FINALISATION: {str(e)}") |  | ||||||
|         logger.exception("Détails de l'erreur:") |  | ||||||
|          |  | ||||||
|         logger.error("=" * 60) |  | ||||||
|         logger.error("🚨 PROCÉDURE DE ROLLBACK RECOMMANDÉE:") |  | ||||||
|         logger.error("   1. Désactiver tous les feature flags:") |  | ||||||
|         logger.error("      python -c \"from config.feature_flags import feature_flags, FeatureFlag; [feature_flags.disable(f) for f in FeatureFlag]\"") |  | ||||||
|         logger.error("   2. Vérifier que l'application fonctionne avec l'ancien code") |  | ||||||
|         logger.error("   3. Analyser l'erreur et corriger avant de réessayer") |  | ||||||
|          |  | ||||||
|         return False |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     success = main() |  | ||||||
|     sys.exit(0 if success else 1) |  | ||||||
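The rollback one-liner logged above is dense; an equivalent standalone sketch, assuming `config.feature_flags` exposes the same `feature_flags` singleton and `FeatureFlag` enum used throughout this script:

```python
# rollback_flags.py - equivalent to the one-liner in the rollback procedure above.
from config.feature_flags import feature_flags, FeatureFlag

# Disable every migration flag so the application falls back to the legacy paths.
for flag in FeatureFlag:
    feature_flags.disable(flag)
```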
| @@ -1,53 +0,0 @@ | |||||||
| 🏆 FINAL MIGRATION REPORT - DAY 7 |  | ||||||
| ================================================================================ |  | ||||||
| Date: 2025-08-07 09:24:09 |  | ||||||
| Services tested: 4 |  | ||||||
|  |  | ||||||
| 📈 EXECUTIVE SUMMARY: |  | ||||||
|    Average improvement: -6.9% |  | ||||||
|    Best improvement: -0.9% (StudentScoreCalculator) |  | ||||||
|    Services improved: 0/4 |  | ||||||
|  |  | ||||||
| 📊 PER-SERVICE DETAILS: |  | ||||||
|  |  | ||||||
| 🔹 AssessmentProgressService |  | ||||||
|    Old time:            1.68ms ± 0.18ms |  | ||||||
|    New time:            1.76ms ± 0.30ms |  | ||||||
|    Improvement:         -4.2% |  | ||||||
|    Iterations:            50 |  | ||||||
|    Speedup:             0.96x |  | ||||||
|  |  | ||||||
| 🔹 StudentScoreCalculator |  | ||||||
|    Old time:            4.33ms ± 0.53ms |  | ||||||
|    New time:            4.37ms ± 0.51ms |  | ||||||
|    Improvement:         -0.9% |  | ||||||
|    Iterations:            30 |  | ||||||
|    Speedup:             0.99x |  | ||||||
|  |  | ||||||
| 🔹 AssessmentStatisticsService |  | ||||||
|    Old time:            4.44ms ± 0.63ms |  | ||||||
|    New time:            4.53ms ± 0.82ms |  | ||||||
|    Improvement:         -2.1% |  | ||||||
|    Iterations:            30 |  | ||||||
|    Speedup:             0.98x |  | ||||||
|  |  | ||||||
| 🔹 UnifiedGradingCalculator |  | ||||||
|    Old time:            0.05ms ± 0.01ms |  | ||||||
|    New time:            0.06ms ± 0.03ms |  | ||||||
|    Improvement:        -20.2% |  | ||||||
|    Iterations:           200 |  | ||||||
|    Speedup:             0.83x |  | ||||||
|  |  | ||||||
| 🔧 TECHNICAL ANALYSIS: |  | ||||||
|  |  | ||||||
| ⚠️ Services with regressions: |  | ||||||
|    • AssessmentProgressService: -4.2% |  | ||||||
|    • StudentScoreCalculator: -0.9% |  | ||||||
|    • AssessmentStatisticsService: -2.1% |  | ||||||
|    • UnifiedGradingCalculator: -20.2% |  | ||||||
|  |  | ||||||
| 🎯 CONCLUSION: |  | ||||||
| ⚠️ Overall performance: -6.9% |  | ||||||
| ⚠️ Regression analysis required |  | ||||||
|  |  | ||||||
| 🚀 Ready for production with the new architecture! |  | ||||||
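The figures above are consistent with the usual definitions; a small sketch of how "Improvement" and "Speedup" are presumably derived (using the StudentScoreCalculator row, where the rounded inputs reproduce the report exactly):

```python
old_ms, new_ms = 4.33, 4.37  # StudentScoreCalculator timings from the report

improvement_pct = (old_ms - new_ms) / old_ms * 100  # -> -0.9%, matching the report
speedup = old_ms / new_ms                           # -> 0.99x, matching the report
```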
| @@ -1,267 +0,0 @@ | |||||||
| """ |  | ||||||
| Refactored version of the models after applying the SOLID principles. |  | ||||||
|  |  | ||||||
| This version shows how the Assessment class becomes much simpler |  | ||||||
| once the business services have been extracted. |  | ||||||
| """ |  | ||||||
| from flask_sqlalchemy import SQLAlchemy |  | ||||||
| from datetime import datetime |  | ||||||
| from sqlalchemy import Index, CheckConstraint, Enum |  | ||||||
| from decimal import Decimal |  | ||||||
| from typing import Optional, Dict, Any |  | ||||||
| from flask import current_app |  | ||||||
|  |  | ||||||
| # Service imports used for delegation |  | ||||||
| from services.assessment_services import ProgressResult, StudentScore, StatisticsResult |  | ||||||
| from providers.concrete_providers import AssessmentServicesFactory |  | ||||||
|  |  | ||||||
| db = SQLAlchemy() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ClassGroup(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     name = db.Column(db.String(100), nullable=False, unique=True) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     year = db.Column(db.String(20), nullable=False) |  | ||||||
|     students = db.relationship('Student', backref='class_group', lazy=True) |  | ||||||
|     assessments = db.relationship('Assessment', backref='class_group', lazy=True) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<ClassGroup {self.name}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Student(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     last_name = db.Column(db.String(100), nullable=False) |  | ||||||
|     first_name = db.Column(db.String(100), nullable=False) |  | ||||||
|     email = db.Column(db.String(120), unique=True) |  | ||||||
|     class_group_id = db.Column(db.Integer, db.ForeignKey('class_group.id'), nullable=False) |  | ||||||
|     grades = db.relationship('Grade', backref='student', lazy=True) |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Student {self.first_name} {self.last_name}>' |  | ||||||
|      |  | ||||||
|     @property |  | ||||||
|     def full_name(self): |  | ||||||
|         return f"{self.first_name} {self.last_name}" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Assessment(db.Model): |  | ||||||
|     """ |  | ||||||
|     Assessment model refactored along the SOLID principles. |  | ||||||
|      |  | ||||||
|     BEFORE: 267 lines with 4 responsibilities |  | ||||||
|     AFTER: ~80 lines with 1 responsibility (the data model) |  | ||||||
|      |  | ||||||
|     The business responsibilities were extracted into: |  | ||||||
|     - AssessmentProgressService (grading progress) |  | ||||||
|     - StudentScoreCalculator (student scores) |  | ||||||
|     - AssessmentStatisticsService (statistics) |  | ||||||
|     """ |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     title = db.Column(db.String(200), nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     date = db.Column(db.Date, nullable=False, default=datetime.utcnow) |  | ||||||
|     trimester = db.Column(db.Integer, nullable=False)  # 1, 2, or 3 |  | ||||||
|     class_group_id = db.Column(db.Integer, db.ForeignKey('class_group.id'), nullable=False) |  | ||||||
|     coefficient = db.Column(db.Float, default=1.0) |  | ||||||
|     exercises = db.relationship('Exercise', backref='assessment', lazy=True, cascade='all, delete-orphan') |  | ||||||
|  |  | ||||||
|     __table_args__ = ( |  | ||||||
|         CheckConstraint('trimester IN (1, 2, 3)', name='check_trimester_valid'), |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Assessment {self.title}>' |  | ||||||
|      |  | ||||||
|     # =============== DELEGATION TO THE SERVICES =============== |  | ||||||
|      |  | ||||||
|     @property |  | ||||||
|     def grading_progress(self) -> Dict[str, Any]: |  | ||||||
|         """ |  | ||||||
|         Delegates the progress computation to the dedicated service. |  | ||||||
|         No circular imports, and no business logic left in the model. |  | ||||||
|         """ |  | ||||||
|         services = AssessmentServicesFactory.create_facade() |  | ||||||
|         progress: ProgressResult = services.get_grading_progress(self) |  | ||||||
|          |  | ||||||
|         # Convert to a dict for compatibility with the existing API |  | ||||||
|         return { |  | ||||||
|             'percentage': progress.percentage, |  | ||||||
|             'completed': progress.completed, |  | ||||||
|             'total': progress.total, |  | ||||||
|             'status': progress.status, |  | ||||||
|             'students_count': progress.students_count |  | ||||||
|         } |  | ||||||
|      |  | ||||||
|     def calculate_student_scores(self): |  | ||||||
|         """ |  | ||||||
|         Delegates the score computation to the dedicated service. |  | ||||||
|         No more N+1 queries; the logic is optimized inside the service. |  | ||||||
|         """ |  | ||||||
|         services = AssessmentServicesFactory.create_facade() |  | ||||||
|         students_scores, exercise_scores = services.calculate_student_scores(self) |  | ||||||
|          |  | ||||||
|         # Convert for compatibility with the existing API |  | ||||||
|         converted_students = {} |  | ||||||
|         for student_id, score in students_scores.items(): |  | ||||||
|             converted_students[student_id] = { |  | ||||||
|                 'student': next(s for s in self.class_group.students if s.id == student_id), |  | ||||||
|                 'total_score': score.total_score, |  | ||||||
|                 'total_max_points': score.total_max_points, |  | ||||||
|                 'exercises': score.exercises |  | ||||||
|             } |  | ||||||
|          |  | ||||||
|         return converted_students, exercise_scores |  | ||||||
|      |  | ||||||
|     def get_assessment_statistics(self) -> Dict[str, float]: |  | ||||||
|         """ |  | ||||||
|         Delegates the statistics computation to the dedicated service. |  | ||||||
|         Business logic externalized, model kept simple. |  | ||||||
|         """ |  | ||||||
|         services = AssessmentServicesFactory.create_facade() |  | ||||||
|         stats: StatisticsResult = services.get_statistics(self) |  | ||||||
|          |  | ||||||
|         # Convert to a dict for compatibility |  | ||||||
|         return { |  | ||||||
|             'count': stats.count, |  | ||||||
|             'mean': stats.mean, |  | ||||||
|             'median': stats.median, |  | ||||||
|             'min': stats.min, |  | ||||||
|             'max': stats.max, |  | ||||||
|             'std_dev': stats.std_dev |  | ||||||
|         } |  | ||||||
|      |  | ||||||
|     def get_total_max_points(self) -> float: |  | ||||||
|         """ |  | ||||||
|         Computes the total number of available points. |  | ||||||
|         The only simple piece of business logic kept in the model. |  | ||||||
|         """ |  | ||||||
|         total = 0 |  | ||||||
|         for exercise in self.exercises: |  | ||||||
|             for element in exercise.grading_elements: |  | ||||||
|                 total += element.max_points |  | ||||||
|         return total |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Exercise(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     assessment_id = db.Column(db.Integer, db.ForeignKey('assessment.id'), nullable=False) |  | ||||||
|     title = db.Column(db.String(200), nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     order = db.Column(db.Integer, default=1) |  | ||||||
|     grading_elements = db.relationship('GradingElement', backref='exercise', lazy=True, cascade='all, delete-orphan') |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Exercise {self.title}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class GradingElement(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     exercise_id = db.Column(db.Integer, db.ForeignKey('exercise.id'), nullable=False) |  | ||||||
|     label = db.Column(db.String(200), nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     skill = db.Column(db.String(200)) |  | ||||||
|     max_points = db.Column(db.Float, nullable=False) |  | ||||||
|     grading_type = db.Column(Enum('notes', 'score', name='grading_types'), nullable=False, default='notes') |  | ||||||
|     domain_id = db.Column(db.Integer, db.ForeignKey('domains.id'), nullable=True) |  | ||||||
|     grades = db.relationship('Grade', backref='grading_element', lazy=True, cascade='all, delete-orphan') |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<GradingElement {self.label}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Grade(db.Model): |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     student_id = db.Column(db.Integer, db.ForeignKey('student.id'), nullable=False) |  | ||||||
|     grading_element_id = db.Column(db.Integer, db.ForeignKey('grading_element.id'), nullable=False) |  | ||||||
|     value = db.Column(db.String(10)) |  | ||||||
|     comment = db.Column(db.Text) |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Grade {self.value} for {self.student.first_name if self.student else "Unknown"}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Configuration tables (unchanged) |  | ||||||
| class AppConfig(db.Model): |  | ||||||
|     """Configuration simple de l'application (clé-valeur).""" |  | ||||||
|     __tablename__ = 'app_config' |  | ||||||
|      |  | ||||||
|     key = db.Column(db.String(100), primary_key=True) |  | ||||||
|     value = db.Column(db.Text, nullable=False) |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<AppConfig {self.key}={self.value}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CompetenceScaleValue(db.Model): |  | ||||||
|     """Valeurs de l'échelle des compétences (0, 1, 2, 3, ., d, etc.).""" |  | ||||||
|     __tablename__ = 'competence_scale_values' |  | ||||||
|      |  | ||||||
|     value = db.Column(db.String(10), primary_key=True) |  | ||||||
|     label = db.Column(db.String(100), nullable=False) |  | ||||||
|     color = db.Column(db.String(7), nullable=False) |  | ||||||
|     included_in_total = db.Column(db.Boolean, default=True, nullable=False) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<CompetenceScaleValue {self.value}: {self.label}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Competence(db.Model): |  | ||||||
|     """Liste des compétences (Calculer, Raisonner, etc.).""" |  | ||||||
|     __tablename__ = 'competences' |  | ||||||
|      |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     name = db.Column(db.String(100), unique=True, nullable=False) |  | ||||||
|     color = db.Column(db.String(7), nullable=False) |  | ||||||
|     icon = db.Column(db.String(50), nullable=False) |  | ||||||
|     order_index = db.Column(db.Integer, default=0) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Competence {self.name}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Domain(db.Model): |  | ||||||
|     """Domaines/tags pour les éléments de notation.""" |  | ||||||
|     __tablename__ = 'domains' |  | ||||||
|      |  | ||||||
|     id = db.Column(db.Integer, primary_key=True) |  | ||||||
|     name = db.Column(db.String(100), unique=True, nullable=False) |  | ||||||
|     color = db.Column(db.String(7), nullable=False, default='#6B7280') |  | ||||||
|     description = db.Column(db.Text) |  | ||||||
|     created_at = db.Column(db.DateTime, default=datetime.utcnow) |  | ||||||
|     updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) |  | ||||||
|      |  | ||||||
|     # Reverse relationship |  | ||||||
|     grading_elements = db.relationship('GradingElement', backref='domain', lazy=True) |  | ||||||
|      |  | ||||||
|     def __repr__(self): |  | ||||||
|         return f'<Domain {self.name}>' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # =============== BACKWARD-COMPATIBILITY CLASS =============== |  | ||||||
|  |  | ||||||
| class GradingCalculator: |  | ||||||
|     """ |  | ||||||
|     Backward-compatibility shim. Delegates to the new services. |  | ||||||
|     To be phased out in favour of dependency injection. |  | ||||||
|     """ |  | ||||||
|      |  | ||||||
|     @staticmethod |  | ||||||
|     def calculate_score(grade_value: str, grading_type: str, max_points: float) -> Optional[float]: |  | ||||||
|         """Délègue vers le nouveau service unifié.""" |  | ||||||
|         services = AssessmentServicesFactory.create_facade() |  | ||||||
|         return services.grading_calculator.calculate_score(grade_value, grading_type, max_points) |  | ||||||
|      |  | ||||||
|     @staticmethod |  | ||||||
|     def is_counted_in_total(grade_value: str, grading_type: str) -> bool: |  | ||||||
|         """Délègue vers le nouveau service unifié.""" |  | ||||||
|         services = AssessmentServicesFactory.create_facade() |  | ||||||
|         return services.grading_calculator.is_counted_in_total(grade_value) |  | ||||||
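Taken together, the model above is a thin shim over the service layer; a minimal usage sketch, assuming an active application context and the factory/facade interfaces shown in this file:

```python
from providers.concrete_providers import AssessmentServicesFactory

assessment = Assessment.query.first()

# Through the backward-compatible model property (returns a plain dict):
progress = assessment.grading_progress
print(progress['percentage'], progress['status'])

# Or directly against the facade, bypassing the model shim:
services = AssessmentServicesFactory.create_facade()
stats = services.get_statistics(assessment)  # StatisticsResult dataclass
```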
| @@ -1,78 +0,0 @@ | |||||||
| { |  | ||||||
|   "timestamp": "2025-08-07T02:39:53.135159", |  | ||||||
|   "total_duration_ms": 12.613060003786813, |  | ||||||
|   "python_version": "3.13.5", |  | ||||||
|   "system_info": { |  | ||||||
|     "cpu_count": 8, |  | ||||||
|     "cpu_freq": { |  | ||||||
|       "current": 2249.1085000000003, |  | ||||||
|       "min": 400.0, |  | ||||||
|       "max": 4600.0 |  | ||||||
|     }, |  | ||||||
|     "memory_total_gb": 15.300716400146484, |  | ||||||
|     "python_version": "3.13.5 (main, Jun 21 2025, 09:35:00) [GCC 15.1.1 20250425]", |  | ||||||
|     "platform": "linux" |  | ||||||
|   }, |  | ||||||
|   "results": [ |  | ||||||
|     { |  | ||||||
|       "name": "database_query_assessments_with_relations", |  | ||||||
|       "execution_time_ms": 0.9407232035300694, |  | ||||||
|       "memory_usage_mb": 0.0234375, |  | ||||||
|       "iterations": 5, |  | ||||||
|       "min_time_ms": 0.322260006214492, |  | ||||||
|       "max_time_ms": 3.3645250005065463, |  | ||||||
|       "avg_time_ms": 0.9407232035300694, |  | ||||||
|       "std_dev_ms": 1.3550010965272643, |  | ||||||
|       "success": true, |  | ||||||
|       "error_message": null, |  | ||||||
|       "metadata": { |  | ||||||
|         "query_type": "assessments_with_joinedload" |  | ||||||
|       } |  | ||||||
|     }, |  | ||||||
|     { |  | ||||||
|       "name": "database_query_grades_complex_join", |  | ||||||
|       "execution_time_ms": 0.3953178005758673, |  | ||||||
|       "memory_usage_mb": 0.0078125, |  | ||||||
|       "iterations": 5, |  | ||||||
|       "min_time_ms": 0.1903810043586418, |  | ||||||
|       "max_time_ms": 1.1664140038192272, |  | ||||||
|       "avg_time_ms": 0.3953178005758673, |  | ||||||
|       "std_dev_ms": 0.43115645332458297, |  | ||||||
|       "success": true, |  | ||||||
|       "error_message": null, |  | ||||||
|       "metadata": { |  | ||||||
|         "query_type": "grades_with_complex_joins" |  | ||||||
|       } |  | ||||||
|     }, |  | ||||||
|     { |  | ||||||
|       "name": "config_get_competence_scale_values", |  | ||||||
|       "execution_time_ms": 0.30451139755314216, |  | ||||||
|       "memory_usage_mb": 0.0046875, |  | ||||||
|       "iterations": 5, |  | ||||||
|       "min_time_ms": 0.21855999511899427, |  | ||||||
|       "max_time_ms": 0.6202539952937514, |  | ||||||
|       "avg_time_ms": 0.30451139755314216, |  | ||||||
|       "std_dev_ms": 0.17659352127776015, |  | ||||||
|       "success": true, |  | ||||||
|       "error_message": null, |  | ||||||
|       "metadata": { |  | ||||||
|         "operation": "get_competence_scale_values" |  | ||||||
|       } |  | ||||||
|     }, |  | ||||||
|     { |  | ||||||
|       "name": "config_validate_grade_values", |  | ||||||
|       "execution_time_ms": 0.08327200193889439, |  | ||||||
|       "memory_usage_mb": 0.0, |  | ||||||
|       "iterations": 5, |  | ||||||
|       "min_time_ms": 0.055030999646987766, |  | ||||||
|       "max_time_ms": 0.18798900418914855, |  | ||||||
|       "avg_time_ms": 0.08327200193889439, |  | ||||||
|       "std_dev_ms": 0.05856681083962526, |  | ||||||
|       "success": true, |  | ||||||
|       "error_message": null, |  | ||||||
|       "metadata": { |  | ||||||
|         "operation": "validate_multiple_grade_values" |  | ||||||
|       } |  | ||||||
|     } |  | ||||||
|   ] |  | ||||||
| } |  | ||||||
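This JSON matches the `BenchmarkSuite.to_json()` format produced by the `PerformanceBenchmarker` script also removed in this commit; a short sketch of reading it back for a quick summary (field names taken from the file above):

```python
import json
from pathlib import Path

data = json.loads(Path("performance_baseline.json").read_text())
for result in data["results"]:
    print(f'{result["name"]:45} {result["avg_time_ms"]:.3f} ms '
          f'(± {result["std_dev_ms"]:.3f} ms over {result["iterations"]} runs)')
```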
| @@ -15,6 +15,7 @@ class AssessmentRepository(BaseRepository[Assessment]): | |||||||
|         self,  |         self,  | ||||||
|         trimester: Optional[int] = None, |         trimester: Optional[int] = None, | ||||||
|         class_id: Optional[int] = None, |         class_id: Optional[int] = None, | ||||||
|  |         correction_status: Optional[str] = None, | ||||||
|         sort_by: str = 'date_desc' |         sort_by: str = 'date_desc' | ||||||
|     ) -> List[Assessment]: |     ) -> List[Assessment]: | ||||||
|         """Trouve les évaluations selon les filtres avec eager loading des classes.""" |         """Trouve les évaluations selon les filtres avec eager loading des classes.""" | ||||||
| @@ -35,7 +36,14 @@ class AssessmentRepository(BaseRepository[Assessment]): | |||||||
|         # Apply sorting |         # Apply sorting | ||||||
|         query = self._apply_sorting(query, sort_by) |         query = self._apply_sorting(query, sort_by) | ||||||
|          |          | ||||||
|         return query.all() |         # Fetch the results | ||||||
|  |         assessments = query.all() | ||||||
|  |          | ||||||
|  |         # Post-filter by correction status when requested | ||||||
|  |         if correction_status: | ||||||
|  |             assessments = self._filter_by_correction_status(assessments, correction_status) | ||||||
|  |          | ||||||
|  |         return assessments | ||||||
|      |      | ||||||
|     def find_with_full_details(self, id: int) -> Optional[Assessment]: |     def find_with_full_details(self, id: int) -> Optional[Assessment]: | ||||||
|         """Trouve une évaluation avec tous ses détails.""" |         """Trouve une évaluation avec tous ses détails.""" | ||||||
| @@ -98,4 +106,22 @@ class AssessmentRepository(BaseRepository[Assessment]): | |||||||
|             return query.order_by(Assessment.title.asc()) |             return query.order_by(Assessment.title.asc()) | ||||||
|         elif sort_by == 'class': |         elif sort_by == 'class': | ||||||
|             return query.join(ClassGroup).order_by(ClassGroup.name.asc()) |             return query.join(ClassGroup).order_by(ClassGroup.name.asc()) | ||||||
|         return query |         return query | ||||||
|  |      | ||||||
|  |     def _filter_by_correction_status(self, assessments: List[Assessment], status: str) -> List[Assessment]: | ||||||
|  |         """Filtre les évaluations par statut de correction.""" | ||||||
|  |         filtered_assessments = [] | ||||||
|  |          | ||||||
|  |         for assessment in assessments: | ||||||
|  |             progress = assessment.grading_progress | ||||||
|  |             progress_status = progress.get('status', 'not_started') | ||||||
|  |              | ||||||
|  |             # Map the progress statuses onto the filter values | ||||||
|  |             if status == 'complete' and progress_status == 'completed': | ||||||
|  |                 filtered_assessments.append(assessment) | ||||||
|  |             elif status == 'incomplete' and progress_status in ['in_progress', 'not_started']: | ||||||
|  |                 filtered_assessments.append(assessment) | ||||||
|  |             elif status == 'not_started' and progress_status == 'not_started': | ||||||
|  |                 filtered_assessments.append(assessment) | ||||||
|  |          | ||||||
|  |         return filtered_assessments | ||||||
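Note that this post-filter calls `grading_progress` once per fetched assessment, so it runs in Python after the SQL query rather than inside it. A behavioral equivalent of the if/elif chain, expressed as a lookup table (a sketch, not part of the commit):

```python
# Same mapping as the if/elif chain above, as data instead of control flow.
STATUS_MATCHES = {
    'complete': {'completed'},
    'incomplete': {'in_progress', 'not_started'},
    'not_started': {'not_started'},
}

def filter_by_correction_status(assessments, status):
    allowed = STATUS_MATCHES.get(status, set())
    return [a for a in assessments
            if a.grading_progress.get('status', 'not_started') in allowed]
```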
| @@ -15,12 +15,14 @@ def list(): | |||||||
|     # Read the filter parameters |     # Read the filter parameters | ||||||
|     trimester_filter = request.args.get('trimester', '') |     trimester_filter = request.args.get('trimester', '') | ||||||
|     class_filter = request.args.get('class', '') |     class_filter = request.args.get('class', '') | ||||||
|  |     correction_filter = request.args.get('correction', '') | ||||||
|     sort_by = request.args.get('sort', 'date_desc') |     sort_by = request.args.get('sort', 'date_desc') | ||||||
|      |      | ||||||
|     # Let the repository apply the filters |     # Let the repository apply the filters | ||||||
|     assessments = assessment_repo.find_by_filters( |     assessments = assessment_repo.find_by_filters( | ||||||
|         trimester=int(trimester_filter) if trimester_filter else None, |         trimester=int(trimester_filter) if trimester_filter else None, | ||||||
|         class_id=int(class_filter) if class_filter else None, |         class_id=int(class_filter) if class_filter else None, | ||||||
|  |         correction_status=correction_filter if correction_filter else None, | ||||||
|         sort_by=sort_by |         sort_by=sort_by | ||||||
|     ) |     ) | ||||||
|      |      | ||||||
| @@ -32,6 +34,7 @@ def list(): | |||||||
|                          classes=classes, |                          classes=classes, | ||||||
|                          current_trimester=trimester_filter, |                          current_trimester=trimester_filter, | ||||||
|                          current_class=class_filter, |                          current_class=class_filter, | ||||||
|  |                          current_correction=correction_filter, | ||||||
|                          current_sort=sort_by) |                          current_sort=sort_by) | ||||||
|  |  | ||||||
| # Obsolete route removed - use new_unified instead | # Obsolete route removed - use new_unified instead | ||||||
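With these changes the list view accepts the new `correction` parameter alongside the existing ones; an illustrative request (the exact route prefix is assumed, as it is not shown in this hunk):

```
GET /assessments/?trimester=2&class=5&correction=incomplete&sort=date_desc
```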
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| from flask import Blueprint, render_template, request, jsonify | from flask import Blueprint, render_template, request, jsonify, flash, redirect, url_for | ||||||
| from models import db, Grade, GradingElement | from models import db, Grade, GradingElement | ||||||
| from repositories import AssessmentRepository, StudentRepository, GradeRepository | from repositories import AssessmentRepository, StudentRepository, GradeRepository | ||||||
| from app_config import config_manager | from app_config import config_manager | ||||||
|   | |||||||
							
								
								
									
run_tests.py
							| @@ -1,80 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
| """ |  | ||||||
| Runs the unit tests with pytest. |  | ||||||
| Usage: uv run python run_tests.py [options] |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import subprocess |  | ||||||
| import sys |  | ||||||
| import os |  | ||||||
|  |  | ||||||
| def run_tests(): |  | ||||||
|     """Runs the test suite with pytest via uv.""" |  | ||||||
|     print("🧪 Running the unit tests with pytest...") |  | ||||||
|     print("=" * 50) |  | ||||||
|      |  | ||||||
|     # Base command; quiet mode replaces the verbose flags |  | ||||||
|     if "--quiet" in sys.argv: |  | ||||||
|         cmd = ["uv", "run", "pytest", "-q"] |  | ||||||
|     else: |  | ||||||
|         cmd = ["uv", "run", "pytest", "-v", "--tb=short"] |  | ||||||
|      |  | ||||||
|     # Optional code-coverage report (kept even in quiet mode) |  | ||||||
|     if "--coverage" in sys.argv: |  | ||||||
|         cmd.extend(["--cov=.", "--cov-report=term-missing", "--cov-report=html:htmlcov"]) |  | ||||||
|         print("📊 Coverage report generation enabled") |  | ||||||
|      |  | ||||||
|     # Run a specific test file when a pattern is given, the whole suite otherwise |  | ||||||
|     test_pattern = None |  | ||||||
|     for arg in sys.argv[1:]: |  | ||||||
|         if not arg.startswith("--"): |  | ||||||
|             test_pattern = arg |  | ||||||
|             break |  | ||||||
|     cmd.append(f"tests/{test_pattern}" if test_pattern else "tests/") |  | ||||||
|      |  | ||||||
|     try: |  | ||||||
|         # Run the tests |  | ||||||
|         result = subprocess.run(cmd, cwd=os.getcwd()) |  | ||||||
|          |  | ||||||
|         print("\n" + "=" * 50) |  | ||||||
|         if result.returncode == 0: |  | ||||||
|             print("✅ All tests passed!") |  | ||||||
|         else: |  | ||||||
|             print(f"❌ Some tests failed (pytest exit code {result.returncode})") |  | ||||||
|              |  | ||||||
|         if "--coverage" in sys.argv: |  | ||||||
|             print("📈 Coverage report written to htmlcov/index.html") |  | ||||||
|              |  | ||||||
|         return result.returncode |  | ||||||
|          |  | ||||||
|     except KeyboardInterrupt: |  | ||||||
|         print("\n⚠️ Tests interrupted by the user") |  | ||||||
|         return 1 |  | ||||||
|     except Exception as e: |  | ||||||
|         print(f"❌ Error while running the tests: {e}") |  | ||||||
|         return 1 |  | ||||||
|  |  | ||||||
| def show_help(): |  | ||||||
|     """Prints usage help.""" |  | ||||||
|     print(""" |  | ||||||
| Usage: uv run python run_tests.py [options] [pattern] |  | ||||||
|  |  | ||||||
| Options: |  | ||||||
|   --coverage    Generate a code-coverage report |  | ||||||
|   --quiet       Quiet mode (less detail) |  | ||||||
|   --help        Show this help |  | ||||||
|  |  | ||||||
| Examples: |  | ||||||
|   uv run python run_tests.py                    # All tests |  | ||||||
|   uv run python run_tests.py --coverage         # With coverage |  | ||||||
|   uv run python run_tests.py test_models.py     # Tests from one file |  | ||||||
|   uv run python run_tests.py --quiet            # Quiet mode |  | ||||||
| """) |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     if "--help" in sys.argv: |  | ||||||
|         show_help() |  | ||||||
|         sys.exit(0) |  | ||||||
|      |  | ||||||
|     sys.exit(run_tests()) |  | ||||||
| @@ -1,505 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
| """ |  | ||||||
| Performance Benchmark Script - Baseline (DAY 1-2) |  | ||||||
|  |  | ||||||
| This script establishes the application's performance baseline before the migration |  | ||||||
| to the refactored architecture. It measures the critical metrics: |  | ||||||
|  |  | ||||||
| 1. Response time of common operations |  | ||||||
| 2. Memory consumption of the computations |  | ||||||
| 3. Database query performance |  | ||||||
| 4. Template rendering time |  | ||||||
|  |  | ||||||
| Used to verify that the migration introduces no performance regressions. |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import sys |  | ||||||
| import time |  | ||||||
| import psutil |  | ||||||
| import statistics |  | ||||||
| from typing import Dict, List, Any, Callable, Optional |  | ||||||
| from contextlib import contextmanager |  | ||||||
| from dataclasses import dataclass, asdict |  | ||||||
| from datetime import datetime |  | ||||||
| import json |  | ||||||
| from pathlib import Path |  | ||||||
|  |  | ||||||
| # Import the Flask app for the tests |  | ||||||
| sys.path.append(str(Path(__file__).parent.parent)) |  | ||||||
| from app import create_app |  | ||||||
| from models import db, Assessment, Student, ClassGroup, Exercise, GradingElement, Grade |  | ||||||
| from app_config import config_manager |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass |  | ||||||
| class BenchmarkResult: |  | ||||||
|     """Résultat d'un benchmark individuel.""" |  | ||||||
|      |  | ||||||
|     name: str |  | ||||||
|     execution_time_ms: float |  | ||||||
|     memory_usage_mb: float |  | ||||||
|     iterations: int |  | ||||||
|     min_time_ms: float |  | ||||||
|     max_time_ms: float |  | ||||||
|     avg_time_ms: float |  | ||||||
|     std_dev_ms: float |  | ||||||
|     success: bool |  | ||||||
|     error_message: Optional[str] = None |  | ||||||
|     metadata: Dict[str, Any] = None |  | ||||||
|      |  | ||||||
|     def __post_init__(self): |  | ||||||
|         if self.metadata is None: |  | ||||||
|             self.metadata = {} |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass |  | ||||||
| class BenchmarkSuite: |  | ||||||
|     """Suite complète de benchmarks.""" |  | ||||||
|      |  | ||||||
|     timestamp: datetime |  | ||||||
|     total_duration_ms: float |  | ||||||
|     python_version: str |  | ||||||
|     system_info: Dict[str, Any] |  | ||||||
|     results: List[BenchmarkResult] |  | ||||||
|      |  | ||||||
|     def to_json(self) -> str: |  | ||||||
|         """Convertit la suite en JSON pour persistance.""" |  | ||||||
|         data = asdict(self) |  | ||||||
|         data['timestamp'] = self.timestamp.isoformat() |  | ||||||
|         return json.dumps(data, indent=2) |  | ||||||
|      |  | ||||||
|     @classmethod |  | ||||||
|     def from_json(cls, json_str: str) -> 'BenchmarkSuite': |  | ||||||
|         """Charge une suite depuis JSON.""" |  | ||||||
|         data = json.loads(json_str) |  | ||||||
|         data['timestamp'] = datetime.fromisoformat(data['timestamp']) |  | ||||||
|         data['results'] = [BenchmarkResult(**result) for result in data['results']] |  | ||||||
|         return cls(**data) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PerformanceBenchmarker: |  | ||||||
|     """ |  | ||||||
|     Performance benchmarking harness. |  | ||||||
|      |  | ||||||
|     Measures the application's critical metrics to establish a baseline |  | ||||||
|     before migrating to the refactored architecture. |  | ||||||
|     """ |  | ||||||
|      |  | ||||||
|     def __init__(self, app=None, iterations: int = 10): |  | ||||||
|         self.app = app or create_app('testing') |  | ||||||
|         self.iterations = iterations |  | ||||||
|         self.results: List[BenchmarkResult] = [] |  | ||||||
|         self.start_time: Optional[float] = None |  | ||||||
|      |  | ||||||
|     @contextmanager |  | ||||||
|     def measure_performance(self, name: str, metadata: Dict[str, Any] = None): |  | ||||||
|         """ |  | ||||||
|         Context manager that measures the performance of an operation. |  | ||||||
|          |  | ||||||
|         Usage: |  | ||||||
|             with benchmarker.measure_performance("operation_name"): |  | ||||||
|                 # Code to measure |  | ||||||
|                 result = expensive_operation() |  | ||||||
|         """ |  | ||||||
|         process = psutil.Process() |  | ||||||
|         memory_before = process.memory_info().rss / 1024 / 1024  # MB |  | ||||||
|          |  | ||||||
|         start_time = time.perf_counter() |  | ||||||
|         error_message = None |  | ||||||
|         success = True |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             yield |  | ||||||
|         except Exception as e: |  | ||||||
|             success = False |  | ||||||
|             error_message = str(e) |  | ||||||
|         finally: |  | ||||||
|             end_time = time.perf_counter() |  | ||||||
|             memory_after = process.memory_info().rss / 1024 / 1024  # MB |  | ||||||
|              |  | ||||||
|             execution_time_ms = (end_time - start_time) * 1000 |  | ||||||
|             memory_usage_mb = memory_after - memory_before |  | ||||||
|              |  | ||||||
|             # Create the result with provisional single-run values |  | ||||||
|             # (run_benchmark computes aggregate statistics over iterations) |  | ||||||
|             result = BenchmarkResult( |  | ||||||
|                 name=name, |  | ||||||
|                 execution_time_ms=execution_time_ms, |  | ||||||
|                 memory_usage_mb=memory_usage_mb, |  | ||||||
|                 iterations=1, |  | ||||||
|                 min_time_ms=execution_time_ms, |  | ||||||
|                 max_time_ms=execution_time_ms, |  | ||||||
|                 avg_time_ms=execution_time_ms, |  | ||||||
|                 std_dev_ms=0.0, |  | ||||||
|                 success=success, |  | ||||||
|                 error_message=error_message, |  | ||||||
|                 metadata=metadata or {} |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             self.results.append(result) |  | ||||||
|      |  | ||||||
|     def run_benchmark(self, name: str, operation: Callable, metadata: Dict[str, Any] = None) -> BenchmarkResult: |  | ||||||
|         """ |  | ||||||
|         Runs a benchmark for the given operation. |  | ||||||
|          |  | ||||||
|         Args: |  | ||||||
|             name: Benchmark name |  | ||||||
|             operation: Callable to benchmark |  | ||||||
|             metadata: Additional metadata |  | ||||||
|              |  | ||||||
|         Returns: |  | ||||||
|             BenchmarkResult with detailed statistics |  | ||||||
|         """ |  | ||||||
|         times = [] |  | ||||||
|         memory_usages = [] |  | ||||||
|         success_count = 0 |  | ||||||
|         last_error = None |  | ||||||
|          |  | ||||||
|         print(f"🔄 Exécution benchmark '{name}' ({self.iterations} itérations)...") |  | ||||||
|          |  | ||||||
|         for i in range(self.iterations): |  | ||||||
|             process = psutil.Process() |  | ||||||
|             memory_before = process.memory_info().rss / 1024 / 1024  # MB |  | ||||||
|              |  | ||||||
|             start_time = time.perf_counter() |  | ||||||
|              |  | ||||||
|             try: |  | ||||||
|                 operation() |  | ||||||
|                 success_count += 1 |  | ||||||
|             except Exception as e: |  | ||||||
|                 last_error = str(e) |  | ||||||
|                 print(f"  ⚠️  Erreur itération {i+1}: {e}") |  | ||||||
|              |  | ||||||
|             end_time = time.perf_counter() |  | ||||||
|             memory_after = process.memory_info().rss / 1024 / 1024  # MB |  | ||||||
|              |  | ||||||
|             execution_time_ms = (end_time - start_time) * 1000 |  | ||||||
|             memory_usage_mb = memory_after - memory_before |  | ||||||
|              |  | ||||||
|             times.append(execution_time_ms) |  | ||||||
|             memory_usages.append(memory_usage_mb) |  | ||||||
|          |  | ||||||
|         # Compute the statistics |  | ||||||
|         success = success_count > 0 |  | ||||||
|         avg_time_ms = statistics.mean(times) if times else 0 |  | ||||||
|         min_time_ms = min(times) if times else 0 |  | ||||||
|         max_time_ms = max(times) if times else 0 |  | ||||||
|         std_dev_ms = statistics.stdev(times) if len(times) > 1 else 0 |  | ||||||
|         avg_memory_mb = statistics.mean(memory_usages) if memory_usages else 0 |  | ||||||
|          |  | ||||||
|         result = BenchmarkResult( |  | ||||||
|             name=name, |  | ||||||
|             execution_time_ms=avg_time_ms, |  | ||||||
|             memory_usage_mb=avg_memory_mb, |  | ||||||
|             iterations=self.iterations, |  | ||||||
|             min_time_ms=min_time_ms, |  | ||||||
|             max_time_ms=max_time_ms, |  | ||||||
|             avg_time_ms=avg_time_ms, |  | ||||||
|             std_dev_ms=std_dev_ms, |  | ||||||
|             success=success, |  | ||||||
|             error_message=last_error if not success else None, |  | ||||||
|             metadata=metadata or {} |  | ||||||
|         ) |  | ||||||
|          |  | ||||||
|         self.results.append(result) |  | ||||||
|          |  | ||||||
|         if success: |  | ||||||
|             print(f"  ✅ Terminé - {avg_time_ms:.2f}ms ± {std_dev_ms:.2f}ms") |  | ||||||
|         else: |  | ||||||
|             print(f"  ❌ Échec - {success_count}/{self.iterations} succès") |  | ||||||
|          |  | ||||||
|         return result |  | ||||||
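For reference, `run_benchmark` accepts any zero-argument callable; a minimal usage sketch (the measured operation is illustrative, and constructing the benchmarker assumes `create_app('testing')` works in your environment):

```python
benchmarker = PerformanceBenchmarker(iterations=20)
result = benchmarker.run_benchmark(
    "sum_first_million",                 # benchmark name
    lambda: sum(range(1_000_000)),       # illustrative operation to time
    {"note": "illustrative operation"},  # optional metadata
)
print(f"{result.avg_time_ms:.2f} ms ± {result.std_dev_ms:.2f} ms")
```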
|      |  | ||||||
|     def benchmark_grading_progress_calculation(self): |  | ||||||
|         """Benchmark du calcul de progression de notation.""" |  | ||||||
|          |  | ||||||
|         with self.app.app_context(): |  | ||||||
|             # Fetch an existing assessment to benchmark against |  | ||||||
|             assessment = Assessment.query.first() |  | ||||||
|             if not assessment: |  | ||||||
|                 print("⚠️  Pas d'évaluation trouvée, skip benchmark progression") |  | ||||||
|                 return |  | ||||||
|              |  | ||||||
|             def calculate_progress(): |  | ||||||
|                 # Exercise the legacy implementation |  | ||||||
|                 progress = assessment.grading_progress |  | ||||||
|                 return progress |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "grading_progress_calculation_legacy", |  | ||||||
|                 calculate_progress, |  | ||||||
|                 {"assessment_id": assessment.id, "method": "legacy_property"} |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def benchmark_student_scores_calculation(self): |  | ||||||
|         """Benchmark du calcul des scores étudiants.""" |  | ||||||
|          |  | ||||||
|         with self.app.app_context(): |  | ||||||
|             assessment = Assessment.query.first() |  | ||||||
|             if not assessment: |  | ||||||
|                 print("⚠️  Pas d'évaluation trouvée, skip benchmark scores") |  | ||||||
|                 return |  | ||||||
|              |  | ||||||
|             def calculate_scores(): |  | ||||||
|                 # Exercise the legacy implementation |  | ||||||
|                 scores = assessment.calculate_student_scores() |  | ||||||
|                 return scores |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "student_scores_calculation_legacy", |  | ||||||
|                 calculate_scores, |  | ||||||
|                 { |  | ||||||
|                     "assessment_id": assessment.id, |  | ||||||
|                     "method": "legacy_method", |  | ||||||
|                     "students_count": len(assessment.class_group.students) |  | ||||||
|                 } |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def benchmark_assessment_statistics(self): |  | ||||||
|         """Benchmark du calcul des statistiques d'évaluation.""" |  | ||||||
|          |  | ||||||
|         with self.app.app_context(): |  | ||||||
|             assessment = Assessment.query.first() |  | ||||||
|             if not assessment: |  | ||||||
|                 print("⚠️  Pas d'évaluation trouvée, skip benchmark statistiques") |  | ||||||
|                 return |  | ||||||
|              |  | ||||||
|             def calculate_statistics(): |  | ||||||
|                 # Exercise the legacy implementation |  | ||||||
|                 stats = assessment.get_assessment_statistics() |  | ||||||
|                 return stats |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "assessment_statistics_calculation_legacy", |  | ||||||
|                 calculate_statistics, |  | ||||||
|                 { |  | ||||||
|                     "assessment_id": assessment.id, |  | ||||||
|                     "method": "legacy_method", |  | ||||||
|                     "exercises_count": len(assessment.exercises) |  | ||||||
|                 } |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def benchmark_database_queries(self): |  | ||||||
|         """Benchmark des requêtes de base de données critiques.""" |  | ||||||
|          |  | ||||||
|         with self.app.app_context(): |  | ||||||
|             def query_assessments(): |  | ||||||
|                 # Typical query: assessments with their relations eagerly loaded |  | ||||||
|                 assessments = Assessment.query.options( |  | ||||||
|                     db.joinedload(Assessment.class_group), |  | ||||||
|                     db.joinedload(Assessment.exercises) |  | ||||||
|                 ).all() |  | ||||||
|                 return len(assessments) |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "database_query_assessments_with_relations", |  | ||||||
|                 query_assessments, |  | ||||||
|                 {"query_type": "assessments_with_joinedload"} |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             def query_grades(): |  | ||||||
|                 # Typical query: every grade |  | ||||||
|                 grades = Grade.query.join(GradingElement).join(Exercise).join(Assessment).all() |  | ||||||
|                 return len(grades) |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "database_query_grades_complex_join", |  | ||||||
|                 query_grades, |  | ||||||
|                 {"query_type": "grades_with_complex_joins"} |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def benchmark_config_operations(self): |  | ||||||
|         """Benchmark des opérations de configuration.""" |  | ||||||
|          |  | ||||||
|         with self.app.app_context(): |  | ||||||
|             def get_scale_values(): |  | ||||||
|                 # Exercise the most frequent configuration operations |  | ||||||
|                 values = config_manager.get_competence_scale_values() |  | ||||||
|                 return len(values) |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "config_get_competence_scale_values", |  | ||||||
|                 get_scale_values, |  | ||||||
|                 {"operation": "get_competence_scale_values"} |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             def validate_grade_values(): |  | ||||||
|                 # Validate a batch of grade values |  | ||||||
|                 test_values = ['15.5', '2', '.', 'd', 'invalid'] |  | ||||||
|                 results = [] |  | ||||||
|                 for value in test_values: |  | ||||||
|                     results.append(config_manager.validate_grade_value(value, 'notes')) |  | ||||||
|                     results.append(config_manager.validate_grade_value(value, 'score')) |  | ||||||
|                 return len(results) |  | ||||||
|              |  | ||||||
|             self.run_benchmark( |  | ||||||
|                 "config_validate_grade_values", |  | ||||||
|                 validate_grade_values, |  | ||||||
|                 {"operation": "validate_multiple_grade_values"} |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def run_full_suite(self) -> BenchmarkSuite: |  | ||||||
|         """Exécute la suite complète de benchmarks.""" |  | ||||||
|          |  | ||||||
|         print("🚀 Démarrage de la suite de benchmarks des performances") |  | ||||||
|         print(f"📊 Configuration: {self.iterations} itérations par test") |  | ||||||
|         print("=" * 60) |  | ||||||
|          |  | ||||||
|         self.start_time = time.perf_counter() |  | ||||||
|         self.results = [] |  | ||||||
|          |  | ||||||
|         # Core feature benchmarks |  | ||||||
|         self.benchmark_grading_progress_calculation() |  | ||||||
|         self.benchmark_student_scores_calculation() |  | ||||||
|         self.benchmark_assessment_statistics() |  | ||||||
|          |  | ||||||
|         # Database query benchmarks |  | ||||||
|         self.benchmark_database_queries() |  | ||||||
|          |  | ||||||
|         # Configuration operation benchmarks |  | ||||||
|         self.benchmark_config_operations() |  | ||||||
|          |  | ||||||
|         end_time = time.perf_counter() |  | ||||||
|         total_duration_ms = (end_time - self.start_time) * 1000 |  | ||||||
|          |  | ||||||
|         # System information |  | ||||||
|         system_info = { |  | ||||||
|             'cpu_count': psutil.cpu_count(), |  | ||||||
|             'cpu_freq': psutil.cpu_freq()._asdict() if psutil.cpu_freq() else None, |  | ||||||
|             'memory_total_gb': psutil.virtual_memory().total / 1024**3, |  | ||||||
|             'python_version': sys.version, |  | ||||||
|             'platform': sys.platform |  | ||||||
|         } |  | ||||||
|          |  | ||||||
|         suite = BenchmarkSuite( |  | ||||||
|             timestamp=datetime.utcnow(), |  | ||||||
|             total_duration_ms=total_duration_ms, |  | ||||||
|             python_version=sys.version.split()[0], |  | ||||||
|             system_info=system_info, |  | ||||||
|             results=self.results |  | ||||||
|         ) |  | ||||||
|          |  | ||||||
|         print("\n" + "=" * 60) |  | ||||||
|         print("📈 RÉSUMÉ DES PERFORMANCES") |  | ||||||
|         print("=" * 60) |  | ||||||
|          |  | ||||||
|         for result in self.results: |  | ||||||
|             status = "✅" if result.success else "❌" |  | ||||||
|             print(f"{status} {result.name:40} {result.avg_time_ms:8.2f}ms ± {result.std_dev_ms:6.2f}ms") |  | ||||||
|          |  | ||||||
|         print(f"\n⏱️  Durée totale: {total_duration_ms:.2f}ms") |  | ||||||
|         print(f"📊 Tests réussis: {sum(1 for r in self.results if r.success)}/{len(self.results)}") |  | ||||||
|          |  | ||||||
|         return suite |  | ||||||
|      |  | ||||||
|     def save_baseline(self, filepath: str = "performance_baseline.json"): |  | ||||||
|         """Sauvegarde la baseline de performance.""" |  | ||||||
|          |  | ||||||
|         suite = self.run_full_suite() |  | ||||||
|          |  | ||||||
|         baseline_path = Path(filepath) |  | ||||||
|         baseline_path.write_text(suite.to_json()) |  | ||||||
|          |  | ||||||
|         print(f"\n💾 Baseline sauvegardée: {baseline_path.absolute()}") |  | ||||||
|         return suite |  | ||||||
|      |  | ||||||
|     def compare_with_baseline(self, baseline_path: str = "performance_baseline.json") -> Dict[str, Any]: |  | ||||||
|         """Compare les performances actuelles avec la baseline.""" |  | ||||||
|          |  | ||||||
|         baseline_file = Path(baseline_path) |  | ||||||
|         if not baseline_file.exists(): |  | ||||||
|             raise FileNotFoundError(f"Baseline non trouvée: {baseline_path}") |  | ||||||
|          |  | ||||||
|         baseline_suite = BenchmarkSuite.from_json(baseline_file.read_text()) |  | ||||||
|         current_suite = self.run_full_suite() |  | ||||||
|          |  | ||||||
|         comparison = { |  | ||||||
|             'baseline_date': baseline_suite.timestamp.isoformat(), |  | ||||||
|             'current_date': current_suite.timestamp.isoformat(), |  | ||||||
|             'comparisons': [], |  | ||||||
|             'summary': { |  | ||||||
|                 'regressions': 0, |  | ||||||
|                 'improvements': 0, |  | ||||||
|                 'stable': 0 |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|          |  | ||||||
|         # Index the baseline results by name for easy comparison |  | ||||||
|         baseline_by_name = {r.name: r for r in baseline_suite.results} |  | ||||||
|          |  | ||||||
|         for current_result in current_suite.results: |  | ||||||
|             name = current_result.name |  | ||||||
|             baseline_result = baseline_by_name.get(name) |  | ||||||
|              |  | ||||||
|             if not baseline_result: |  | ||||||
|                 continue |  | ||||||
|              |  | ||||||
|             # Percentage change in execution time |  | ||||||
|             time_change_pct = ((current_result.avg_time_ms - baseline_result.avg_time_ms)  |  | ||||||
|                               / baseline_result.avg_time_ms * 100) |  | ||||||
|              |  | ||||||
|             # Classify the change (regression if more than 10% slower) |  | ||||||
|             if time_change_pct > 10: |  | ||||||
|                 status = 'regression' |  | ||||||
|                 comparison['summary']['regressions'] += 1 |  | ||||||
|             elif time_change_pct < -10: |  | ||||||
|                 status = 'improvement' |  | ||||||
|                 comparison['summary']['improvements'] += 1 |  | ||||||
|             else: |  | ||||||
|                 status = 'stable' |  | ||||||
|                 comparison['summary']['stable'] += 1 |  | ||||||
|              |  | ||||||
|             comparison['comparisons'].append({ |  | ||||||
|                 'name': name, |  | ||||||
|                 'baseline_time_ms': baseline_result.avg_time_ms, |  | ||||||
|                 'current_time_ms': current_result.avg_time_ms, |  | ||||||
|                 'time_change_pct': time_change_pct, |  | ||||||
|                 'status': status |  | ||||||
|             }) |  | ||||||
|          |  | ||||||
|         # Print the comparison summary |  | ||||||
|         print("\n" + "=" * 60) |  | ||||||
|         print("📊 COMPARAISON AVEC BASELINE") |  | ||||||
|         print("=" * 60) |  | ||||||
|          |  | ||||||
|         for comp in comparison['comparisons']: |  | ||||||
|             status_icon = {'regression': '🔴', 'improvement': '🟢', 'stable': '🟡'}[comp['status']] |  | ||||||
|             print(f"{status_icon} {comp['name']:40} {comp['time_change_pct']:+7.1f}%") |  | ||||||
|          |  | ||||||
|         summary = comparison['summary'] |  | ||||||
|         print(f"\n📈 Régressions: {summary['regressions']}") |  | ||||||
|         print(f"📈 Améliorations: {summary['improvements']}") |  | ||||||
|         print(f"📈 Stable: {summary['stable']}") |  | ||||||
|          |  | ||||||
|         return comparison |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def main(): |  | ||||||
|     """Point d'entrée principal du script.""" |  | ||||||
|      |  | ||||||
|     import argparse |  | ||||||
|      |  | ||||||
|     parser = argparse.ArgumentParser(description="Benchmark des performances Notytex") |  | ||||||
|     parser.add_argument('--iterations', type=int, default=10,  |  | ||||||
|                        help='Nombre d\'itérations par test (défaut: 10)') |  | ||||||
|     parser.add_argument('--baseline', action='store_true', |  | ||||||
|                        help='Créer une nouvelle baseline') |  | ||||||
|     parser.add_argument('--compare', type=str, metavar='BASELINE_FILE', |  | ||||||
|                        help='Comparer avec une baseline existante') |  | ||||||
|     parser.add_argument('--output', type=str, default='performance_baseline.json', |  | ||||||
|                        help='Fichier de sortie pour la baseline') |  | ||||||
|      |  | ||||||
|     args = parser.parse_args() |  | ||||||
|      |  | ||||||
|     benchmarker = PerformanceBenchmarker(iterations=args.iterations) |  | ||||||
|      |  | ||||||
|     if args.baseline: |  | ||||||
|         benchmarker.save_baseline(args.output) |  | ||||||
|     elif args.compare: |  | ||||||
|         benchmarker.compare_with_baseline(args.compare) |  | ||||||
|     else: |  | ||||||
|         benchmarker.run_full_suite() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     main() |  | ||||||
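The argparse options above translate to invocations along these lines (the script's path and filename are assumed here; they are not shown in this diff):

```
# Create a fresh baseline with 20 iterations per test
uv run python scripts/benchmark_baseline.py --baseline --iterations 20 --output performance_baseline.json

# Compare the current code against that baseline
uv run python scripts/benchmark_baseline.py --compare performance_baseline.json
```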
| @@ -1,566 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
| """ |  | ||||||
| Service Architecture Validation Script (DAY 1-2) |  | ||||||
|  |  | ||||||
| This script validates that the refactored architecture is properly prepared |  | ||||||
| for the progressive migration. It checks: |  | ||||||
|  |  | ||||||
| 1. Presence and structure of the new services |  | ||||||
| 2. Compatibility of the public interfaces |  | ||||||
| 3. Test coverage of the services |  | ||||||
| 4. Compliance with SOLID principles |  | ||||||
| 5. Documentation and type hints |  | ||||||
|  |  | ||||||
| Run before starting the migration to make sure everything is ready. |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import sys |  | ||||||
| import inspect |  | ||||||
| from pathlib import Path |  | ||||||
| from typing import Dict, List, Any, Optional |  | ||||||
| from dataclasses import dataclass |  | ||||||
| import ast |  | ||||||
|  |  | ||||||
| # Path configuration for imports |  | ||||||
| sys.path.append(str(Path(__file__).parent.parent)) |  | ||||||
|  |  | ||||||
| # Import the Flask app early to avoid import-order problems |  | ||||||
| try: |  | ||||||
|     from app import create_app |  | ||||||
|     # Create an app instance for the imports that depend on it |  | ||||||
|     _app = create_app('testing') |  | ||||||
|     _app_context = _app.app_context() |  | ||||||
|     _app_context.push() |  | ||||||
| except Exception as e: |  | ||||||
|     print(f"⚠️  Warning: Could not initialize Flask app context: {e}") |  | ||||||
|     _app_context = None |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass |  | ||||||
| class ValidationResult: |  | ||||||
|     """Result of a single validation check.""" |  | ||||||
|      |  | ||||||
|     name: str |  | ||||||
|     passed: bool |  | ||||||
|     message: str |  | ||||||
|     details: Optional[Dict[str, Any]] = None |  | ||||||
|     severity: str = "ERROR"  # ERROR, WARNING, INFO |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ArchitectureValidator: |  | ||||||
|     """ |  | ||||||
|     Validator for the refactored services architecture. |  | ||||||
|      |  | ||||||
|     Checks that all required components are present and correctly |  | ||||||
|     structured for the progressive migration. |  | ||||||
|     """ |  | ||||||
|      |  | ||||||
|     def __init__(self): |  | ||||||
|         self.results: List[ValidationResult] = [] |  | ||||||
|         self.project_root = Path(__file__).parent.parent |  | ||||||
|         self.services_path = self.project_root / "services" |  | ||||||
|      |  | ||||||
|     def add_result(self, name: str, passed: bool, message: str,  |  | ||||||
|                    details: Optional[Dict[str, Any]] = None, severity: str = "ERROR"): |  | ||||||
|         """Record a validation result.""" |  | ||||||
|         result = ValidationResult(name, passed, message, details, severity) |  | ||||||
|         self.results.append(result) |  | ||||||
|          |  | ||||||
|         # Print immediately for feedback |  | ||||||
|         status = "✅" if passed else ("⚠️" if severity == "WARNING" else "❌") |  | ||||||
|         print(f"{status} {name}: {message}") |  | ||||||
|      |  | ||||||
|     def validate_services_module_structure(self): |  | ||||||
|         """Validate the structure of the services module.""" |  | ||||||
|          |  | ||||||
|         # Check that the services directory exists |  | ||||||
|         if not self.services_path.exists(): |  | ||||||
|             self.add_result( |  | ||||||
|                 "services_directory_exists", |  | ||||||
|                 False, |  | ||||||
|                 "The 'services' directory does not exist" |  | ||||||
|             ) |  | ||||||
|             return |  | ||||||
|          |  | ||||||
|         self.add_result( |  | ||||||
|             "services_directory_exists", |  | ||||||
|             True, |  | ||||||
|             "services directory present" |  | ||||||
|         ) |  | ||||||
|          |  | ||||||
|         # Check __init__.py |  | ||||||
|         init_file = self.services_path / "__init__.py" |  | ||||||
|         if not init_file.exists(): |  | ||||||
|             self.add_result( |  | ||||||
|                 "services_init_file", |  | ||||||
|                 False, |  | ||||||
|                 "services/__init__.py file missing" |  | ||||||
|             ) |  | ||||||
|         else: |  | ||||||
|             self.add_result( |  | ||||||
|                 "services_init_file", |  | ||||||
|                 True, |  | ||||||
|                 "services/__init__.py file present" |  | ||||||
|             ) |  | ||||||
|          |  | ||||||
|         # Check the expected service files |  | ||||||
|         expected_services = [ |  | ||||||
|             "assessment_services.py" |  | ||||||
|         ] |  | ||||||
|          |  | ||||||
|         for service_file in expected_services: |  | ||||||
|             service_path = self.services_path / service_file |  | ||||||
|             if not service_path.exists(): |  | ||||||
|                 self.add_result( |  | ||||||
|                     f"service_file_{service_file}", |  | ||||||
|                     False, |  | ||||||
|                     f"File {service_file} missing" |  | ||||||
|                 ) |  | ||||||
|             else: |  | ||||||
|                 self.add_result( |  | ||||||
|                     f"service_file_{service_file}", |  | ||||||
|                     True, |  | ||||||
|                     f"File {service_file} present" |  | ||||||
|                 ) |  | ||||||
|      |  | ||||||
|     def validate_assessment_services_classes(self): |  | ||||||
|         """Validate the presence of the assessment service classes.""" |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             from services.assessment_services import ( |  | ||||||
|                 GradingStrategy, |  | ||||||
|                 NotesStrategy, |  | ||||||
|                 ScoreStrategy, |  | ||||||
|                 GradingStrategyFactory, |  | ||||||
|                 UnifiedGradingCalculator, |  | ||||||
|                 AssessmentProgressService, |  | ||||||
|                 StudentScoreCalculator, |  | ||||||
|                 AssessmentStatisticsService, |  | ||||||
|                 AssessmentServicesFacade |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             # Check the core classes (Strategy pattern) |  | ||||||
|             expected_classes = [ |  | ||||||
|                 ("GradingStrategy", GradingStrategy), |  | ||||||
|                 ("NotesStrategy", NotesStrategy), |  | ||||||
|                 ("ScoreStrategy", ScoreStrategy), |  | ||||||
|                 ("GradingStrategyFactory", GradingStrategyFactory), |  | ||||||
|                 ("UnifiedGradingCalculator", UnifiedGradingCalculator), |  | ||||||
|                 ("AssessmentProgressService", AssessmentProgressService), |  | ||||||
|                 ("StudentScoreCalculator", StudentScoreCalculator), |  | ||||||
|                 ("AssessmentStatisticsService", AssessmentStatisticsService), |  | ||||||
|                 ("AssessmentServicesFacade", AssessmentServicesFacade) |  | ||||||
|             ] |  | ||||||
|              |  | ||||||
|             for class_name, class_obj in expected_classes: |  | ||||||
|                 # Report success only if the imported name really is a class |  | ||||||
|                 if inspect.isclass(class_obj): |  | ||||||
|                     self.add_result( |  | ||||||
|                         f"service_class_{class_name}", |  | ||||||
|                         True, |  | ||||||
|                         f"Class {class_name} correctly defined" |  | ||||||
|                     ) |  | ||||||
|                 else: |  | ||||||
|                     self.add_result( |  | ||||||
|                         f"service_class_{class_name}", |  | ||||||
|                         False, |  | ||||||
|                         f"{class_name} is not a class" |  | ||||||
|                     ) |  | ||||||
|          |  | ||||||
|         except ImportError as e: |  | ||||||
|             self.add_result( |  | ||||||
|                 "assessment_services_import", |  | ||||||
|                 False, |  | ||||||
|                 f"Unable to import the services: {e}" |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def validate_service_interfaces(self): |  | ||||||
|         """Validate the public interfaces of the services.""" |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             from services.assessment_services import ( |  | ||||||
|                 GradingStrategy, |  | ||||||
|                 AssessmentProgressService, |  | ||||||
|                 StudentScoreCalculator, |  | ||||||
|                 AssessmentStatisticsService |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             # Check GradingStrategy (ABC) |  | ||||||
|             if hasattr(GradingStrategy, '__abstractmethods__'): |  | ||||||
|                 abstract_methods = GradingStrategy.__abstractmethods__ |  | ||||||
|                 expected_abstract = {'calculate_score'} |  | ||||||
|                  |  | ||||||
|                 if expected_abstract.issubset(abstract_methods): |  | ||||||
|                     self.add_result( |  | ||||||
|                         "grading_strategy_abstract_methods", |  | ||||||
|                         True, |  | ||||||
|                         "GradingStrategy has the correct abstract methods" |  | ||||||
|                     ) |  | ||||||
|                 else: |  | ||||||
|                     self.add_result( |  | ||||||
|                         "grading_strategy_abstract_methods", |  | ||||||
|                         False, |  | ||||||
|                         f"Missing abstract methods: {expected_abstract - abstract_methods}" |  | ||||||
|                     ) |  | ||||||
|              |  | ||||||
|             # Check the public methods of the services |  | ||||||
|             service_methods = { |  | ||||||
|                 AssessmentProgressService: ['calculate_grading_progress'], |  | ||||||
|                 StudentScoreCalculator: ['calculate_student_scores'], |  | ||||||
|                 AssessmentStatisticsService: ['get_assessment_statistics'] |  | ||||||
|             } |  | ||||||
|              |  | ||||||
|             for service_class, expected_methods in service_methods.items(): |  | ||||||
|                 for method_name in expected_methods: |  | ||||||
|                     if hasattr(service_class, method_name): |  | ||||||
|                         self.add_result( |  | ||||||
|                             f"service_method_{service_class.__name__}_{method_name}", |  | ||||||
|                             True, |  | ||||||
|                             f"{service_class.__name__}.{method_name} present" |  | ||||||
|                         ) |  | ||||||
|                     else: |  | ||||||
|                         self.add_result( |  | ||||||
|                             f"service_method_{service_class.__name__}_{method_name}", |  | ||||||
|                             False, |  | ||||||
|                             f"Method {service_class.__name__}.{method_name} missing" |  | ||||||
|                         ) |  | ||||||
|          |  | ||||||
|         except ImportError as e: |  | ||||||
|             self.add_result( |  | ||||||
|                 "service_interfaces_validation", |  | ||||||
|                 False, |  | ||||||
|                 f"Unable to validate the interfaces: {e}" |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
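To make the abstract-method check above concrete, here is a minimal sketch of a conforming strategy. The exact signature of `calculate_score` is an assumption for illustration (the validator only verifies the method name), and `PassFailStrategy` does not exist in the codebase:

```python
from services.assessment_services import GradingStrategy

class PassFailStrategy(GradingStrategy):
    """Hypothetical strategy: half marks or better earns full credit."""

    def calculate_score(self, value, max_points):  # signature assumed
        # Interpret `value` as a raw numeric mark out of `max_points`.
        return max_points if float(value) >= max_points / 2 else 0.0
```

Because `GradingStrategy` declares `calculate_score` as abstract, a subclass that forgets to implement it cannot be instantiated, which is exactly what the Open/Closed check later in this script relies on.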
|     def validate_type_hints(self): |  | ||||||
|         """Validate the presence of type hints in the services.""" |  | ||||||
|          |  | ||||||
|         services_file = self.services_path / "assessment_services.py" |  | ||||||
|         if not services_file.exists(): |  | ||||||
|             self.add_result( |  | ||||||
|                 "type_hints_validation", |  | ||||||
|                 False, |  | ||||||
|                 "assessment_services.py file not found for type hint validation" |  | ||||||
|             ) |  | ||||||
|             return |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             # Parse the code to analyze the type hints |  | ||||||
|             with open(services_file, 'r', encoding='utf-8') as f: |  | ||||||
|                 content = f.read() |  | ||||||
|              |  | ||||||
|             tree = ast.parse(content) |  | ||||||
|              |  | ||||||
|             # Count functions with and without type hints |  | ||||||
|             functions_with_hints = 0 |  | ||||||
|             functions_without_hints = 0 |  | ||||||
|              |  | ||||||
|             for node in ast.walk(tree): |  | ||||||
|                 if isinstance(node, ast.FunctionDef): |  | ||||||
|                     # Skip dunder methods |  | ||||||
|                     if node.name.startswith('__') and node.name.endswith('__'): |  | ||||||
|                         continue |  | ||||||
|                      |  | ||||||
|                     has_return_annotation = node.returns is not None |  | ||||||
|                     # Skip the first argument only when it is `self`; module-level functions have no self |  | ||||||
|                     args_to_check = node.args.args[1:] if node.args.args and node.args.args[0].arg == 'self' else node.args.args |  | ||||||
|                     has_arg_annotations = any(arg.annotation is not None for arg in args_to_check) |  | ||||||
|                      |  | ||||||
|                     if has_return_annotation or has_arg_annotations: |  | ||||||
|                         functions_with_hints += 1 |  | ||||||
|                     else: |  | ||||||
|                         functions_without_hints += 1 |  | ||||||
|              |  | ||||||
|             total_functions = functions_with_hints + functions_without_hints |  | ||||||
|             if total_functions > 0: |  | ||||||
|                 hint_percentage = (functions_with_hints / total_functions) * 100 |  | ||||||
|                  |  | ||||||
|                 # Considered good if at least 80% of functions have type hints |  | ||||||
|                 passed = hint_percentage >= 80 |  | ||||||
|                 self.add_result( |  | ||||||
|                     "type_hints_coverage", |  | ||||||
|                     passed, |  | ||||||
|                     f"Type hint coverage: {hint_percentage:.1f}% ({functions_with_hints}/{total_functions})", |  | ||||||
|                     {"percentage": hint_percentage, "with_hints": functions_with_hints, "total": total_functions}, |  | ||||||
|                     severity="WARNING" if not passed else "INFO" |  | ||||||
|                 ) |  | ||||||
|              |  | ||||||
|         except Exception as e: |  | ||||||
|             self.add_result( |  | ||||||
|                 "type_hints_validation", |  | ||||||
|                 False, |  | ||||||
|                 f"Error while analyzing type hints: {e}", |  | ||||||
|                 severity="WARNING" |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def validate_test_coverage(self): |  | ||||||
|         """Validate the test coverage of the services.""" |  | ||||||
|          |  | ||||||
|         test_file = self.project_root / "tests" / "test_assessment_services.py" |  | ||||||
|         if not test_file.exists(): |  | ||||||
|             self.add_result( |  | ||||||
|                 "test_file_exists", |  | ||||||
|                 False, |  | ||||||
|                 "test_assessment_services.py file missing" |  | ||||||
|             ) |  | ||||||
|             return |  | ||||||
|          |  | ||||||
|         self.add_result( |  | ||||||
|             "test_file_exists", |  | ||||||
|             True, |  | ||||||
|             "Service test file present" |  | ||||||
|         ) |  | ||||||
|          |  | ||||||
|         # Analyze the content of the tests |  | ||||||
|         try: |  | ||||||
|             with open(test_file, 'r', encoding='utf-8') as f: |  | ||||||
|                 content = f.read() |  | ||||||
|              |  | ||||||
|             # Count the test classes and test methods |  | ||||||
|             tree = ast.parse(content) |  | ||||||
|             test_classes = 0 |  | ||||||
|             test_methods = 0 |  | ||||||
|              |  | ||||||
|             for node in ast.walk(tree): |  | ||||||
|                 if isinstance(node, ast.ClassDef) and node.name.startswith('Test'): |  | ||||||
|                     test_classes += 1 |  | ||||||
|                 elif isinstance(node, ast.FunctionDef) and node.name.startswith('test_'): |  | ||||||
|                     test_methods += 1 |  | ||||||
|              |  | ||||||
|             self.add_result( |  | ||||||
|                 "test_coverage_analysis", |  | ||||||
|                 test_methods >= 10,  # At least 10 tests |  | ||||||
|                 f"Tests found: {test_classes} classes, {test_methods} methods", |  | ||||||
|                 {"test_classes": test_classes, "test_methods": test_methods}, |  | ||||||
|                 severity="WARNING" if test_methods < 10 else "INFO" |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|         except Exception as e: |  | ||||||
|             self.add_result( |  | ||||||
|                 "test_coverage_analysis", |  | ||||||
|                 False, |  | ||||||
|                 f"Error while analyzing the tests: {e}", |  | ||||||
|                 severity="WARNING" |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def validate_solid_principles(self): |  | ||||||
|         """Validate that the architecture respects the SOLID principles.""" |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             from services.assessment_services import ( |  | ||||||
|                 GradingStrategy, |  | ||||||
|                 AssessmentProgressService, |  | ||||||
|                 StudentScoreCalculator, |  | ||||||
|                 AssessmentStatisticsService, |  | ||||||
|                 AssessmentServicesFacade |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             # Single Responsibility Principle: each service has one clear responsibility |  | ||||||
|             services_responsibilities = { |  | ||||||
|                 "AssessmentProgressService": "Progress calculation", |  | ||||||
|                 "StudentScoreCalculator": "Score calculation", |  | ||||||
|                 "AssessmentStatisticsService": "Statistics calculation", |  | ||||||
|                 "AssessmentServicesFacade": "Service orchestration" |  | ||||||
|             } |  | ||||||
|              |  | ||||||
|             self.add_result( |  | ||||||
|                 "solid_single_responsibility", |  | ||||||
|                 True, |  | ||||||
|                 f"Services with a single responsibility: {len(services_responsibilities)}", |  | ||||||
|                 {"services": list(services_responsibilities.keys())}, |  | ||||||
|                 severity="INFO" |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             # Open/Closed Principle: GradingStrategy is extensible |  | ||||||
|             if inspect.isabstract(GradingStrategy): |  | ||||||
|                 self.add_result( |  | ||||||
|                     "solid_open_closed", |  | ||||||
|                     True, |  | ||||||
|                     "The Strategy pattern allows extension without modification", |  | ||||||
|                     severity="INFO" |  | ||||||
|                 ) |  | ||||||
|             else: |  | ||||||
|                 self.add_result( |  | ||||||
|                     "solid_open_closed", |  | ||||||
|                     False, |  | ||||||
|                     "GradingStrategy should be an abstract class" |  | ||||||
|                 ) |  | ||||||
|              |  | ||||||
|             # Dependency Inversion: services depend on abstractions |  | ||||||
|             facade_init = inspect.signature(AssessmentServicesFacade.__init__) |  | ||||||
|             params = list(facade_init.parameters.keys()) |  | ||||||
|              |  | ||||||
|             # Check that the facade accepts injected services |  | ||||||
|             injectable_params = [p for p in params if not p.startswith('_') and p != 'self'] |  | ||||||
|              |  | ||||||
|             self.add_result( |  | ||||||
|                 "solid_dependency_inversion", |  | ||||||
|                 len(injectable_params) > 0, |  | ||||||
|                 f"Facade supports dependency injection: {injectable_params}", |  | ||||||
|                 {"injectable_parameters": injectable_params}, |  | ||||||
|                 severity="INFO" |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|         except Exception as e: |  | ||||||
|             self.add_result( |  | ||||||
|                 "solid_principles_validation", |  | ||||||
|                 False, |  | ||||||
|                 f"Error during SOLID validation: {e}", |  | ||||||
|                 severity="WARNING" |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def validate_compatibility_with_legacy(self): |  | ||||||
|         """Validate compatibility with the existing code.""" |  | ||||||
|          |  | ||||||
|         try: |  | ||||||
|             # Check that the new services can be used |  | ||||||
|             # with the existing models (context already initialized) |  | ||||||
|             from models import Assessment |  | ||||||
|             from services.assessment_services import AssessmentServicesFacade |  | ||||||
|          |  | ||||||
|             # Check that the services accept model instances |  | ||||||
|             # The facade requires providers - use the defaults |  | ||||||
|             from app_config import config_manager |  | ||||||
|              |  | ||||||
|             class MockDBProvider: |  | ||||||
|                 def get_db_session(self): |  | ||||||
|                     from models import db |  | ||||||
|                     return db.session |  | ||||||
|              |  | ||||||
|             facade = AssessmentServicesFacade( |  | ||||||
|                 config_provider=config_manager,  |  | ||||||
|                 db_provider=MockDBProvider() |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|             # Test with None (no real Assessment exists in the validation context) |  | ||||||
|             try: |  | ||||||
|                 # These calls should handle None gracefully or raise consistent errors |  | ||||||
|                 facade.calculate_grading_progress(None) |  | ||||||
|             except Exception as e: |  | ||||||
|                 # A consistent error is expected, not a crash |  | ||||||
|                 if "None" in str(e) or "NoneType" in str(e): |  | ||||||
|                     self.add_result( |  | ||||||
|                         "legacy_compatibility_error_handling", |  | ||||||
|                         True, |  | ||||||
|                         "Services handle invalid input correctly", |  | ||||||
|                         severity="INFO" |  | ||||||
|                     ) |  | ||||||
|                 else: |  | ||||||
|                     self.add_result( |  | ||||||
|                         "legacy_compatibility_error_handling", |  | ||||||
|                         False, |  | ||||||
|                         f"Unexpected error: {e}", |  | ||||||
|                         severity="WARNING" |  | ||||||
|                     ) |  | ||||||
|             else: |  | ||||||
|                 # Returning without an exception also counts as graceful handling |  | ||||||
|                 self.add_result( |  | ||||||
|                     "legacy_compatibility_error_handling", |  | ||||||
|                     True, |  | ||||||
|                     "Services handled a None assessment without raising", |  | ||||||
|                     severity="INFO" |  | ||||||
|                 ) |  | ||||||
|              |  | ||||||
|             self.add_result( |  | ||||||
|                 "legacy_compatibility_import", |  | ||||||
|                 True, |  | ||||||
|                 "Services importable alongside existing models" |  | ||||||
|             ) |  | ||||||
|              |  | ||||||
|         except Exception as e: |  | ||||||
|             self.add_result( |  | ||||||
|                 "legacy_compatibility_import", |  | ||||||
|                 False, |  | ||||||
|                 f"Compatibility problem: {e}" |  | ||||||
|             ) |  | ||||||
|      |  | ||||||
|     def run_full_validation(self) -> Dict[str, Any]: |  | ||||||
|         """Run the full architecture validation.""" |  | ||||||
|          |  | ||||||
|         print("🔍 Validating the Refactored Services Architecture") |  | ||||||
|         print("=" * 60) |  | ||||||
|          |  | ||||||
|         # Run the validations in logical order |  | ||||||
|         self.validate_services_module_structure() |  | ||||||
|         self.validate_assessment_services_classes() |  | ||||||
|         self.validate_service_interfaces() |  | ||||||
|         self.validate_type_hints() |  | ||||||
|         self.validate_test_coverage() |  | ||||||
|         self.validate_solid_principles() |  | ||||||
|         self.validate_compatibility_with_legacy() |  | ||||||
|          |  | ||||||
|         # Analyze the results |  | ||||||
|         total_tests = len(self.results) |  | ||||||
|         passed_tests = sum(1 for r in self.results if r.passed) |  | ||||||
|         failed_tests = total_tests - passed_tests |  | ||||||
|          |  | ||||||
|         errors = [r for r in self.results if not r.passed and r.severity == "ERROR"] |  | ||||||
|         warnings = [r for r in self.results if not r.passed and r.severity == "WARNING"] |  | ||||||
|          |  | ||||||
|         print("\n" + "=" * 60) |  | ||||||
|         print("📊 VALIDATION SUMMARY") |  | ||||||
|         print("=" * 60) |  | ||||||
|          |  | ||||||
|         print(f"✅ Tests passed: {passed_tests}/{total_tests}") |  | ||||||
|         print(f"❌ Errors: {len(errors)}") |  | ||||||
|         print(f"⚠️  Warnings: {len(warnings)}") |  | ||||||
|          |  | ||||||
|         if errors: |  | ||||||
|             print("\n🔴 ERRORS TO FIX:") |  | ||||||
|             for error in errors: |  | ||||||
|                 print(f"  - {error.name}: {error.message}") |  | ||||||
|          |  | ||||||
|         if warnings: |  | ||||||
|             print("\n🟡 WARNINGS:") |  | ||||||
|             for warning in warnings: |  | ||||||
|                 print(f"  - {warning.name}: {warning.message}") |  | ||||||
|          |  | ||||||
|         # Determine whether the architecture is ready for migration |  | ||||||
|         migration_ready = len(errors) == 0 |  | ||||||
|          |  | ||||||
|         print(f"\n🚀 Migration readiness: {'✅ READY' if migration_ready else '❌ NOT READY'}") |  | ||||||
|          |  | ||||||
|         if migration_ready: |  | ||||||
|             print("   The architecture is properly prepared for the progressive migration.") |  | ||||||
|         else: |  | ||||||
|             print("   Fix the errors before starting the migration.") |  | ||||||
|          |  | ||||||
|         return { |  | ||||||
|             'total_tests': total_tests, |  | ||||||
|             'passed_tests': passed_tests, |  | ||||||
|             'failed_tests': failed_tests, |  | ||||||
|             'errors': [{'name': e.name, 'message': e.message} for e in errors], |  | ||||||
|             'warnings': [{'name': w.name, 'message': w.message} for w in warnings], |  | ||||||
|             'migration_ready': migration_ready, |  | ||||||
|             'results': self.results |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def main(): |  | ||||||
|     """Main entry point for the script.""" |  | ||||||
|      |  | ||||||
|     import argparse |  | ||||||
|      |  | ||||||
|     parser = argparse.ArgumentParser(description="Service architecture validation") |  | ||||||
|     parser.add_argument('--json', action='store_true', |  | ||||||
|                        help='Output in JSON format') |  | ||||||
|      |  | ||||||
|     args = parser.parse_args() |  | ||||||
|      |  | ||||||
|     validator = ArchitectureValidator() |  | ||||||
|     results = validator.run_full_validation() |  | ||||||
|      |  | ||||||
|     if args.json: |  | ||||||
|         import json |  | ||||||
|         # Convert ValidationResult objects to dicts for JSON |  | ||||||
|         json_results = results.copy() |  | ||||||
|         json_results['results'] = [ |  | ||||||
|             { |  | ||||||
|                 'name': r.name, |  | ||||||
|                 'passed': r.passed, |  | ||||||
|                 'message': r.message, |  | ||||||
|                 'details': r.details, |  | ||||||
|                 'severity': r.severity |  | ||||||
|             } |  | ||||||
|             for r in results['results'] |  | ||||||
|         ] |  | ||||||
|         print(json.dumps(json_results, indent=2)) |  | ||||||
|      |  | ||||||
|     # Appropriate exit code |  | ||||||
|     sys.exit(0 if results['migration_ready'] else 1) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     main() |  | ||||||
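As a usage sketch, the validator can also be driven programmatically, e.g. from a pre-migration gate in CI. The import path `scripts.validate_architecture` is an assumption about where this file lives:

```python
from scripts.validate_architecture import ArchitectureValidator  # path assumed

validator = ArchitectureValidator()
report = validator.run_full_validation()

# Gate the migration on a clean report: warnings are tolerated, errors are not.
if report['migration_ready']:
    print(f"OK: {report['passed_tests']}/{report['total_tests']} checks passed")
else:
    for error in report['errors']:
        print(f"blocking: {error['name']} - {error['message']}")
```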
| @@ -160,15 +160,8 @@ const Notytex = { | |||||||
|             const url = new URL(window.location); |             const url = new URL(window.location); | ||||||
|             url.search = params.toString(); |             url.search = params.toString(); | ||||||
|              |              | ||||||
|             // Navigate without a full page reload when possible |             // Reload the page with the new filters | ||||||
|             if (history.pushState) { |             window.location.href = url.toString(); | ||||||
|                 history.pushState(null, '', url.toString()); |  | ||||||
|                 // Fire a custom event for components that are listening |  | ||||||
|                 window.dispatchEvent(new CustomEvent('filtersChanged', { detail: params })); |  | ||||||
|             } else { |  | ||||||
|                 // Fallback for older browsers |  | ||||||
|                 window.location.href = url.toString(); |  | ||||||
|             } |  | ||||||
|         }, |         }, | ||||||
|  |  | ||||||
|         /** |         /** | ||||||
|   | |||||||
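Since the filters now round-trip through a full page load, the server can read them straight from the query string on each request. A minimal Flask-side sketch; the blueprint, route, template path, and parameter defaults below are assumptions for illustration, not the project's actual code:

```python
from flask import Blueprint, render_template, request

assessments_bp = Blueprint('assessments', __name__)  # hypothetical blueprint

@assessments_bp.route('/assessments')
def list_assessments():
    # Each filter arrives as a plain query parameter, e.g. ?trimester=2&correction=incomplete
    current_trimester = request.args.get('trimester', '')
    current_class = request.args.get('class', '')
    current_correction = request.args.get('correction', '')
    current_sort = request.args.get('sort', 'date_desc')

    # ... query and sort the assessments according to the selected filters ...

    return render_template(
        'assessments/index.html',  # template path assumed
        current_trimester=current_trimester,
        current_class=current_class,
        current_correction=current_correction,
        current_sort=current_sort,
    )
```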
| @@ -52,6 +52,16 @@ | |||||||
|             'label': 'Classe', |             'label': 'Classe', | ||||||
|             'options': class_options |             'options': class_options | ||||||
|         }, |         }, | ||||||
|  |         { | ||||||
|  |             'id': 'correction-filter', | ||||||
|  |             'label': 'Correction', | ||||||
|  |             'options': [ | ||||||
|  |                 {'value': '', 'label': 'Toutes'}, | ||||||
|  |                 {'value': 'incomplete', 'label': 'Non terminées'}, | ||||||
|  |                 {'value': 'complete', 'label': 'Terminées'}, | ||||||
|  |                 {'value': 'not_started', 'label': 'Non commencées'} | ||||||
|  |             ] | ||||||
|  |         }, | ||||||
|         { |         { | ||||||
|             'id': 'sort-filter', |             'id': 'sort-filter', | ||||||
|             'label': 'Tri', |             'label': 'Tri', | ||||||
| @@ -64,7 +74,7 @@ | |||||||
|         } |         } | ||||||
|     ] %} |     ] %} | ||||||
|      |      | ||||||
|     {% call filter_section(filters, {'trimester-filter': current_trimester, 'class-filter': current_class, 'sort-filter': current_sort}) %} |     {% call filter_section(filters, {'trimester-filter': current_trimester, 'class-filter': current_class, 'correction-filter': current_correction, 'sort-filter': current_sort}) %} | ||||||
|         <div class="flex items-center space-x-4"> |         <div class="flex items-center space-x-4"> | ||||||
|             <div class="text-sm text-gray-500 font-medium"> |             <div class="text-sm text-gray-500 font-medium"> | ||||||
|                 {{ assessments|length }} évaluation(s) |                 {{ assessments|length }} évaluation(s) | ||||||
|   | |||||||
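For context, the three `correction` filter values map onto grading progress. One plausible predicate, sketched here against an assumed per-assessment `progress_pct` value (0-100); the actual field and helper names in the codebase may differ:

```python
def matches_correction_filter(progress_pct: float, filter_value: str) -> bool:
    """Hypothetical predicate matching an assessment against the correction filter."""
    if filter_value == 'complete':
        return progress_pct >= 100.0   # fully graded
    if filter_value == 'incomplete':
        return progress_pct < 100.0    # partially graded or not started
    if filter_value == 'not_started':
        return progress_pct == 0.0     # no grade entered yet
    return True                        # '' selects everything

# Example: with 12 of 25 grades entered (48%), the assessment shows up
# under 'incomplete' but not under 'complete' or 'not_started'.
assert matches_correction_filter(48.0, 'incomplete')
assert not matches_correction_filter(48.0, 'complete')
assert not matches_correction_filter(48.0, 'not_started')
```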
| @@ -191,7 +191,7 @@ | |||||||
|             {% for filter in filters %} |             {% for filter in filters %} | ||||||
|                 <div class="flex items-center space-x-2"> |                 <div class="flex items-center space-x-2"> | ||||||
|                     <label class="text-sm font-medium text-gray-700">{{ filter.label }} :</label> |                     <label class="text-sm font-medium text-gray-700">{{ filter.label }} :</label> | ||||||
|                     <select data-filter="{{ filter.id.replace('-filter', '') }}" class="filter-control"> |                     <select data-filter="{{ filter.id.replace('-filter', '') }}" class="filter-control border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-blue-500 focus:border-blue-500 bg-white shadow-sm hover:border-gray-400 transition-colors"> | ||||||
|                         {% for option in filter.options %} |                         {% for option in filter.options %} | ||||||
|                             <option value="{{ option.value }}" {% if current_values.get(filter.id) == option.value %}selected{% endif %}> |                             <option value="{{ option.value }}" {% if current_values.get(filter.id) == option.value %}selected{% endif %}> | ||||||
|                                 {{ option.label }} |                                 {{ option.label }} | ||||||
|   | |||||||