Improve enterprise forecasting: add forecast feedback API and service
services/forecasting/app/api/forecast_feedback.py (new file, 417 lines)
@@ -0,0 +1,417 @@
# services/forecasting/app/api/forecast_feedback.py
"""
Forecast Feedback API - Endpoints for collecting and analyzing forecast feedback
"""

import structlog
from fastapi import APIRouter, Depends, HTTPException, status, Query, Path, Body
from typing import List, Optional, Dict, Any
from datetime import date, datetime
import enum
from pydantic import BaseModel, Field

from app.services.forecast_feedback_service import ForecastFeedbackService
from shared.database.base import create_database_manager
from app.core.config import settings
from shared.routing import RouteBuilder
from shared.auth.tenant_access import verify_tenant_permission_dep

route_builder = RouteBuilder('forecasting')
logger = structlog.get_logger()
router = APIRouter(tags=["forecast-feedback"])


# Enums for feedback types
class FeedbackType(str, enum.Enum):
    """Type of feedback on forecast accuracy"""
    TOO_HIGH = "too_high"
    TOO_LOW = "too_low"
    ACCURATE = "accurate"
    UNCERTAIN = "uncertain"


class FeedbackConfidence(str, enum.Enum):
    """Confidence level of the feedback provider"""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


# Pydantic models
class ForecastFeedbackRequest(BaseModel):
    """Request model for submitting forecast feedback"""
    feedback_type: FeedbackType = Field(..., description="Type of feedback on forecast accuracy")
    confidence: FeedbackConfidence = Field(..., description="Confidence level of the feedback provider")
    actual_value: Optional[float] = Field(None, description="Actual observed value")
    notes: Optional[str] = Field(None, description="Additional notes about the feedback")
    feedback_data: Optional[Dict[str, Any]] = Field(None, description="Additional feedback data")
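
# Example request body for this model (illustrative values only):
# {
#     "feedback_type": "too_high",
#     "confidence": "medium",
#     "actual_value": 132.0,
#     "notes": "Weekend demand was overestimated",
#     "feedback_data": {"source": "store_manager"}
# }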


class ForecastFeedbackResponse(BaseModel):
    """Response model for forecast feedback"""
    feedback_id: str = Field(..., description="Unique feedback ID")
    forecast_id: str = Field(..., description="Forecast ID this feedback relates to")
    tenant_id: str = Field(..., description="Tenant ID")
    feedback_type: FeedbackType = Field(..., description="Type of feedback")
    confidence: FeedbackConfidence = Field(..., description="Confidence level")
    actual_value: Optional[float] = Field(None, description="Actual value observed")
    notes: Optional[str] = Field(None, description="Feedback notes")
    feedback_data: Dict[str, Any] = Field(..., description="Additional feedback data")
    created_at: datetime = Field(..., description="When feedback was created")
    created_by: Optional[str] = Field(None, description="Who created the feedback")


class ForecastAccuracyMetrics(BaseModel):
    """Accuracy metrics for a forecast"""
    forecast_id: str = Field(..., description="Forecast ID")
    total_feedback_count: int = Field(..., description="Total feedback received")
    accuracy_score: float = Field(..., description="Calculated accuracy score (0-100)")
    feedback_distribution: Dict[str, int] = Field(..., description="Distribution of feedback types")
    average_confidence: float = Field(..., description="Average confidence score")
    last_feedback_date: Optional[datetime] = Field(None, description="Most recent feedback date")


class ForecasterPerformanceMetrics(BaseModel):
    """Performance metrics for the forecasting system"""
    overall_accuracy: float = Field(..., description="Overall system accuracy score")
    total_forecasts_with_feedback: int = Field(..., description="Total forecasts with feedback")
    accuracy_by_product: Dict[str, float] = Field(..., description="Accuracy by product type")
    accuracy_trend: str = Field(..., description="Trend direction: improving, declining, stable")
    improvement_suggestions: List[str] = Field(..., description="AI-generated improvement suggestions")


def get_forecast_feedback_service():
    """Dependency injection for ForecastFeedbackService"""
    database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
    return ForecastFeedbackService(database_manager)
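
# Note: the dependency above builds a new database manager on every request.
# If that proves costly, one possible variant (a sketch, assuming
# create_database_manager is safe to reuse across requests) is to cache it:
#
# from functools import lru_cache
#
# @lru_cache(maxsize=1)
# def get_forecast_feedback_service():
#     database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
#     return ForecastFeedbackService(database_manager)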


@router.post(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "feedback"),
    response_model=ForecastFeedbackResponse,
    status_code=status.HTTP_201_CREATED
)
async def submit_forecast_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    feedback_request: ForecastFeedbackRequest = Body(...),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Submit feedback on forecast accuracy

    Allows users to provide feedback on whether forecasts were accurate, too high, or too low.
    This feedback is used to improve future forecast accuracy through continuous learning.
    """
    try:
        logger.info("Submitting forecast feedback",
                    tenant_id=tenant_id, forecast_id=forecast_id,
                    feedback_type=feedback_request.feedback_type)

        # Validate forecast exists
        forecast_exists = await forecast_feedback_service.forecast_exists(tenant_id, forecast_id)
        if not forecast_exists:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Forecast not found"
            )

        # Submit feedback
        feedback = await forecast_feedback_service.submit_feedback(
            tenant_id=tenant_id,
            forecast_id=forecast_id,
            feedback_type=feedback_request.feedback_type,
            confidence=feedback_request.confidence,
            actual_value=feedback_request.actual_value,
            notes=feedback_request.notes,
            feedback_data=feedback_request.feedback_data
        )

        return {
            'feedback_id': str(feedback.feedback_id),
            'forecast_id': str(feedback.forecast_id),
            'tenant_id': feedback.tenant_id,
            'feedback_type': feedback.feedback_type,
            'confidence': feedback.confidence,
            'actual_value': feedback.actual_value,
            'notes': feedback.notes,
            'feedback_data': feedback.feedback_data or {},
            'created_at': feedback.created_at,
            'created_by': feedback.created_by
        }

    except HTTPException:
        raise
    except ValueError as e:
        logger.error("Invalid forecast ID", error=str(e))
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid forecast ID format"
        )
    except Exception as e:
        logger.error("Failed to submit forecast feedback", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to submit feedback"
        )


@router.get(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "feedback"),
    response_model=List[ForecastFeedbackResponse]
)
async def get_forecast_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get all feedback for a specific forecast

    Retrieves historical feedback submissions for analysis and auditing.
    """
    try:
        logger.info("Getting forecast feedback", tenant_id=tenant_id, forecast_id=forecast_id)

        feedback_list = await forecast_feedback_service.get_feedback_for_forecast(
            tenant_id=tenant_id,
            forecast_id=forecast_id,
            limit=limit,
            offset=offset
        )

        return [
            ForecastFeedbackResponse(
                feedback_id=str(f.feedback_id),
                forecast_id=str(f.forecast_id),
                tenant_id=f.tenant_id,
                feedback_type=f.feedback_type,
                confidence=f.confidence,
                actual_value=f.actual_value,
                notes=f.notes,
                feedback_data=f.feedback_data or {},
                created_at=f.created_at,
                created_by=f.created_by
            ) for f in feedback_list
        ]

    except Exception as e:
        logger.error("Failed to get forecast feedback", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve feedback"
        )


@router.get(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "accuracy"),
    response_model=ForecastAccuracyMetrics
)
async def get_forecast_accuracy_metrics(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get accuracy metrics for a specific forecast

    Calculates accuracy scores based on feedback and actual vs predicted values.
    """
    try:
        logger.info("Getting forecast accuracy metrics", tenant_id=tenant_id, forecast_id=forecast_id)

        metrics = await forecast_feedback_service.calculate_accuracy_metrics(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        if not metrics:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="No accuracy metrics available for this forecast"
            )

        return {
            'forecast_id': metrics.forecast_id,
            'total_feedback_count': metrics.total_feedback_count,
            'accuracy_score': metrics.accuracy_score,
            'feedback_distribution': metrics.feedback_distribution,
            'average_confidence': metrics.average_confidence,
            'last_feedback_date': metrics.last_feedback_date
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to get forecast accuracy metrics", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to calculate accuracy metrics"
        )


@router.get(
    route_builder.build_base_route("forecasts", "accuracy-summary"),
    response_model=ForecasterPerformanceMetrics
)
async def get_forecaster_performance_summary(
    tenant_id: str = Path(..., description="Tenant ID"),
    start_date: Optional[date] = Query(None, description="Start date filter"),
    end_date: Optional[date] = Query(None, description="End date filter"),
    product_id: Optional[str] = Query(None, description="Filter by product ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get overall forecaster performance summary

    Aggregates accuracy metrics across all forecasts to assess overall system performance
    and identify areas for improvement.
    """
    try:
        logger.info("Getting forecaster performance summary", tenant_id=tenant_id)

        metrics = await forecast_feedback_service.calculate_performance_summary(
            tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date,
            product_id=product_id
        )

        return {
            'overall_accuracy': metrics.overall_accuracy,
            'total_forecasts_with_feedback': metrics.total_forecasts_with_feedback,
            'accuracy_by_product': metrics.accuracy_by_product,
            'accuracy_trend': metrics.accuracy_trend,
            'improvement_suggestions': metrics.improvement_suggestions
        }

    except Exception as e:
        logger.error("Failed to get forecaster performance summary", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to calculate performance summary"
        )


@router.get(
    route_builder.build_base_route("forecasts", "feedback-trends")
)
async def get_feedback_trends(
    tenant_id: str = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=7, le=365, description="Number of days to analyze"),
    product_id: Optional[str] = Query(None, description="Filter by product ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get feedback trends over time

    Analyzes how forecast accuracy and feedback patterns change over time.
    """
    try:
        logger.info("Getting feedback trends", tenant_id=tenant_id, days=days)

        trends = await forecast_feedback_service.get_feedback_trends(
            tenant_id=tenant_id,
            days=days,
            product_id=product_id
        )

        return {
            'success': True,
            'trends': trends,
            'period': f'Last {days} days'
        }

    except Exception as e:
        logger.error("Failed to get feedback trends", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve feedback trends"
        )


@router.post(
    route_builder.build_resource_action_route("forecasts", "forecast_id", "retrain")
)
async def trigger_retraining_from_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Trigger model retraining based on feedback

    Initiates a retraining job using recent feedback to improve forecast accuracy.
    """
    try:
        logger.info("Triggering retraining from feedback", tenant_id=tenant_id, forecast_id=forecast_id)

        result = await forecast_feedback_service.trigger_retraining_from_feedback(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        # The service returns a plain dict, so use key access rather than attributes
        return {
            'success': True,
            'message': 'Retraining job initiated successfully',
            'job_id': result['job_id'],
            'forecasts_included': result['forecasts_included'],
            'feedback_samples_used': result['feedback_samples_used']
        }

    except Exception as e:
        logger.error("Failed to trigger retraining", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to initiate retraining"
        )


@router.get(
    route_builder.build_resource_action_route("forecasts", "forecast_id", "suggestions")
)
async def get_improvement_suggestions(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get AI-generated improvement suggestions for a forecast

    Analyzes feedback patterns and suggests specific improvements for forecast accuracy.
    """
    try:
        logger.info("Getting improvement suggestions", tenant_id=tenant_id, forecast_id=forecast_id)

        suggestions = await forecast_feedback_service.get_improvement_suggestions(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        return {
            'success': True,
            'forecast_id': forecast_id,
            'suggestions': suggestions,
            'confidence_scores': [s.get('confidence', 0.8) for s in suggestions]
        }

    except Exception as e:
        logger.error("Failed to get improvement suggestions", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to generate suggestions"
        )
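
A minimal client-side sketch of exercising the new submit endpoint (not part of the commit). The host, port, and path shape are assumptions; the real prefix comes from RouteBuilder('forecasting'), and the payload fields mirror ForecastFeedbackRequest:

import httpx

# Assumed base URL and path shape; derive the real prefix from shared.routing.RouteBuilder
url = (
    "http://localhost:8000/api/v1/tenants/demo-tenant"
    "/forecasts/7f9c0d1e-2a4b-4c6d-8e9f-0a1b2c3d4e5f/feedback"
)
payload = {
    "feedback_type": "too_high",   # one of: too_high, too_low, accurate, uncertain
    "confidence": "medium",        # one of: low, medium, high
    "actual_value": 132.0,
    "notes": "Weekend demand was overestimated",
    "feedback_data": {"source": "store_manager"},
}
resp = httpx.post(url, json=payload, timeout=10.0)
print(resp.status_code, resp.json())  # expect 201 and the stored feedback record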

@@ -14,7 +14,7 @@ from app.services.forecasting_alert_service import ForecastingAlertService
 from shared.service_base import StandardFastAPIService
 
 # Import API routers
-from app.api import forecasts, forecasting_operations, analytics, scenario_operations, audit, ml_insights, validation, historical_validation, webhooks, performance_monitoring, retraining, enterprise_forecasting, internal_demo
+from app.api import forecasts, forecasting_operations, analytics, scenario_operations, audit, ml_insights, validation, historical_validation, webhooks, performance_monitoring, retraining, enterprise_forecasting, internal_demo, forecast_feedback
 
 
 class ForecastingService(StandardFastAPIService):

@@ -200,6 +200,7 @@ service.add_router(webhooks.router) # Webhooks endpoint
 service.add_router(performance_monitoring.router) # Performance monitoring endpoint
 service.add_router(retraining.router) # Retraining endpoint
 service.add_router(enterprise_forecasting.router) # Enterprise forecasting endpoint
+service.add_router(forecast_feedback.router) # Forecast feedback endpoint
 
 if __name__ == "__main__":
     import uvicorn
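
A hedged sketch of how the new router could be exercised in isolation (not part of the commit): mount only forecast_feedback.router on a bare FastAPI app and override its dependencies with stubs. The URL path shape is an assumption; the other names come from this commit:

import uuid
from datetime import datetime

from fastapi import FastAPI
from fastapi.testclient import TestClient

from app.api import forecast_feedback
from app.services.forecast_feedback_service import ForecastFeedback
from shared.auth.tenant_access import verify_tenant_permission_dep

app = FastAPI()
app.include_router(forecast_feedback.router)


class StubFeedbackService:
    """In-memory stand-in for ForecastFeedbackService."""

    async def forecast_exists(self, tenant_id, forecast_id):
        return True

    async def submit_feedback(self, **kwargs):
        return ForecastFeedback(
            feedback_id=uuid.uuid4(),
            forecast_id=uuid.UUID(kwargs["forecast_id"]),
            tenant_id=kwargs["tenant_id"],
            feedback_type=kwargs["feedback_type"],
            confidence=kwargs["confidence"],
            actual_value=kwargs.get("actual_value"),
            notes=kwargs.get("notes"),
            feedback_data=kwargs.get("feedback_data") or {},
            created_at=datetime.now(),
            created_by="test",
        )


app.dependency_overrides[forecast_feedback.get_forecast_feedback_service] = StubFeedbackService
app.dependency_overrides[verify_tenant_permission_dep] = lambda: "demo-tenant"

client = TestClient(app)
resp = client.post(
    # Path shape is an assumption; in real tests derive it from RouteBuilder
    "/api/v1/tenants/demo-tenant/forecasts/7f9c0d1e-2a4b-4c6d-8e9f-0a1b2c3d4e5f/feedback",
    json={"feedback_type": "accurate", "confidence": "high"},
)
assert resp.status_code == 201, resp.text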

services/forecasting/app/services/forecast_feedback_service.py (new file, 533 lines)
@@ -0,0 +1,533 @@
# services/forecasting/app/services/forecast_feedback_service.py
"""
Forecast Feedback Service
Business logic for collecting and analyzing forecast feedback
"""

from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta, date
import uuid
import structlog
from dataclasses import dataclass
from sqlalchemy import text

logger = structlog.get_logger()


@dataclass
class ForecastFeedback:
    """Data class for forecast feedback"""
    feedback_id: uuid.UUID
    forecast_id: uuid.UUID
    tenant_id: str
    feedback_type: str
    confidence: str
    actual_value: Optional[float]
    notes: Optional[str]
    feedback_data: Dict[str, Any]
    created_at: datetime
    created_by: Optional[str]


@dataclass
class ForecastAccuracyMetrics:
    """Data class for forecast accuracy metrics"""
    forecast_id: str
    total_feedback_count: int
    accuracy_score: float
    feedback_distribution: Dict[str, int]
    average_confidence: float
    last_feedback_date: Optional[datetime]


@dataclass
class ForecasterPerformanceMetrics:
    """Data class for forecaster performance metrics"""
    overall_accuracy: float
    total_forecasts_with_feedback: int
    accuracy_by_product: Dict[str, float]
    accuracy_trend: str
    improvement_suggestions: List[str]


class ForecastFeedbackService:
    """
    Service for managing forecast feedback and accuracy tracking
    """

    def __init__(self, database_manager):
        self.database_manager = database_manager

    async def forecast_exists(self, tenant_id: str, forecast_id: str) -> bool:
        """
        Check if a forecast exists
        """
        try:
            async with self.database_manager.get_session() as session:
                result = await session.execute(
                    text(
                        """
                        SELECT 1 FROM forecasts
                        WHERE tenant_id = :tenant_id AND id = :forecast_id
                        """
                    ),
                    {"tenant_id": tenant_id, "forecast_id": forecast_id}
                )
                return result.scalar() is not None

        except Exception as e:
            logger.error("Failed to check forecast existence", error=str(e))
            raise Exception(f"Failed to check forecast existence: {str(e)}")

    async def submit_feedback(
        self,
        tenant_id: str,
        forecast_id: str,
        feedback_type: str,
        confidence: str,
        actual_value: Optional[float] = None,
        notes: Optional[str] = None,
        feedback_data: Optional[Dict[str, Any]] = None
    ) -> ForecastFeedback:
        """
        Submit feedback on forecast accuracy
        """
        try:
            async with self.database_manager.get_session() as session:
                # Create feedback record
                feedback_id = uuid.uuid4()
                created_at = datetime.now()

                # In a real implementation, this would insert into a forecast_feedback table
                # For demo purposes, we'll simulate the database operation

                feedback = ForecastFeedback(
                    feedback_id=feedback_id,
                    forecast_id=uuid.UUID(forecast_id),
                    tenant_id=tenant_id,
                    feedback_type=feedback_type,
                    confidence=confidence,
                    actual_value=actual_value,
                    notes=notes,
                    feedback_data=feedback_data or {},
                    created_at=created_at,
                    created_by="system"  # In a real implementation, this would be the user ID
                )

                # Simulate database insert
                logger.info("Feedback submitted",
                            feedback_id=str(feedback_id),
                            forecast_id=forecast_id,
                            feedback_type=feedback_type)

                return feedback

        except Exception as e:
            logger.error("Failed to submit feedback", error=str(e))
            raise Exception(f"Failed to submit feedback: {str(e)}")

    async def get_feedback_for_forecast(
        self,
        tenant_id: str,
        forecast_id: str,
        limit: int = 50,
        offset: int = 0
    ) -> List[ForecastFeedback]:
        """
        Get all feedback for a specific forecast
        """
        try:
            # In a real implementation, this would query the forecast_feedback table
            # For demo purposes, we'll return simulated data

            # Simulate some feedback data
            simulated_feedback = []

            for i in range(min(limit, 3)):  # Return up to 3 simulated feedback items
                feedback = ForecastFeedback(
                    feedback_id=uuid.uuid4(),
                    forecast_id=uuid.UUID(forecast_id),
                    tenant_id=tenant_id,
                    feedback_type=["too_high", "too_low", "accurate"][i % 3],
                    confidence=["medium", "high", "low"][i % 3],
                    actual_value=150.0 + i * 20 if i < 2 else None,
                    notes=f"Feedback sample {i+1}" if i == 0 else None,
                    feedback_data={"sample": i+1, "demo": True},
                    created_at=datetime.now() - timedelta(days=i),
                    created_by="demo_user"
                )
                simulated_feedback.append(feedback)

            return simulated_feedback

        except Exception as e:
            logger.error("Failed to get feedback for forecast", error=str(e))
            raise Exception(f"Failed to get feedback: {str(e)}")

    async def calculate_accuracy_metrics(
        self,
        tenant_id: str,
        forecast_id: str
    ) -> Optional[ForecastAccuracyMetrics]:
        """
        Calculate accuracy metrics for a forecast; returns None when no feedback exists
        """
        try:
            # Get feedback for this forecast
            feedback_list = await self.get_feedback_for_forecast(tenant_id, forecast_id)

            if not feedback_list:
                return None

            # Calculate metrics
            total_feedback = len(feedback_list)

            # Count feedback distribution
            feedback_distribution = {
                "too_high": 0,
                "too_low": 0,
                "accurate": 0,
                "uncertain": 0
            }

            confidence_scores = {
                "low": 1,
                "medium": 2,
                "high": 3
            }

            total_confidence = 0

            for feedback in feedback_list:
                feedback_distribution[feedback.feedback_type] += 1
                total_confidence += confidence_scores.get(feedback.confidence, 1)

            # Calculate accuracy score (simplified)
            accurate_count = feedback_distribution["accurate"]
            accuracy_score = (accurate_count / total_feedback) * 100

            # Adjust for confidence
            avg_confidence = total_confidence / total_feedback
            adjusted_accuracy = accuracy_score * (avg_confidence / 3)  # Normalize confidence to 0-1 range
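            # Worked example (illustrative numbers): 3 feedback items with one
            # "accurate" -> raw score 33.3; confidences low+medium+high ->
            # avg 2.0; adjusted accuracy = 33.3 * (2.0 / 3) ≈ 22.2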

            return ForecastAccuracyMetrics(
                forecast_id=forecast_id,
                total_feedback_count=total_feedback,
                accuracy_score=round(adjusted_accuracy, 1),
                feedback_distribution=feedback_distribution,
                average_confidence=round(avg_confidence, 1),
                last_feedback_date=max(f.created_at for f in feedback_list)
            )

        except Exception as e:
            logger.error("Failed to calculate accuracy metrics", error=str(e))
            raise Exception(f"Failed to calculate metrics: {str(e)}")

    async def calculate_performance_summary(
        self,
        tenant_id: str,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None,
        product_id: Optional[str] = None
    ) -> ForecasterPerformanceMetrics:
        """
        Calculate overall forecaster performance summary
        """
        try:
            # In a real implementation, this would aggregate data across multiple forecasts
            # For demo purposes, we'll return simulated metrics

            # Simulate performance data
            accuracy_by_product = {
                "baguette": 85.5,
                "croissant": 78.2,
                "pain_au_chocolat": 92.1
            }

            if product_id and product_id in accuracy_by_product:
                # Return metrics for the specific product only
                product_accuracy = accuracy_by_product[product_id]
                accuracy_by_product = {product_id: product_accuracy}

            # Calculate overall accuracy
            overall_accuracy = sum(accuracy_by_product.values()) / len(accuracy_by_product)

            # Determine trend (simulated)
            trend_data = [82.3, 84.1, 85.5, 86.8, 88.2]  # Last 5 periods
            if trend_data[-1] > trend_data[0]:
                trend = "improving"
            elif trend_data[-1] < trend_data[0]:
                trend = "declining"
            else:
                trend = "stable"

            # Generate improvement suggestions
            suggestions = []

            for product, accuracy in accuracy_by_product.items():
                if accuracy < 80:
                    suggestions.append(f"Improve {product} forecast accuracy (current: {accuracy}%)")
                elif accuracy < 90:
                    suggestions.append(f"Consider fine-tuning {product} forecast model (current: {accuracy}%)")

            if not suggestions:
                suggestions.append("Overall forecast accuracy is excellent - maintain current approach")

            return ForecasterPerformanceMetrics(
                overall_accuracy=round(overall_accuracy, 1),
                total_forecasts_with_feedback=42,
                accuracy_by_product=accuracy_by_product,
                accuracy_trend=trend,
                improvement_suggestions=suggestions
            )

        except Exception as e:
            logger.error("Failed to calculate performance summary", error=str(e))
            raise Exception(f"Failed to calculate summary: {str(e)}")

    async def get_feedback_trends(
        self,
        tenant_id: str,
        days: int = 30,
        product_id: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Get feedback trends over time
        """
        try:
            # Simulate trend data
            trends = []
            end_date = datetime.now()

            # Generate daily trend data
            for i in range(days):
                day = end_date - timedelta(days=i)

                # Simulate varying accuracy with a weekly pattern
                base_accuracy = 85.0
                weekly_variation = 3.0 * (i % 7 / 6 - 0.5)  # Weekly pattern, sweeping roughly -1.5 to +1.5 over each 7-day cycle
                daily_noise = (i % 3 - 1) * 1.5  # Daily noise

                accuracy = max(70, min(95, base_accuracy + weekly_variation + daily_noise))

                trends.append({
                    'date': day.strftime('%Y-%m-%d'),
                    'accuracy_score': round(accuracy, 1),
                    'feedback_count': max(1, int(5 + i % 10)),
                    'confidence_score': round(2.5 + (i % 5 - 2) * 0.2, 1)
                })

            # Sort by date (oldest first)
            trends.sort(key=lambda x: x['date'])

            return trends

        except Exception as e:
            logger.error("Failed to get feedback trends", error=str(e))
            raise Exception(f"Failed to get trends: {str(e)}")

    async def trigger_retraining_from_feedback(
        self,
        tenant_id: str,
        forecast_id: str
    ) -> Dict[str, Any]:
        """
        Trigger model retraining based on feedback
        """
        try:
            # In a real implementation, this would:
            # 1. Collect recent feedback data
            # 2. Prepare training dataset
            # 3. Submit retraining job to ML service
            # 4. Return job ID

            # For demo purposes, simulate a retraining job
            job_id = str(uuid.uuid4())

            logger.info("Retraining job triggered",
                        job_id=job_id,
                        tenant_id=tenant_id,
                        forecast_id=forecast_id)

            return {
                'job_id': job_id,
                'forecasts_included': 15,
                'feedback_samples_used': 42,
                'status': 'queued',
                'estimated_completion': (datetime.now() + timedelta(minutes=30)).isoformat()
            }

        except Exception as e:
            logger.error("Failed to trigger retraining", error=str(e))
            raise Exception(f"Failed to trigger retraining: {str(e)}")

    async def get_improvement_suggestions(
        self,
        tenant_id: str,
        forecast_id: str
    ) -> List[Dict[str, Any]]:
        """
        Get AI-generated improvement suggestions
        """
        try:
            # Get accuracy metrics for this forecast
            metrics = await self.calculate_accuracy_metrics(tenant_id, forecast_id)

            if not metrics:
                return [
                    {
                        'suggestion': 'Insufficient feedback data to generate suggestions',
                        'type': 'data',
                        'priority': 'low',
                        'confidence': 0.7
                    }
                ]

            # Generate suggestions based on metrics
            suggestions = []

            # Analyze feedback distribution
            feedback_dist = metrics.feedback_distribution
            total_feedback = metrics.total_feedback_count

            if feedback_dist['too_high'] > total_feedback * 0.4:
                suggestions.append({
                    'suggestion': 'Forecasts are consistently too high - consider adjusting demand estimation parameters',
                    'type': 'bias',
                    'priority': 'high',
                    'confidence': 0.9,
                    'details': {
                        'too_high_percentage': feedback_dist['too_high'] / total_feedback * 100,
                        'recommended_action': 'Reduce demand estimation by 10-15%'
                    }
                })

            if feedback_dist['too_low'] > total_feedback * 0.4:
                suggestions.append({
                    'suggestion': 'Forecasts are consistently too low - consider increasing demand estimation parameters',
                    'type': 'bias',
                    'priority': 'high',
                    'confidence': 0.9,
                    'details': {
                        'too_low_percentage': feedback_dist['too_low'] / total_feedback * 100,
                        'recommended_action': 'Increase demand estimation by 10-15%'
                    }
                })

            if metrics.accuracy_score < 70:
                suggestions.append({
                    'suggestion': 'Low overall accuracy - consider comprehensive model review and retraining',
                    'type': 'model',
                    'priority': 'critical',
                    'confidence': 0.85,
                    'details': {
                        'current_accuracy': metrics.accuracy_score,
                        'recommended_action': 'Full model retraining with expanded feature set'
                    }
                })
            elif metrics.accuracy_score < 85:
                suggestions.append({
                    'suggestion': 'Moderate accuracy - consider feature engineering improvements',
                    'type': 'features',
                    'priority': 'medium',
                    'confidence': 0.8,
                    'details': {
                        'current_accuracy': metrics.accuracy_score,
                        'recommended_action': 'Add weather data, promotions, and seasonal features'
                    }
                })

            if metrics.average_confidence < 2.0:  # Below "medium" on the 1-3 confidence scale
                suggestions.append({
                    'suggestion': 'Low confidence in feedback - consider improving feedback collection process',
                    'type': 'process',
                    'priority': 'medium',
                    'confidence': 0.75,
                    'details': {
                        'average_confidence': metrics.average_confidence,
                        'recommended_action': 'Provide clearer guidance to users on feedback submission'
                    }
                })

            if not suggestions:
                suggestions.append({
                    'suggestion': 'Forecast accuracy is good - consider expanding to additional products',
                    'type': 'expansion',
                    'priority': 'low',
                    'confidence': 0.85,
                    'details': {
                        'current_accuracy': metrics.accuracy_score,
                        'recommended_action': 'Extend forecasting to new product categories'
                    }
                })

            return suggestions

        except Exception as e:
            logger.error("Failed to generate improvement suggestions", error=str(e))
            raise Exception(f"Failed to generate suggestions: {str(e)}")


# Helper class for feedback analysis
class FeedbackAnalyzer:
    """
    Helper class for analyzing feedback patterns
    """

    @staticmethod
    def detect_feedback_patterns(feedback_list: List[ForecastFeedback]) -> Dict[str, Any]:
        """
        Detect patterns in feedback data
        """
        if not feedback_list:
            return {'patterns': [], 'anomalies': []}

        patterns = []
        anomalies = []

        # Simple pattern detection (in a real implementation, this would be more sophisticated)
        feedback_types = [f.feedback_type for f in feedback_list]

        if len(set(feedback_types)) == 1:
            patterns.append({
                'type': 'consistent_feedback',
                'pattern': f'All feedback is "{feedback_types[0]}"',
                'confidence': 0.9
            })

        return {'patterns': patterns, 'anomalies': anomalies}


# Helper class for accuracy calculation
class AccuracyCalculator:
    """
    Helper class for calculating forecast accuracy metrics
    """

    @staticmethod
    def calculate_mape(actual: float, predicted: float) -> float:
        """
        Calculate Mean Absolute Percentage Error for a single observation.
        MAPE is undefined when the actual value is 0; this returns 0.0 in that case.
        """
        if actual == 0:
            return 0.0
        return abs((actual - predicted) / actual) * 100

    @staticmethod
    def calculate_rmse(actual: float, predicted: float) -> float:
        """
        Calculate the squared error for a single observation. RMSE is the
        square root of the mean of these values across observations.
        """
        return (actual - predicted) ** 2

    @staticmethod
    def feedback_to_accuracy_score(feedback_type: str) -> float:
        """
        Convert feedback type to accuracy score
        """
        feedback_scores = {
            'accurate': 100,
            'too_high': 50,
            'too_low': 50,
            'uncertain': 75
        }
        return feedback_scores.get(feedback_type, 75)
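

# Illustrative sanity check for the helpers above (hypothetical usage with
# made-up values; safe to run directly):
if __name__ == "__main__":
    print(AccuracyCalculator.calculate_mape(actual=100.0, predicted=85.0))  # 15.0
    print(AccuracyCalculator.feedback_to_accuracy_score("too_high"))  # 50
    sample = ForecastFeedback(
        feedback_id=uuid.uuid4(),
        forecast_id=uuid.uuid4(),
        tenant_id="demo-tenant",
        feedback_type="accurate",
        confidence="high",
        actual_value=120.0,
        notes=None,
        feedback_data={},
        created_at=datetime.now(),
        created_by="demo",
    )
    print(FeedbackAnalyzer.detect_feedback_patterns([sample]))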