Initial commit - production deployment
services/forecasting/app/api/__init__.py (new file, 27 lines)
@@ -0,0 +1,27 @@
"""
Forecasting API Layer

HTTP endpoints for demand forecasting and prediction operations
"""

from .forecasts import router as forecasts_router
from .forecasting_operations import router as forecasting_operations_router
from .analytics import router as analytics_router
from .validation import router as validation_router
from .historical_validation import router as historical_validation_router
from .webhooks import router as webhooks_router
from .performance_monitoring import router as performance_monitoring_router
from .retraining import router as retraining_router
from .enterprise_forecasting import router as enterprise_forecasting_router


__all__ = [
    "forecasts_router",
    "forecasting_operations_router",
    "analytics_router",
    "validation_router",
    "historical_validation_router",
    "webhooks_router",
    "performance_monitoring_router",
    "retraining_router",
    "enterprise_forecasting_router",
]
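These nine routers are presumably mounted on the service's FastAPI application at startup. A minimal wiring sketch, assuming an app module that is not part of this diff:

# Hypothetical startup wiring -- the module layout and any prefixes are assumptions.
from fastapi import FastAPI

from app.api import (
    forecasts_router,
    analytics_router,
    enterprise_forecasting_router,
)

app = FastAPI(title="forecasting-service")
for r in (forecasts_router, analytics_router, enterprise_forecasting_router):
    app.include_router(r)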
services/forecasting/app/api/analytics.py (new file, 55 lines)
@@ -0,0 +1,55 @@
# services/forecasting/app/api/analytics.py
"""
Forecasting Analytics API - Reporting, statistics, and insights
"""

import structlog
from fastapi import APIRouter, Depends, HTTPException, status, Query, Path
from datetime import date
from typing import Optional

from app.services.prediction_service import PredictionService
from shared.database.base import create_database_manager
from app.core.config import settings
from shared.routing import RouteBuilder
from shared.auth.access_control import analytics_tier_required

route_builder = RouteBuilder('forecasting')
logger = structlog.get_logger()
router = APIRouter(tags=["forecasting-analytics"])


def get_enhanced_prediction_service():
    """Dependency injection for enhanced PredictionService"""
    database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
    return PredictionService(database_manager)


@router.get(
    route_builder.build_analytics_route("predictions-performance")
)
@analytics_tier_required
async def get_predictions_performance(
    tenant_id: str = Path(..., description="Tenant ID"),
    start_date: Optional[date] = Query(None),
    end_date: Optional[date] = Query(None),
    prediction_service: PredictionService = Depends(get_enhanced_prediction_service)
):
    """Get predictions performance analytics (Professional+ tier required)"""
    try:
        logger.info("Getting predictions performance", tenant_id=tenant_id)

        performance = await prediction_service.get_performance_metrics(
            tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date
        )

        return performance

    except Exception as e:
        logger.error("Failed to get predictions performance", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve predictions performance"
        )
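A hedged client sketch for the endpoint above; the concrete path produced by `build_analytics_route` and the host are assumptions:

# Illustrative call only -- the real route prefix comes from RouteBuilder.
import httpx

resp = httpx.get(
    "https://forecasting.example.com/api/v1/tenants/TENANT_ID/analytics/predictions-performance",
    params={"start_date": "2025-01-01", "end_date": "2025-01-31"},
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
print(resp.json())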
services/forecasting/app/api/audit.py (new file, 237 lines)
@@ -0,0 +1,237 @@
# services/forecasting/app/api/audit.py
"""
Audit Logs API - Retrieve audit trail for forecasting service
"""

from fastapi import APIRouter, Depends, HTTPException, Query, Path, status
from typing import Optional, Dict, Any
from uuid import UUID
from datetime import datetime
import structlog
from sqlalchemy import select, func, and_
from sqlalchemy.ext.asyncio import AsyncSession

from app.models import AuditLog
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.models.audit_log_schemas import (
    AuditLogResponse,
    AuditLogListResponse,
    AuditLogStatsResponse
)
from app.core.database import database_manager

route_builder = RouteBuilder('forecasting')
router = APIRouter(tags=["audit-logs"])
logger = structlog.get_logger()


async def get_db():
    """Database session dependency"""
    async with database_manager.get_session() as session:
        yield session


@router.get(
    route_builder.build_base_route("audit-logs"),
    response_model=AuditLogListResponse
)
@require_user_role(['admin', 'owner'])
async def get_audit_logs(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    start_date: Optional[datetime] = Query(None, description="Filter logs from this date"),
    end_date: Optional[datetime] = Query(None, description="Filter logs until this date"),
    user_id: Optional[UUID] = Query(None, description="Filter by user ID"),
    action: Optional[str] = Query(None, description="Filter by action type"),
    resource_type: Optional[str] = Query(None, description="Filter by resource type"),
    severity: Optional[str] = Query(None, description="Filter by severity level"),
    search: Optional[str] = Query(None, description="Search in description field"),
    limit: int = Query(100, ge=1, le=1000, description="Number of records to return"),
    offset: int = Query(0, ge=0, description="Number of records to skip"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get audit logs for forecasting service.
    Requires admin or owner role.
    """
    try:
        logger.info(
            "Retrieving audit logs",
            tenant_id=tenant_id,
            user_id=current_user.get("user_id"),
            filters={
                "start_date": start_date,
                "end_date": end_date,
                "action": action,
                "resource_type": resource_type,
                "severity": severity
            }
        )

        # Build query filters
        filters = [AuditLog.tenant_id == tenant_id]

        if start_date:
            filters.append(AuditLog.created_at >= start_date)
        if end_date:
            filters.append(AuditLog.created_at <= end_date)
        if user_id:
            filters.append(AuditLog.user_id == user_id)
        if action:
            filters.append(AuditLog.action == action)
        if resource_type:
            filters.append(AuditLog.resource_type == resource_type)
        if severity:
            filters.append(AuditLog.severity == severity)
        if search:
            filters.append(AuditLog.description.ilike(f"%{search}%"))

        # Count total matching records
        count_query = select(func.count()).select_from(AuditLog).where(and_(*filters))
        total_result = await db.execute(count_query)
        total = total_result.scalar() or 0

        # Fetch paginated results
        query = (
            select(AuditLog)
            .where(and_(*filters))
            .order_by(AuditLog.created_at.desc())
            .limit(limit)
            .offset(offset)
        )

        result = await db.execute(query)
        audit_logs = result.scalars().all()

        # Convert to response models
        items = [AuditLogResponse.from_orm(log) for log in audit_logs]

        logger.info(
            "Successfully retrieved audit logs",
            tenant_id=tenant_id,
            total=total,
            returned=len(items)
        )

        return AuditLogListResponse(
            items=items,
            total=total,
            limit=limit,
            offset=offset,
            has_more=(offset + len(items)) < total
        )

    except Exception as e:
        logger.error(
            "Failed to retrieve audit logs",
            error=str(e),
            tenant_id=tenant_id
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve audit logs: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("audit-logs/stats"),
    response_model=AuditLogStatsResponse
)
@require_user_role(['admin', 'owner'])
async def get_audit_log_stats(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    start_date: Optional[datetime] = Query(None, description="Filter logs from this date"),
    end_date: Optional[datetime] = Query(None, description="Filter logs until this date"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get audit log statistics for forecasting service.
    Requires admin or owner role.
    """
    try:
        logger.info(
            "Retrieving audit log statistics",
            tenant_id=tenant_id,
            user_id=current_user.get("user_id")
        )

        # Build base filters
        filters = [AuditLog.tenant_id == tenant_id]
        if start_date:
            filters.append(AuditLog.created_at >= start_date)
        if end_date:
            filters.append(AuditLog.created_at <= end_date)

        # Total events
        count_query = select(func.count()).select_from(AuditLog).where(and_(*filters))
        total_result = await db.execute(count_query)
        total_events = total_result.scalar() or 0

        # Events by action
        action_query = (
            select(AuditLog.action, func.count().label('count'))
            .where(and_(*filters))
            .group_by(AuditLog.action)
        )
        action_result = await db.execute(action_query)
        events_by_action = {row.action: row.count for row in action_result}

        # Events by severity
        severity_query = (
            select(AuditLog.severity, func.count().label('count'))
            .where(and_(*filters))
            .group_by(AuditLog.severity)
        )
        severity_result = await db.execute(severity_query)
        events_by_severity = {row.severity: row.count for row in severity_result}

        # Events by resource type
        resource_query = (
            select(AuditLog.resource_type, func.count().label('count'))
            .where(and_(*filters))
            .group_by(AuditLog.resource_type)
        )
        resource_result = await db.execute(resource_query)
        events_by_resource_type = {row.resource_type: row.count for row in resource_result}

        # Date range
        date_range_query = (
            select(
                func.min(AuditLog.created_at).label('min_date'),
                func.max(AuditLog.created_at).label('max_date')
            )
            .where(and_(*filters))
        )
        date_result = await db.execute(date_range_query)
        date_row = date_result.one()

        logger.info(
            "Successfully retrieved audit log statistics",
            tenant_id=tenant_id,
            total_events=total_events
        )

        return AuditLogStatsResponse(
            total_events=total_events,
            events_by_action=events_by_action,
            events_by_severity=events_by_severity,
            events_by_resource_type=events_by_resource_type,
            date_range={
                "min": date_row.min_date,
                "max": date_row.max_date
            }
        )

    except Exception as e:
        logger.error(
            "Failed to retrieve audit log statistics",
            error=str(e),
            tenant_id=tenant_id
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve audit log statistics: {str(e)}"
        )
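The list response's `limit`/`offset`/`has_more` fields support plain offset pagination. A hedged consumer sketch (the route path and auth header are assumptions):

# Illustrative pagination loop over the audit-log endpoint.
import httpx

offset, page_size = 0, 100
while True:
    resp = httpx.get(
        "https://forecasting.example.com/api/v1/tenants/TENANT_ID/audit-logs",
        params={"limit": page_size, "offset": offset},
        headers={"Authorization": "Bearer <token>"},
    )
    resp.raise_for_status()
    page = resp.json()
    for item in page["items"]:
        print(item)
    if not page["has_more"]:
        break
    offset += page_size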
services/forecasting/app/api/enterprise_forecasting.py (new file, 108 lines)
@@ -0,0 +1,108 @@
"""
Enterprise forecasting API endpoints
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from typing import Optional
from datetime import date
import structlog

from app.services.enterprise_forecasting_service import EnterpriseForecastingService
from shared.auth.tenant_access import verify_tenant_permission_dep
from shared.clients import get_forecast_client, get_tenant_client
import shared.redis_utils
from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter()

# Module-level Redis client, created lazily on first use
_redis_client = None


async def get_forecasting_redis_client():
    """Get or create Redis client"""
    global _redis_client
    try:
        if _redis_client is None:
            _redis_client = await shared.redis_utils.initialize_redis(settings.REDIS_URL)
            logger.info("Redis client initialized for enterprise forecasting")
        return _redis_client
    except Exception as e:
        logger.warning("Failed to initialize Redis client, enterprise forecasting will work with limited functionality", error=str(e))
        return None


async def get_enterprise_forecasting_service(
    redis_client = Depends(get_forecasting_redis_client)
) -> EnterpriseForecastingService:
    """Dependency injection for EnterpriseForecastingService"""
    forecast_client = get_forecast_client(settings, "forecasting-service")
    tenant_client = get_tenant_client(settings, "forecasting-service")
    return EnterpriseForecastingService(
        forecast_client=forecast_client,
        tenant_client=tenant_client,
        redis_client=redis_client
    )


@router.get("/tenants/{tenant_id}/forecasting/enterprise/aggregated")
async def get_aggregated_forecast(
    tenant_id: str,
    start_date: date = Query(..., description="Start date for forecast aggregation"),
    end_date: date = Query(..., description="End date for forecast aggregation"),
    product_id: Optional[str] = Query(None, description="Optional product ID to filter by"),
    enterprise_forecasting_service: EnterpriseForecastingService = Depends(get_enterprise_forecasting_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get aggregated forecasts across parent and child tenants
    """
    try:
        # Check if this tenant is a parent tenant
        tenant_info = await enterprise_forecasting_service.tenant_client.get_tenant(tenant_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can access aggregated enterprise forecasts"
            )

        result = await enterprise_forecasting_service.get_aggregated_forecast(
            parent_tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date,
            product_id=product_id
        )
        return result
    except HTTPException:
        # Propagate the 403 unchanged instead of masking it as a 500 below
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get aggregated forecast: {str(e)}")


@router.get("/tenants/{tenant_id}/forecasting/enterprise/network-performance")
async def get_network_performance_metrics(
    tenant_id: str,
    start_date: date = Query(..., description="Start date for metrics"),
    end_date: date = Query(..., description="End date for metrics"),
    enterprise_forecasting_service: EnterpriseForecastingService = Depends(get_enterprise_forecasting_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get aggregated performance metrics across tenant network
    """
    try:
        # Check if this tenant is a parent tenant
        tenant_info = await enterprise_forecasting_service.tenant_client.get_tenant(tenant_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can access network performance metrics"
            )

        result = await enterprise_forecasting_service.get_network_performance_metrics(
            parent_tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date
        )
        return result
    except HTTPException:
        # Propagate the 403 unchanged instead of masking it as a 500 below
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get network performance: {str(e)}")
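Both endpoints repeat the same parent-tenant gate. A sketch of how it could be factored into a single dependency; the helper name and wiring are assumptions, not part of this commit:

# Hypothetical shared gate -- not in the commit.
from fastapi import Depends, HTTPException

async def require_parent_tenant(
    tenant_id: str,
    service: EnterpriseForecastingService = Depends(get_enterprise_forecasting_service),
) -> str:
    """Raise 403 unless the tenant is a parent tenant."""
    tenant_info = await service.tenant_client.get_tenant(tenant_id)
    if tenant_info.get('tenant_type') != 'parent':
        raise HTTPException(
            status_code=403,
            detail="Only parent tenants can access enterprise aggregates"
        )
    return tenant_id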
services/forecasting/app/api/forecast_feedback.py (new file, 417 lines)
@@ -0,0 +1,417 @@
# services/forecasting/app/api/forecast_feedback.py
"""
Forecast Feedback API - Endpoints for collecting and analyzing forecast feedback
"""

import structlog
from fastapi import APIRouter, Depends, HTTPException, status, Query, Path, Body
from typing import List, Optional, Dict, Any
from datetime import date, datetime
import uuid
import enum
from pydantic import BaseModel, Field

from app.services.forecast_feedback_service import ForecastFeedbackService
from shared.database.base import create_database_manager
from app.core.config import settings
from shared.routing import RouteBuilder
from shared.auth.tenant_access import verify_tenant_permission_dep

route_builder = RouteBuilder('forecasting')
logger = structlog.get_logger()
router = APIRouter(tags=["forecast-feedback"])


# Enums for feedback types
class FeedbackType(str, enum.Enum):
    """Type of feedback on forecast accuracy"""
    TOO_HIGH = "too_high"
    TOO_LOW = "too_low"
    ACCURATE = "accurate"
    UNCERTAIN = "uncertain"


class FeedbackConfidence(str, enum.Enum):
    """Confidence level of the feedback provider"""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


# Pydantic models (BaseModel and Field are imported once, above)

class ForecastFeedbackRequest(BaseModel):
    """Request model for submitting forecast feedback"""
    feedback_type: FeedbackType = Field(..., description="Type of feedback on forecast accuracy")
    confidence: FeedbackConfidence = Field(..., description="Confidence level of the feedback provider")
    actual_value: Optional[float] = Field(None, description="Actual observed value")
    notes: Optional[str] = Field(None, description="Additional notes about the feedback")
    feedback_data: Optional[Dict[str, Any]] = Field(None, description="Additional feedback data")


class ForecastFeedbackResponse(BaseModel):
    """Response model for forecast feedback"""
    feedback_id: str = Field(..., description="Unique feedback ID")
    forecast_id: str = Field(..., description="Forecast ID this feedback relates to")
    tenant_id: str = Field(..., description="Tenant ID")
    feedback_type: FeedbackType = Field(..., description="Type of feedback")
    confidence: FeedbackConfidence = Field(..., description="Confidence level")
    actual_value: Optional[float] = Field(None, description="Actual value observed")
    notes: Optional[str] = Field(None, description="Feedback notes")
    feedback_data: Dict[str, Any] = Field(..., description="Additional feedback data")
    created_at: datetime = Field(..., description="When feedback was created")
    created_by: Optional[str] = Field(None, description="Who created the feedback")


class ForecastAccuracyMetrics(BaseModel):
    """Accuracy metrics for a forecast"""
    forecast_id: str = Field(..., description="Forecast ID")
    total_feedback_count: int = Field(..., description="Total feedback received")
    accuracy_score: float = Field(..., description="Calculated accuracy score (0-100)")
    feedback_distribution: Dict[str, int] = Field(..., description="Distribution of feedback types")
    average_confidence: float = Field(..., description="Average confidence score")
    last_feedback_date: Optional[datetime] = Field(None, description="Most recent feedback date")


class ForecasterPerformanceMetrics(BaseModel):
    """Performance metrics for the forecasting system"""
    overall_accuracy: float = Field(..., description="Overall system accuracy score")
    total_forecasts_with_feedback: int = Field(..., description="Total forecasts with feedback")
    accuracy_by_product: Dict[str, float] = Field(..., description="Accuracy by product type")
    accuracy_trend: str = Field(..., description="Trend direction: improving, declining, stable")
    improvement_suggestions: List[str] = Field(..., description="AI-generated improvement suggestions")
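To make the request schema concrete, a minimal instance looks like this (values are illustrative only):

# Illustrative payload -- values are made up for demonstration.
example = ForecastFeedbackRequest(
    feedback_type=FeedbackType.TOO_HIGH,
    confidence=FeedbackConfidence.HIGH,
    actual_value=42.0,
    notes="Holiday week; actual demand came in below the forecast",
)
print(example.json())  # Pydantic v1-style serialization, matching from_orm usage elsewhere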
def get_forecast_feedback_service():
    """Dependency injection for ForecastFeedbackService"""
    database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
    return ForecastFeedbackService(database_manager)


@router.post(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "feedback"),
    response_model=ForecastFeedbackResponse,
    status_code=status.HTTP_201_CREATED
)
async def submit_forecast_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    feedback_request: ForecastFeedbackRequest = Body(...),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Submit feedback on forecast accuracy

    Allows users to provide feedback on whether forecasts were accurate, too high, or too low.
    This feedback is used to improve future forecast accuracy through continuous learning.
    """
    try:
        logger.info("Submitting forecast feedback",
                    tenant_id=tenant_id, forecast_id=forecast_id,
                    feedback_type=feedback_request.feedback_type)

        # Validate forecast exists
        forecast_exists = await forecast_feedback_service.forecast_exists(tenant_id, forecast_id)
        if not forecast_exists:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Forecast not found"
            )

        # Submit feedback
        feedback = await forecast_feedback_service.submit_feedback(
            tenant_id=tenant_id,
            forecast_id=forecast_id,
            feedback_type=feedback_request.feedback_type,
            confidence=feedback_request.confidence,
            actual_value=feedback_request.actual_value,
            notes=feedback_request.notes,
            feedback_data=feedback_request.feedback_data
        )

        return {
            'feedback_id': str(feedback.feedback_id),
            'forecast_id': str(feedback.forecast_id),
            'tenant_id': feedback.tenant_id,
            'feedback_type': feedback.feedback_type,
            'confidence': feedback.confidence,
            'actual_value': feedback.actual_value,
            'notes': feedback.notes,
            'feedback_data': feedback.feedback_data or {},
            'created_at': feedback.created_at,
            'created_by': feedback.created_by
        }

    except HTTPException:
        raise
    except ValueError as e:
        logger.error("Invalid forecast ID", error=str(e))
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid forecast ID format"
        )
    except Exception as e:
        logger.error("Failed to submit forecast feedback", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to submit feedback"
        )


@router.get(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "feedback"),
    response_model=List[ForecastFeedbackResponse]
)
async def get_forecast_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get all feedback for a specific forecast

    Retrieves historical feedback submissions for analysis and auditing.
    """
    try:
        logger.info("Getting forecast feedback", tenant_id=tenant_id, forecast_id=forecast_id)

        feedback_list = await forecast_feedback_service.get_feedback_for_forecast(
            tenant_id=tenant_id,
            forecast_id=forecast_id,
            limit=limit,
            offset=offset
        )

        return [
            ForecastFeedbackResponse(
                feedback_id=str(f.feedback_id),
                forecast_id=str(f.forecast_id),
                tenant_id=f.tenant_id,
                feedback_type=f.feedback_type,
                confidence=f.confidence,
                actual_value=f.actual_value,
                notes=f.notes,
                feedback_data=f.feedback_data or {},
                created_at=f.created_at,
                created_by=f.created_by
            ) for f in feedback_list
        ]

    except Exception as e:
        logger.error("Failed to get forecast feedback", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve feedback"
        )


@router.get(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "accuracy"),
    response_model=ForecastAccuracyMetrics
)
async def get_forecast_accuracy_metrics(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get accuracy metrics for a specific forecast

    Calculates accuracy scores based on feedback and actual vs predicted values.
    """
    try:
        logger.info("Getting forecast accuracy metrics", tenant_id=tenant_id, forecast_id=forecast_id)

        metrics = await forecast_feedback_service.calculate_accuracy_metrics(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        if not metrics:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="No accuracy metrics available for this forecast"
            )

        return {
            'forecast_id': metrics.forecast_id,
            'total_feedback_count': metrics.total_feedback_count,
            'accuracy_score': metrics.accuracy_score,
            'feedback_distribution': metrics.feedback_distribution,
            'average_confidence': metrics.average_confidence,
            'last_feedback_date': metrics.last_feedback_date
        }

    except HTTPException:
        # Propagate the 404 unchanged instead of converting it into a 500 below
        raise
    except Exception as e:
        logger.error("Failed to get forecast accuracy metrics", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to calculate accuracy metrics"
        )


@router.get(
    route_builder.build_base_route("forecasts", "accuracy-summary"),
    response_model=ForecasterPerformanceMetrics
)
async def get_forecaster_performance_summary(
    tenant_id: str = Path(..., description="Tenant ID"),
    start_date: Optional[date] = Query(None, description="Start date filter"),
    end_date: Optional[date] = Query(None, description="End date filter"),
    product_id: Optional[str] = Query(None, description="Filter by product ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get overall forecaster performance summary

    Aggregates accuracy metrics across all forecasts to assess overall system
    performance and identify areas for improvement.
    """
    try:
        logger.info("Getting forecaster performance summary", tenant_id=tenant_id)

        metrics = await forecast_feedback_service.calculate_performance_summary(
            tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date,
            product_id=product_id
        )

        return {
            'overall_accuracy': metrics.overall_accuracy,
            'total_forecasts_with_feedback': metrics.total_forecasts_with_feedback,
            'accuracy_by_product': metrics.accuracy_by_product,
            'accuracy_trend': metrics.accuracy_trend,
            'improvement_suggestions': metrics.improvement_suggestions
        }

    except Exception as e:
        logger.error("Failed to get forecaster performance summary", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to calculate performance summary"
        )


@router.get(
    route_builder.build_base_route("forecasts", "feedback-trends")
)
async def get_feedback_trends(
    tenant_id: str = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=7, le=365, description="Number of days to analyze"),
    product_id: Optional[str] = Query(None, description="Filter by product ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get feedback trends over time

    Analyzes how forecast accuracy and feedback patterns change over time.
    """
    try:
        logger.info("Getting feedback trends", tenant_id=tenant_id, days=days)

        trends = await forecast_feedback_service.get_feedback_trends(
            tenant_id=tenant_id,
            days=days,
            product_id=product_id
        )

        return {
            'success': True,
            'trends': trends,
            'period': f'Last {days} days'
        }

    except Exception as e:
        logger.error("Failed to get feedback trends", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve feedback trends"
        )


@router.post(
    route_builder.build_resource_action_route("forecasts", "forecast_id", "retrain")
)
async def trigger_retraining_from_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Trigger model retraining based on feedback

    Initiates a retraining job using recent feedback to improve forecast accuracy.
    """
    try:
        logger.info("Triggering retraining from feedback", tenant_id=tenant_id, forecast_id=forecast_id)

        result = await forecast_feedback_service.trigger_retraining_from_feedback(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        return {
            'success': True,
            'message': 'Retraining job initiated successfully',
            'job_id': result.job_id,
            'forecasts_included': result.forecasts_included,
            'feedback_samples_used': result.feedback_samples_used
        }

    except Exception as e:
        logger.error("Failed to trigger retraining", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to initiate retraining"
        )


@router.get(
    route_builder.build_resource_action_route("forecasts", "forecast_id", "suggestions")
)
async def get_improvement_suggestions(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get AI-generated improvement suggestions for a forecast

    Analyzes feedback patterns and suggests specific improvements for forecast accuracy.
    """
    try:
        logger.info("Getting improvement suggestions", tenant_id=tenant_id, forecast_id=forecast_id)

        suggestions = await forecast_feedback_service.get_improvement_suggestions(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        return {
            'success': True,
            'forecast_id': forecast_id,
            'suggestions': suggestions,
            'confidence_scores': [s.get('confidence', 0.8) for s in suggestions]
        }

    except Exception as e:
        logger.error("Failed to get improvement suggestions", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to generate suggestions"
        )
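End to end, submitting feedback looks roughly like this; the exact nested route produced by `build_nested_resource_route` and the host are assumptions:

# Illustrative client call.
import httpx

resp = httpx.post(
    "https://forecasting.example.com/api/v1/tenants/TENANT_ID/forecasts/FORECAST_ID/feedback",
    json={
        "feedback_type": "too_high",
        "confidence": "high",
        "actual_value": 42.0,
        "notes": "Holiday week; demand came in below forecast",
    },
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
print(resp.json()["feedback_id"])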
services/forecasting/app/api/forecasting_operations.py (new file, 1038 lines)
File diff suppressed because it is too large.
services/forecasting/app/api/forecasts.py (new file, 145 lines)
@@ -0,0 +1,145 @@
# services/forecasting/app/api/forecasts.py
"""
Forecasts API - Atomic CRUD operations on Forecast model
"""

import structlog
from fastapi import APIRouter, Depends, HTTPException, status, Query, Path
from typing import List, Optional
from datetime import date, datetime
import uuid

from app.services.forecasting_service import EnhancedForecastingService
from app.schemas.forecasts import ForecastResponse
from shared.database.base import create_database_manager
from app.core.config import settings
from shared.routing import RouteBuilder

route_builder = RouteBuilder('forecasting')
logger = structlog.get_logger()
router = APIRouter(tags=["forecasts"])


def get_enhanced_forecasting_service():
    """Dependency injection for EnhancedForecastingService"""
    database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
    return EnhancedForecastingService(database_manager)


@router.get(
    route_builder.build_base_route("forecasts"),
    response_model=List[ForecastResponse]
)
async def list_forecasts(
    tenant_id: str = Path(..., description="Tenant ID"),
    inventory_product_id: Optional[str] = Query(None, description="Filter by product ID"),
    start_date: Optional[date] = Query(None, description="Start date filter"),
    end_date: Optional[date] = Query(None, description="End date filter"),
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0),
    enhanced_forecasting_service: EnhancedForecastingService = Depends(get_enhanced_forecasting_service)
):
    """List forecasts with optional filters"""
    try:
        logger.info("Listing forecasts", tenant_id=tenant_id)

        forecasts = await enhanced_forecasting_service.list_forecasts(
            tenant_id=tenant_id,
            inventory_product_id=inventory_product_id,
            start_date=start_date,
            end_date=end_date,
            limit=limit,
            offset=offset
        )

        return forecasts

    except Exception as e:
        logger.error("Failed to list forecasts", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve forecasts"
        )


@router.get(
    route_builder.build_resource_detail_route("forecasts", "forecast_id"),
    response_model=ForecastResponse
)
async def get_forecast(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    enhanced_forecasting_service: EnhancedForecastingService = Depends(get_enhanced_forecasting_service)
):
    """Get a specific forecast by ID"""
    try:
        logger.info("Getting forecast", tenant_id=tenant_id, forecast_id=forecast_id)

        forecast = await enhanced_forecasting_service.get_forecast(
            tenant_id=tenant_id,
            forecast_id=uuid.UUID(forecast_id)
        )

        if not forecast:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Forecast not found"
            )

        return forecast

    except HTTPException:
        raise
    except ValueError as e:
        logger.error("Invalid forecast ID", error=str(e))
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid forecast ID format"
        )
    except Exception as e:
        logger.error("Failed to get forecast", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve forecast"
        )


@router.delete(
    route_builder.build_resource_detail_route("forecasts", "forecast_id")
)
async def delete_forecast(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    enhanced_forecasting_service: EnhancedForecastingService = Depends(get_enhanced_forecasting_service)
):
    """Delete a specific forecast"""
    try:
        logger.info("Deleting forecast", tenant_id=tenant_id, forecast_id=forecast_id)

        success = await enhanced_forecasting_service.delete_forecast(
            tenant_id=tenant_id,
            forecast_id=uuid.UUID(forecast_id)
        )

        if not success:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Forecast not found"
            )

        return {"message": "Forecast deleted successfully"}

    except HTTPException:
        raise
    except ValueError as e:
        logger.error("Invalid forecast ID", error=str(e))
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid forecast ID format"
        )
    except Exception as e:
        logger.error("Failed to delete forecast", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to delete forecast"
        )
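A hedged sketch of listing forecasts for one product over a date window (path, host, and response field names are assumptions; the fields follow the Forecast model seen in internal_demo.py below):

# Illustrative listing call with filters.
import httpx

resp = httpx.get(
    "https://forecasting.example.com/api/v1/tenants/TENANT_ID/forecasts",
    params={
        "inventory_product_id": "PRODUCT_ID",
        "start_date": "2025-01-01",
        "end_date": "2025-01-31",
        "limit": 100,
    },
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
for forecast in resp.json():
    print(forecast["forecast_date"], forecast["predicted_demand"])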
services/forecasting/app/api/historical_validation.py (new file, 304 lines)
@@ -0,0 +1,304 @@
# ================================================================
# services/forecasting/app/api/historical_validation.py
# ================================================================
"""
Historical Validation API - Backfill validation for late-arriving sales data
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, status
from typing import Dict, Any, List, Optional
from uuid import UUID
from datetime import date
import structlog

from pydantic import BaseModel, Field
from app.services.historical_validation_service import HistoricalValidationService
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from app.core.database import get_db
from sqlalchemy.ext.asyncio import AsyncSession

route_builder = RouteBuilder('forecasting')
router = APIRouter(tags=["historical-validation"])
logger = structlog.get_logger()


# ================================================================
# Request/Response Schemas
# ================================================================

class DetectGapsRequest(BaseModel):
    """Request model for gap detection"""
    lookback_days: int = Field(default=90, ge=1, le=365, description="Days to look back")


class BackfillRequest(BaseModel):
    """Request model for manual backfill"""
    start_date: date = Field(..., description="Start date for backfill")
    end_date: date = Field(..., description="End date for backfill")


class SalesDataUpdateRequest(BaseModel):
    """Request model for registering sales data update"""
    start_date: date = Field(..., description="Start date of updated data")
    end_date: date = Field(..., description="End date of updated data")
    records_affected: int = Field(..., ge=0, description="Number of records affected")
    update_source: str = Field(default="import", description="Source of update")
    import_job_id: Optional[str] = Field(None, description="Import job ID if applicable")
    auto_trigger_validation: bool = Field(default=True, description="Auto-trigger validation")


class AutoBackfillRequest(BaseModel):
    """Request model for automatic backfill"""
    lookback_days: int = Field(default=90, ge=1, le=365, description="Days to look back")
    max_gaps_to_process: int = Field(default=10, ge=1, le=50, description="Max gaps to process")
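For reference, a register-sales-update body that validates against the schema above (dates, counts, and the job ID are illustrative):

# Illustrative request body for the register-sales-update endpoint below.
req = SalesDataUpdateRequest(
    start_date=date(2025, 1, 1),
    end_date=date(2025, 1, 7),
    records_affected=1250,
    update_source="import",
    import_job_id="job-1234",  # hypothetical job ID
    auto_trigger_validation=True,
)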
# ================================================================
# Endpoints
# ================================================================

@router.post(
    route_builder.build_base_route("validation/detect-gaps"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def detect_validation_gaps(
    request: DetectGapsRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Detect date ranges where forecasts exist but haven't been validated yet

    Returns list of gap periods that need validation backfill.
    """
    try:
        logger.info(
            "Detecting validation gaps",
            tenant_id=tenant_id,
            lookback_days=request.lookback_days,
            user_id=current_user.get("user_id")
        )

        service = HistoricalValidationService(db)

        gaps = await service.detect_validation_gaps(
            tenant_id=tenant_id,
            lookback_days=request.lookback_days
        )

        return {
            "gaps_found": len(gaps),
            "lookback_days": request.lookback_days,
            "gaps": [
                {
                    "start_date": gap["start_date"].isoformat(),
                    "end_date": gap["end_date"].isoformat(),
                    "days_count": gap["days_count"]
                }
                for gap in gaps
            ]
        }

    except Exception as e:
        logger.error(
            "Failed to detect validation gaps",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to detect validation gaps: {str(e)}"
        )


@router.post(
    route_builder.build_base_route("validation/backfill"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner'])
async def backfill_validation(
    request: BackfillRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Manually trigger validation backfill for a specific date range

    Validates forecasts against sales data for historical periods.
    """
    try:
        logger.info(
            "Manual validation backfill requested",
            tenant_id=tenant_id,
            start_date=request.start_date.isoformat(),
            end_date=request.end_date.isoformat(),
            user_id=current_user.get("user_id")
        )

        service = HistoricalValidationService(db)

        result = await service.backfill_validation(
            tenant_id=tenant_id,
            start_date=request.start_date,
            end_date=request.end_date,
            triggered_by="manual"
        )

        return result

    except Exception as e:
        logger.error(
            "Failed to backfill validation",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to backfill validation: {str(e)}"
        )


@router.post(
    route_builder.build_base_route("validation/auto-backfill"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner'])
async def auto_backfill_validation_gaps(
    request: AutoBackfillRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Automatically detect and backfill validation gaps

    Finds all date ranges with missing validations and processes them.
    """
    try:
        logger.info(
            "Auto backfill requested",
            tenant_id=tenant_id,
            lookback_days=request.lookback_days,
            max_gaps=request.max_gaps_to_process,
            user_id=current_user.get("user_id")
        )

        service = HistoricalValidationService(db)

        result = await service.auto_backfill_gaps(
            tenant_id=tenant_id,
            lookback_days=request.lookback_days,
            max_gaps_to_process=request.max_gaps_to_process
        )

        return result

    except Exception as e:
        logger.error(
            "Failed to auto backfill",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to auto backfill: {str(e)}"
        )


@router.post(
    route_builder.build_base_route("validation/register-sales-update"),
    status_code=status.HTTP_201_CREATED
)
@require_user_role(['admin', 'owner', 'member'])
async def register_sales_data_update(
    request: SalesDataUpdateRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Register a sales data update and optionally trigger validation

    Call this endpoint after importing historical sales data to automatically
    trigger validation for the affected date range.
    """
    try:
        logger.info(
            "Registering sales data update",
            tenant_id=tenant_id,
            date_range=f"{request.start_date} to {request.end_date}",
            records_affected=request.records_affected,
            user_id=current_user.get("user_id")
        )

        service = HistoricalValidationService(db)

        result = await service.register_sales_data_update(
            tenant_id=tenant_id,
            start_date=request.start_date,
            end_date=request.end_date,
            records_affected=request.records_affected,
            update_source=request.update_source,
            import_job_id=request.import_job_id,
            auto_trigger_validation=request.auto_trigger_validation
        )

        return result

    except Exception as e:
        logger.error(
            "Failed to register sales data update",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to register sales data update: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("validation/pending"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def get_pending_validations(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    limit: int = Query(50, ge=1, le=100, description="Number of records to return"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get pending sales data updates awaiting validation

    Returns list of sales data updates that have been registered
    but not yet validated.
    """
    try:
        service = HistoricalValidationService(db)

        pending = await service.get_pending_validations(
            tenant_id=tenant_id,
            limit=limit
        )

        return {
            "pending_count": len(pending),
            "pending_validations": [record.to_dict() for record in pending]
        }

    except Exception as e:
        logger.error(
            "Failed to get pending validations",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get pending validations: {str(e)}"
        )
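The intended flow after a historical import, sketched with assumed paths and host:

# Illustrative flow: register the update, then poll for anything still
# awaiting validation.
import httpx

base = "https://forecasting.example.com/api/v1/tenants/TENANT_ID"
headers = {"Authorization": "Bearer <token>"}

resp = httpx.post(
    f"{base}/validation/register-sales-update",
    json={
        "start_date": "2025-01-01",
        "end_date": "2025-01-07",
        "records_affected": 1250,
        "auto_trigger_validation": True,
    },
    headers=headers,
)
resp.raise_for_status()

pending = httpx.get(f"{base}/validation/pending", headers=headers).json()
print(pending["pending_count"])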
services/forecasting/app/api/internal_demo.py (new file, 477 lines)
@@ -0,0 +1,477 @@
"""
Internal Demo Cloning API for Forecasting Service
Service-to-service endpoint for cloning forecast data
"""

from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
import structlog
import uuid
from datetime import datetime, timezone, timedelta
from typing import Optional
import os
import sys
from pathlib import Path
import json

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.utils.demo_dates import adjust_date_for_demo, resolve_time_marker

from app.core.database import get_db
from app.models.forecasts import Forecast, PredictionBatch

logger = structlog.get_logger()
router = APIRouter(prefix="/internal/demo", tags=["internal"])

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"


def parse_date_field(date_value, session_time: datetime, field_name: str = "date") -> Optional[datetime]:
    """
    Parse date field, handling both ISO strings and BASE_TS markers.

    Supports:
    - BASE_TS markers: "BASE_TS + 1h30m", "BASE_TS - 2d"
    - ISO 8601 strings: "2025-01-15T06:00:00Z"
    - None values (returns None)

    Returns timezone-aware datetime or None.
    """
    if not date_value:
        return None

    # Check if it's a BASE_TS marker
    if isinstance(date_value, str) and date_value.startswith("BASE_TS"):
        try:
            return resolve_time_marker(date_value, session_time)
        except ValueError as e:
            logger.warning(
                f"Invalid BASE_TS marker in {field_name}",
                marker=date_value,
                error=str(e)
            )
            return None

    # Handle regular ISO date strings
    try:
        if isinstance(date_value, str):
            original_date = datetime.fromisoformat(date_value.replace('Z', '+00:00'))
        elif hasattr(date_value, 'isoformat'):
            original_date = date_value
        else:
            logger.warning(f"Unsupported date format in {field_name}", date_value=date_value)
            return None

        return adjust_date_for_demo(original_date, session_time)
    except (ValueError, AttributeError) as e:
        logger.warning(
            f"Invalid date format in {field_name}",
            date_value=date_value,
            error=str(e)
        )
        return None


def align_to_week_start(target_date: datetime) -> datetime:
    """Align forecast date to Monday (start of week)"""
    if target_date:
        days_since_monday = target_date.weekday()
        return target_date - timedelta(days=days_since_monday)
    return target_date
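A quick sanity check of the week-alignment helper above (dates are illustrative):

# Wednesday 2025-01-15 aligns back to Monday 2025-01-13.
assert align_to_week_start(datetime(2025, 1, 15)).date().isoformat() == "2025-01-13"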
@router.post("/clone")
|
||||
async def clone_demo_data(
|
||||
base_tenant_id: str,
|
||||
virtual_tenant_id: str,
|
||||
demo_account_type: str,
|
||||
session_id: Optional[str] = None,
|
||||
session_created_at: Optional[str] = None,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Clone forecasting service data for a virtual demo tenant
|
||||
|
||||
This endpoint creates fresh demo data by:
|
||||
1. Loading seed data from JSON files
|
||||
2. Applying XOR-based ID transformation
|
||||
3. Adjusting dates relative to session creation time
|
||||
4. Creating records in the virtual tenant
|
||||
|
||||
Args:
|
||||
base_tenant_id: Template tenant UUID (for reference)
|
||||
virtual_tenant_id: Target virtual tenant UUID
|
||||
demo_account_type: Type of demo account
|
||||
session_id: Originating session ID for tracing
|
||||
session_created_at: Session creation timestamp for date adjustment
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
Dictionary with cloning results
|
||||
|
||||
Raises:
|
||||
HTTPException: On validation or cloning errors
|
||||
"""
|
||||
start_time = datetime.now(timezone.utc)
|
||||
|
||||
try:
|
||||
# Validate UUIDs
|
||||
virtual_uuid = uuid.UUID(virtual_tenant_id)
|
||||
|
||||
# Parse session creation time for date adjustment
|
||||
if session_created_at:
|
||||
try:
|
||||
session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
|
||||
except (ValueError, AttributeError):
|
||||
session_time = start_time
|
||||
else:
|
||||
session_time = start_time
|
||||
|
||||
logger.info(
|
||||
"Starting forecasting data cloning with date adjustment",
|
||||
base_tenant_id=base_tenant_id,
|
||||
virtual_tenant_id=str(virtual_uuid),
|
||||
demo_account_type=demo_account_type,
|
||||
session_id=session_id,
|
||||
session_time=session_time.isoformat()
|
||||
)
|
||||
|
||||
# Load seed data using shared utility
|
||||
try:
|
||||
from shared.utils.seed_data_paths import get_seed_data_path
|
||||
|
||||
if demo_account_type == "enterprise":
|
||||
profile = "enterprise"
|
||||
else:
|
||||
profile = "professional"
|
||||
|
||||
json_file = get_seed_data_path(profile, "10-forecasting.json")
|
||||
|
||||
except ImportError:
|
||||
# Fallback to original path
|
||||
seed_data_dir = Path(__file__).parent.parent.parent.parent / "shared" / "demo" / "fixtures"
|
||||
if demo_account_type == "enterprise":
|
||||
json_file = seed_data_dir / "enterprise" / "parent" / "10-forecasting.json"
|
||||
else:
|
||||
json_file = seed_data_dir / "professional" / "10-forecasting.json"
|
||||
|
||||
if not json_file.exists():
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Seed data file not found: {json_file}"
|
||||
)
|
||||
|
||||
# Load JSON data
|
||||
with open(json_file, 'r', encoding='utf-8') as f:
|
||||
seed_data = json.load(f)
|
||||
|
||||
# Check if data already exists for this virtual tenant (idempotency)
|
||||
existing_check = await db.execute(
|
||||
select(Forecast).where(Forecast.tenant_id == virtual_uuid).limit(1)
|
||||
)
|
||||
existing_forecast = existing_check.scalar_one_or_none()
|
||||
|
||||
if existing_forecast:
|
||||
logger.warning(
|
||||
"Demo data already exists, skipping clone",
|
||||
virtual_tenant_id=str(virtual_uuid)
|
||||
)
|
||||
return {
|
||||
"status": "skipped",
|
||||
"reason": "Data already exists",
|
||||
"records_cloned": 0
|
||||
}
|
||||
|
||||
# Track cloning statistics
|
||||
stats = {
|
||||
"forecasts": 0,
|
||||
"prediction_batches": 0
|
||||
}
|
||||
|
||||
        # Transform and insert forecasts
        from shared.utils.demo_id_transformer import transform_id

        for forecast_data in seed_data.get('forecasts', []):
            # Transform ID using XOR (validate the seed UUID first)
            try:
                uuid.UUID(forecast_data['id'])  # validates seed ID format
                transformed_id = transform_id(forecast_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse UUIDs for ID transformation",
                             forecast_id=forecast_data['id'],
                             virtual_tenant_id=virtual_tenant_id,
                             error=str(e))
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid UUID format in forecast data: {str(e)}"
                )

            # Transform dates using the proper parse_date_field function
            for date_field in ['forecast_date', 'created_at']:
                if date_field in forecast_data:
                    # Capture the original value before any overwrite so the
                    # fallback warnings log what was actually in the seed data
                    original_value = forecast_data[date_field]
                    try:
                        parsed_date = parse_date_field(
                            original_value,
                            session_time,
                            date_field
                        )
                        if parsed_date:
                            forecast_data[date_field] = parsed_date
                        else:
                            # If parsing fails, use session_time as fallback
                            forecast_data[date_field] = session_time
                            logger.warning("Using fallback date for failed parsing",
                                           date_field=date_field,
                                           original_value=original_value)
                    except Exception as e:
                        logger.warning("Failed to parse date, using fallback",
                                       date_field=date_field,
                                       date_value=original_value,
                                       error=str(e))
                        forecast_data[date_field] = session_time

            # Map product_id to inventory_product_id if needed
            inventory_product_id_str = forecast_data.get('inventory_product_id') or forecast_data.get('product_id')
            # Convert to UUID if it's a string
            if isinstance(inventory_product_id_str, str):
                inventory_product_id = uuid.UUID(inventory_product_id_str)
            else:
                inventory_product_id = inventory_product_id_str

            # Map predicted_quantity to predicted_demand if needed
            predicted_demand = forecast_data.get('predicted_demand') or forecast_data.get('predicted_quantity')

            # Set default location if not provided in seed data
            location = forecast_data.get('location') or "Main Bakery"

            # Get or calculate forecast date
            forecast_date = forecast_data.get('forecast_date')
            if not forecast_date:
                forecast_date = session_time

            # Calculate day_of_week from forecast_date if not provided
            # day_of_week should be 0-6 (Monday=0, Sunday=6)
            day_of_week = forecast_data.get('day_of_week')
            if day_of_week is None and forecast_date:
                day_of_week = forecast_date.weekday()

            # Calculate is_weekend from day_of_week only when the seed data
            # did not supply it (a supplied value must not be overwritten)
            is_weekend = forecast_data.get('is_weekend')
            if is_weekend is None:
                is_weekend = day_of_week >= 5 if day_of_week is not None else False  # Saturday=5, Sunday=6

            # Create forecast
            new_forecast = Forecast(
                id=transformed_id,
                tenant_id=virtual_uuid,
                inventory_product_id=inventory_product_id,
                product_name=forecast_data.get('product_name'),
                location=location,
                forecast_date=forecast_date,
                created_at=forecast_data.get('created_at', session_time),
                predicted_demand=predicted_demand,
                confidence_lower=forecast_data.get('confidence_lower', max(0.0, float(predicted_demand or 0.0) * 0.8)),
                confidence_upper=forecast_data.get('confidence_upper', max(0.0, float(predicted_demand or 0.0) * 1.2)),
                confidence_level=forecast_data.get('confidence_level', 0.8),
                model_id=forecast_data.get('model_id') or 'default-fallback-model',
                model_version=forecast_data.get('model_version') or '1.0',
                algorithm=forecast_data.get('algorithm', 'prophet'),
                business_type=forecast_data.get('business_type', 'individual'),
                day_of_week=day_of_week,
                is_holiday=forecast_data.get('is_holiday', False),
                is_weekend=is_weekend,
                weather_temperature=forecast_data.get('weather_temperature'),
                weather_precipitation=forecast_data.get('weather_precipitation'),
                weather_description=forecast_data.get('weather_description'),
                traffic_volume=forecast_data.get('traffic_volume'),
                processing_time_ms=forecast_data.get('processing_time_ms'),
                features_used=forecast_data.get('features_used')
            )
            db.add(new_forecast)
            stats["forecasts"] += 1

        # Transform and insert prediction batches
        for batch_data in seed_data.get('prediction_batches', []):
            # Transform ID using XOR (validate the seed UUID first)
            try:
                uuid.UUID(batch_data['id'])  # validates seed ID format
                transformed_id = transform_id(batch_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse UUIDs for ID transformation",
                             batch_id=batch_data['id'],
                             virtual_tenant_id=virtual_tenant_id,
                             error=str(e))
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid UUID format in batch data: {str(e)}"
                )

            # Create prediction batch
            # Handle field mapping: batch_id -> batch_name, total_forecasts -> total_products
            batch_name = batch_data.get('batch_name') or batch_data.get('batch_id') or f"Batch-{transformed_id}"
            total_products = batch_data.get('total_products') or batch_data.get('total_forecasts') or 0
            completed_products = batch_data.get('completed_products') or (total_products if batch_data.get('status') == 'COMPLETED' else 0)

            # Parse dates (accept created_at or prediction_date for requested_at)
            requested_at_raw = batch_data.get('requested_at') or batch_data.get('created_at') or batch_data.get('prediction_date')
            requested_at = parse_date_field(requested_at_raw, session_time, 'requested_at') if requested_at_raw else session_time

            completed_at_raw = batch_data.get('completed_at')
            completed_at = parse_date_field(completed_at_raw, session_time, 'completed_at') if completed_at_raw else None

            new_batch = PredictionBatch(
                id=transformed_id,
                tenant_id=virtual_uuid,
                batch_name=batch_name,
                requested_at=requested_at,
                completed_at=completed_at,
                status=batch_data.get('status', 'completed'),
                total_products=total_products,
                completed_products=completed_products,
                failed_products=batch_data.get('failed_products', 0),
                forecast_days=batch_data.get('forecast_days', 7),
                business_type=batch_data.get('business_type', 'individual'),
                error_message=batch_data.get('error_message'),
                processing_time_ms=batch_data.get('processing_time_ms'),
                cancelled_by=batch_data.get('cancelled_by')
            )
            db.add(new_batch)
            stats["prediction_batches"] += 1

        # Commit all changes
        await db.commit()

        total_records = sum(stats.values())
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
            "Forecasting data cloned successfully",
            virtual_tenant_id=str(virtual_uuid),
            records_cloned=total_records,
            duration_ms=duration_ms,
            forecasts_cloned=stats["forecasts"],
            batches_cloned=stats["prediction_batches"]
        )

        return {
            "service": "forecasting",
            "status": "completed",
            "records_cloned": total_records,
            "duration_ms": duration_ms,
            "details": {
                "forecasts": stats["forecasts"],
                "prediction_batches": stats["prediction_batches"],
                "virtual_tenant_id": str(virtual_uuid)
            }
        }

    except ValueError as e:
        logger.error("Invalid UUID format", error=str(e), virtual_tenant_id=virtual_tenant_id)
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")

    except Exception as e:
        logger.error(
            "Failed to clone forecasting data",
            error=str(e),
            virtual_tenant_id=virtual_tenant_id,
            exc_info=True
        )

        # Rollback on error
        await db.rollback()

        return {
            "service": "forecasting",
            "status": "failed",
            "records_cloned": 0,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
            "error": str(e)
        }
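

# ----------------------------------------------------------------
# Note on the XOR-based ID transformation used above: transform_id
# (shared.utils.demo_id_transformer) is expected to derive a
# deterministic per-tenant UUID from a seed UUID. A minimal sketch of
# how such a helper could work is shown below for reference only; the
# actual shared implementation may differ.
#
#     def _xor_transform_sketch(seed_id: str, tenant_uuid: uuid.UUID) -> uuid.UUID:
#         seed_bytes = uuid.UUID(seed_id).bytes
#         tenant_bytes = tenant_uuid.bytes
#         return uuid.UUID(bytes=bytes(a ^ b for a, b in zip(seed_bytes, tenant_bytes)))
#
# XOR makes the mapping reversible (applying it twice with the same
# tenant UUID returns the seed ID) and collision-free per tenant.
# ----------------------------------------------------------------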


@router.get("/clone/health")
async def clone_health_check():
    """
    Health check for internal cloning endpoint
    Used by orchestrator to verify service availability
    """
    return {
        "service": "forecasting",
        "clone_endpoint": "available",
        "version": "2.0.0"
    }


@router.delete("/tenant/{virtual_tenant_id}")
async def delete_demo_tenant_data(
    virtual_tenant_id: uuid.UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Delete all demo data for a virtual tenant.
    This endpoint is idempotent - safe to call multiple times.
    """
    from sqlalchemy import delete

    start_time = datetime.now(timezone.utc)

    records_deleted = {
        "forecasts": 0,
        "prediction_batches": 0,
        "total": 0
    }

    try:
        # Delete in reverse dependency order

        # 1. Delete prediction batches
        result = await db.execute(
            delete(PredictionBatch)
            .where(PredictionBatch.tenant_id == virtual_tenant_id)
        )
        records_deleted["prediction_batches"] = result.rowcount

        # 2. Delete forecasts
        result = await db.execute(
            delete(Forecast)
            .where(Forecast.tenant_id == virtual_tenant_id)
        )
        records_deleted["forecasts"] = result.rowcount

        # Sum only the per-table counters, not the "total" key itself
        records_deleted["total"] = records_deleted["forecasts"] + records_deleted["prediction_batches"]

        await db.commit()

        logger.info(
            "demo_data_deleted",
            service="forecasting",
            virtual_tenant_id=str(virtual_tenant_id),
            records_deleted=records_deleted
        )

        return {
            "service": "forecasting",
            "status": "deleted",
            "virtual_tenant_id": str(virtual_tenant_id),
            "records_deleted": records_deleted,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
        }

    except Exception as e:
        await db.rollback()
        logger.error(
            "demo_data_deletion_failed",
            service="forecasting",
            virtual_tenant_id=str(virtual_tenant_id),
            error=str(e)
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to delete demo data: {str(e)}"
        )
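

# ----------------------------------------------------------------
# Note on date adjustment: parse_date_field (used throughout the clone
# endpoint above) is expected to rebase seed timestamps relative to the
# demo session's creation time, so cloned data always looks recent. A
# hedged sketch of the idea, assuming seed files can carry day offsets:
#
#     def _rebase_sketch(offset_days: int, session_time: datetime) -> datetime:
#         # e.g. offset_days = -3 -> "three days before the session started"
#         return session_time + timedelta(days=offset_days)
#
# The real helper also accepts absolute ISO timestamps; see its
# definition for the authoritative behavior.
# ----------------------------------------------------------------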

959
services/forecasting/app/api/ml_insights.py
Normal file
@@ -0,0 +1,959 @@
"""
ML Insights API Endpoints for Forecasting Service

Provides endpoints to trigger ML insight generation for:
- Dynamic business rules learning
- Demand pattern analysis
- Seasonal trend detection
"""

from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks, Request
from pydantic import BaseModel, Field
from typing import Optional, List
from uuid import UUID
from datetime import datetime, timedelta
import structlog
import pandas as pd

from app.core.database import get_db
from sqlalchemy.ext.asyncio import AsyncSession

logger = structlog.get_logger()

router = APIRouter(
    prefix="/api/v1/tenants/{tenant_id}/forecasting/ml/insights",
    tags=["ML Insights"]
)


# ================================================================
# REQUEST/RESPONSE SCHEMAS
# ================================================================

class RulesGenerationRequest(BaseModel):
    """Request schema for rules generation"""
    product_ids: Optional[List[str]] = Field(
        None,
        description="Specific product IDs to analyze. If None, analyzes all products"
    )
    lookback_days: int = Field(
        90,
        description="Days of historical data to analyze",
        ge=30,
        le=365
    )
    min_samples: int = Field(
        10,
        description="Minimum samples required for rule learning",
        ge=5,
        le=100
    )


class RulesGenerationResponse(BaseModel):
    """Response schema for rules generation"""
    success: bool
    message: str
    tenant_id: str
    products_analyzed: int
    total_insights_generated: int
    total_insights_posted: int
    insights_by_product: dict
    errors: List[str] = []


class DemandAnalysisRequest(BaseModel):
    """Request schema for demand analysis"""
    product_ids: Optional[List[str]] = Field(
        None,
        description="Specific product IDs to analyze. If None, analyzes all products"
    )
    lookback_days: int = Field(
        90,
        description="Days of historical data to analyze",
        ge=30,
        le=365
    )
    forecast_horizon_days: int = Field(
        30,
        description="Days to forecast ahead",
        ge=7,
        le=90
    )


class DemandAnalysisResponse(BaseModel):
    """Response schema for demand analysis"""
    success: bool
    message: str
    tenant_id: str
    products_analyzed: int
    total_insights_generated: int
    total_insights_posted: int
    insights_by_product: dict
    errors: List[str] = []


class BusinessRulesAnalysisRequest(BaseModel):
    """Request schema for business rules analysis"""
    product_ids: Optional[List[str]] = Field(
        None,
        description="Specific product IDs to analyze. If None, analyzes all products"
    )
    lookback_days: int = Field(
        90,
        description="Days of historical data to analyze",
        ge=30,
        le=365
    )
    min_samples: int = Field(
        10,
        description="Minimum samples required for rule analysis",
        ge=5,
        le=100
    )


class BusinessRulesAnalysisResponse(BaseModel):
    """Response schema for business rules analysis"""
    success: bool
    message: str
    tenant_id: str
    products_analyzed: int
    total_insights_generated: int
    total_insights_posted: int
    insights_by_product: dict
    errors: List[str] = []
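

# Example payload for RulesGenerationRequest (illustrative values only;
# the UUID placeholder is not a real record):
#
#   {
#       "product_ids": ["<inventory product UUID>"],
#       "lookback_days": 90,
#       "min_samples": 10
#   }
#
# Omitting "product_ids" (or sending null) analyzes up to 10 of the
# tenant's products, per the endpoint implementations below.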


# ================================================================
# API ENDPOINTS
# ================================================================

@router.post("/generate-rules", response_model=RulesGenerationResponse)
async def trigger_rules_generation(
    tenant_id: str,
    request_data: RulesGenerationRequest,
    request: Request,
    db: AsyncSession = Depends(get_db)
):
    """
    Trigger dynamic business rules learning from historical sales data.

    This endpoint:
    1. Fetches historical sales data for specified products
    2. Runs the RulesOrchestrator to learn patterns
    3. Generates insights about optimal business rules
    4. Posts insights to AI Insights Service

    Args:
        tenant_id: Tenant UUID
        request_data: Rules generation parameters
        db: Database session

    Returns:
        RulesGenerationResponse with generation results
    """
    logger.info(
        "ML insights rules generation requested",
        tenant_id=tenant_id,
        product_ids=request_data.product_ids,
        lookback_days=request_data.lookback_days
    )

    try:
        # Import ML orchestrator and clients
        from app.ml.rules_orchestrator import RulesOrchestrator
        from shared.clients.sales_client import SalesServiceClient
        from shared.clients.inventory_client import InventoryServiceClient
        from app.core.config import settings

        # Get event publisher from app state
        event_publisher = getattr(request.app.state, 'event_publisher', None)

        # Initialize orchestrator and clients
        orchestrator = RulesOrchestrator(event_publisher=event_publisher)
        inventory_client = InventoryServiceClient(settings)

        # Get products to analyze from inventory service via API
        if request_data.product_ids:
            # Fetch specific products
            products = []
            for product_id in request_data.product_ids:
                product = await inventory_client.get_ingredient_by_id(
                    ingredient_id=UUID(product_id),
                    tenant_id=tenant_id
                )
                if product:
                    products.append(product)
        else:
            # Fetch all products for the tenant (capped at 10 to prevent timeouts)
            all_products = await inventory_client.get_all_ingredients(tenant_id=tenant_id)
            products = all_products[:10]

        if not products:
            return RulesGenerationResponse(
                success=False,
                message="No products found for analysis",
                tenant_id=tenant_id,
                products_analyzed=0,
                total_insights_generated=0,
                total_insights_posted=0,
                insights_by_product={},
                errors=["No products found"]
            )

        # Initialize sales client to fetch historical data
        sales_client = SalesServiceClient(config=settings, calling_service_name="forecasting")

        # Calculate date range
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=request_data.lookback_days)

        # Process each product
        total_insights_generated = 0
        total_insights_posted = 0
        insights_by_product = {}
        errors = []

        for product in products:
            # Assign identifiers before the try block so the except clause
            # below can always reference them safely
            product_id = str(product.get('id', 'unknown'))
            product_name = product.get('name', 'Unknown')
            try:
                logger.info(f"Analyzing product {product_name} ({product_id})")

                # Fetch sales data for product
                sales_data = await sales_client.get_sales_data(
                    tenant_id=tenant_id,
                    product_id=product_id,
                    start_date=start_date.strftime('%Y-%m-%d'),
                    end_date=end_date.strftime('%Y-%m-%d')
                )

                if not sales_data:
                    logger.warning(f"No sales data for product {product_id}")
                    continue

                # Convert to DataFrame
                sales_df = pd.DataFrame(sales_data)

                if len(sales_df) < request_data.min_samples:
                    logger.warning(
                        f"Insufficient data for product {product_id}: "
                        f"{len(sales_df)} samples < {request_data.min_samples} required"
                    )
                    continue

                # Check what columns are available and map to expected format
                logger.debug(f"Sales data columns for product {product_id}: {sales_df.columns.tolist()}")

                # Map common field names to 'quantity' and 'date'
                if 'quantity' not in sales_df.columns:
                    if 'total_quantity' in sales_df.columns:
                        sales_df['quantity'] = sales_df['total_quantity']
                    elif 'amount' in sales_df.columns:
                        sales_df['quantity'] = sales_df['amount']
                    else:
                        logger.warning(f"No quantity field found for product {product_id}, skipping")
                        continue

                if 'date' not in sales_df.columns:
                    if 'sale_date' in sales_df.columns:
                        sales_df['date'] = sales_df['sale_date']
                    else:
                        logger.warning(f"No date field found for product {product_id}, skipping")
                        continue

                # Prepare sales data with required columns
                sales_df['date'] = pd.to_datetime(sales_df['date'])
                sales_df['quantity'] = sales_df['quantity'].astype(float)
                sales_df['day_of_week'] = sales_df['date'].dt.dayofweek

                # NOTE: Holiday detection for historical data requires:
                # 1. Tenant location context (calendar_id)
                # 2. Bulk holiday check API (currently single-date only)
                # 3. Historical calendar data
                # For real-time forecasts, holiday detection IS implemented via data_client.py
                sales_df['is_holiday'] = False

                # NOTE: Weather data for historical analysis requires:
                # 1. Historical weather API integration
                # 2. Tenant location coordinates
                # For real-time forecasts, weather data IS fetched via external service
                sales_df['weather'] = 'unknown'

                # Run rules learning
                results = await orchestrator.learn_and_post_rules(
                    tenant_id=tenant_id,
                    inventory_product_id=product_id,
                    sales_data=sales_df,
                    external_data=None,
                    min_samples=request_data.min_samples
                )

                # Track results
                total_insights_generated += results['insights_generated']
                total_insights_posted += results['insights_posted']
                insights_by_product[product_id] = {
                    'product_name': product_name,
                    'insights_posted': results['insights_posted'],
                    'rules_learned': len(results['rules'])
                }

                logger.info(
                    f"Product {product_id} analysis complete",
                    insights_posted=results['insights_posted']
                )

            except Exception as e:
                error_msg = f"Error analyzing product {product_id}: {str(e)}"
                logger.error(error_msg, exc_info=True)
                errors.append(error_msg)

        # Close orchestrator
        await orchestrator.close()

        # Build response
        response = RulesGenerationResponse(
            success=total_insights_posted > 0,
            message=f"Successfully generated {total_insights_posted} insights from {len(products)} products",
            tenant_id=tenant_id,
            products_analyzed=len(products),
            total_insights_generated=total_insights_generated,
            total_insights_posted=total_insights_posted,
            insights_by_product=insights_by_product,
            errors=errors
        )

        logger.info(
            "ML insights rules generation complete",
            tenant_id=tenant_id,
            total_insights=total_insights_posted
        )

        return response

    except Exception as e:
        logger.error(
            "ML insights rules generation failed",
            tenant_id=tenant_id,
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Rules generation failed: {str(e)}"
        )
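

# ----------------------------------------------------------------
# The column-mapping block above is repeated in each endpoint below. A
# shared helper along the lines of the following sketch could
# centralize it (the helper name is illustrative and is not wired into
# the endpoints; they still inline this logic):

def _normalize_sales_df_sketch(sales_df: pd.DataFrame) -> Optional[pd.DataFrame]:
    """Map common field name variants to 'quantity' and 'date'.

    Returns the normalized frame, or None when required fields are missing.
    Illustrative sketch only.
    """
    if 'quantity' not in sales_df.columns:
        # Try the quantity aliases seen across the endpoints in this module
        for candidate in ('total_quantity', 'amount', 'quantity_sold'):
            if candidate in sales_df.columns:
                sales_df['quantity'] = sales_df[candidate]
                break
        else:
            return None
    if 'date' not in sales_df.columns:
        if 'sale_date' not in sales_df.columns:
            return None
        sales_df['date'] = sales_df['sale_date']
    # Standard feature preparation shared by all three analyses
    sales_df['date'] = pd.to_datetime(sales_df['date'])
    sales_df['quantity'] = sales_df['quantity'].astype(float)
    sales_df['day_of_week'] = sales_df['date'].dt.dayofweek
    return sales_df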


@router.post("/analyze-demand", response_model=DemandAnalysisResponse)
async def trigger_demand_analysis(
    tenant_id: str,
    request_data: DemandAnalysisRequest,
    request: Request,
    db: AsyncSession = Depends(get_db)
):
    """
    Trigger demand pattern analysis from historical sales data.

    This endpoint:
    1. Fetches historical sales data for specified products
    2. Runs the DemandInsightsOrchestrator to analyze patterns
    3. Generates insights about demand forecasting optimization
    4. Posts insights to AI Insights Service
    5. Publishes events to RabbitMQ

    Args:
        tenant_id: Tenant UUID
        request_data: Demand analysis parameters
        request: FastAPI request object to access app state
        db: Database session

    Returns:
        DemandAnalysisResponse with analysis results
    """
    logger.info(
        "ML insights demand analysis requested",
        tenant_id=tenant_id,
        product_ids=request_data.product_ids,
        lookback_days=request_data.lookback_days
    )

    try:
        # Import ML orchestrator and clients
        from app.ml.demand_insights_orchestrator import DemandInsightsOrchestrator
        from shared.clients.sales_client import SalesServiceClient
        from shared.clients.inventory_client import InventoryServiceClient
        from app.core.config import settings

        # Get event publisher from app state
        event_publisher = getattr(request.app.state, 'event_publisher', None)

        # Initialize orchestrator and clients
        orchestrator = DemandInsightsOrchestrator(event_publisher=event_publisher)
        inventory_client = InventoryServiceClient(settings)

        # Get products to analyze from inventory service via API
        if request_data.product_ids:
            # Fetch specific products
            products = []
            for product_id in request_data.product_ids:
                product = await inventory_client.get_ingredient_by_id(
                    ingredient_id=UUID(product_id),
                    tenant_id=tenant_id
                )
                if product:
                    products.append(product)
        else:
            # Fetch all products for the tenant (capped at 10 to prevent timeouts)
            all_products = await inventory_client.get_all_ingredients(tenant_id=tenant_id)
            products = all_products[:10]

        if not products:
            return DemandAnalysisResponse(
                success=False,
                message="No products found for analysis",
                tenant_id=tenant_id,
                products_analyzed=0,
                total_insights_generated=0,
                total_insights_posted=0,
                insights_by_product={},
                errors=["No products found"]
            )

        # Initialize sales client to fetch historical data
        sales_client = SalesServiceClient(config=settings, calling_service_name="forecasting")

        # Calculate date range
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=request_data.lookback_days)

        # Process each product
        total_insights_generated = 0
        total_insights_posted = 0
        insights_by_product = {}
        errors = []

        for product in products:
            # Assign identifiers before the try block so the except clause
            # below can always reference them safely
            product_id = str(product.get('id', 'unknown'))
            product_name = product.get('name', 'Unknown')
            try:
                logger.info(f"Analyzing product {product_name} ({product_id})")

                # Fetch sales data for product
                sales_data = await sales_client.get_sales_data(
                    tenant_id=tenant_id,
                    product_id=product_id,
                    start_date=start_date.strftime('%Y-%m-%d'),
                    end_date=end_date.strftime('%Y-%m-%d')
                )

                if not sales_data:
                    logger.warning(f"No sales data for product {product_id}")
                    continue

                # Convert to DataFrame
                sales_df = pd.DataFrame(sales_data)

                if len(sales_df) < 30:  # Minimum history for demand analysis
                    logger.warning(
                        f"Insufficient data for product {product_id}: "
                        f"{len(sales_df)} samples < 30 required"
                    )
                    continue

                # Check what columns are available and map to expected format
                logger.debug(f"Sales data columns for product {product_id}: {sales_df.columns.tolist()}")

                # Map common field names to 'quantity' and 'date'
                if 'quantity' not in sales_df.columns:
                    if 'total_quantity' in sales_df.columns:
                        sales_df['quantity'] = sales_df['total_quantity']
                    elif 'amount' in sales_df.columns:
                        sales_df['quantity'] = sales_df['amount']
                    else:
                        logger.warning(f"No quantity field found for product {product_id}, skipping")
                        continue

                if 'date' not in sales_df.columns:
                    if 'sale_date' in sales_df.columns:
                        sales_df['date'] = sales_df['sale_date']
                    else:
                        logger.warning(f"No date field found for product {product_id}, skipping")
                        continue

                # Prepare sales data with required columns
                sales_df['date'] = pd.to_datetime(sales_df['date'])
                sales_df['quantity'] = sales_df['quantity'].astype(float)
                sales_df['day_of_week'] = sales_df['date'].dt.dayofweek

                # Run demand analysis
                results = await orchestrator.analyze_and_post_demand_insights(
                    tenant_id=tenant_id,
                    inventory_product_id=product_id,
                    sales_data=sales_df,
                    forecast_horizon_days=request_data.forecast_horizon_days,
                    min_history_days=request_data.lookback_days
                )

                # Track results
                total_insights_generated += results['insights_generated']
                total_insights_posted += results['insights_posted']
                insights_by_product[product_id] = {
                    'product_name': product_name,
                    'insights_posted': results['insights_posted'],
                    'trend_analysis': results.get('trend_analysis', {})
                }

                logger.info(
                    f"Product {product_id} demand analysis complete",
                    insights_posted=results['insights_posted']
                )

            except Exception as e:
                error_msg = f"Error analyzing product {product_id}: {str(e)}"
                logger.error(error_msg, exc_info=True)
                errors.append(error_msg)

        # Close orchestrator
        await orchestrator.close()

        # Build response
        response = DemandAnalysisResponse(
            success=total_insights_posted > 0,
            message=f"Successfully generated {total_insights_posted} insights from {len(products)} products",
            tenant_id=tenant_id,
            products_analyzed=len(products),
            total_insights_generated=total_insights_generated,
            total_insights_posted=total_insights_posted,
            insights_by_product=insights_by_product,
            errors=errors
        )

        logger.info(
            "ML insights demand analysis complete",
            tenant_id=tenant_id,
            total_insights=total_insights_posted
        )

        return response

    except Exception as e:
        logger.error(
            "ML insights demand analysis failed",
            tenant_id=tenant_id,
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Demand analysis failed: {str(e)}"
        )
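

# Example request (illustrative values; the tenant ID is a placeholder):
#
#   POST /api/v1/tenants/<tenant_id>/forecasting/ml/insights/analyze-demand
#   {
#       "product_ids": null,
#       "lookback_days": 90,
#       "forecast_horizon_days": 30
#   }
#
# With "product_ids": null the endpoint analyzes up to 10 of the
# tenant's products, as implemented above.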


@router.post("/analyze-business-rules", response_model=BusinessRulesAnalysisResponse)
async def trigger_business_rules_analysis(
    tenant_id: str,
    request_data: BusinessRulesAnalysisRequest,
    request: Request,
    db: AsyncSession = Depends(get_db)
):
    """
    Trigger business rules optimization analysis from historical sales data.

    This endpoint:
    1. Fetches historical sales data for specified products
    2. Runs the BusinessRulesInsightsOrchestrator to analyze rules
    3. Generates insights about business rule optimization
    4. Posts insights to AI Insights Service
    5. Publishes events to RabbitMQ

    Args:
        tenant_id: Tenant UUID
        request_data: Business rules analysis parameters
        request: FastAPI request object to access app state
        db: Database session

    Returns:
        BusinessRulesAnalysisResponse with analysis results
    """
    logger.info(
        "ML insights business rules analysis requested",
        tenant_id=tenant_id,
        product_ids=request_data.product_ids,
        lookback_days=request_data.lookback_days
    )

    try:
        # Import ML orchestrator and clients
        from app.ml.business_rules_insights_orchestrator import BusinessRulesInsightsOrchestrator
        from shared.clients.sales_client import SalesServiceClient
        from shared.clients.inventory_client import InventoryServiceClient
        from app.core.config import settings

        # Get event publisher from app state
        event_publisher = getattr(request.app.state, 'event_publisher', None)

        # Initialize orchestrator and clients
        orchestrator = BusinessRulesInsightsOrchestrator(event_publisher=event_publisher)
        inventory_client = InventoryServiceClient(settings)

        # Get products to analyze from inventory service via API
        if request_data.product_ids:
            # Fetch specific products
            products = []
            for product_id in request_data.product_ids:
                product = await inventory_client.get_ingredient_by_id(
                    ingredient_id=UUID(product_id),
                    tenant_id=tenant_id
                )
                if product:
                    products.append(product)
        else:
            # Fetch all products for the tenant (capped at 10 to prevent timeouts)
            all_products = await inventory_client.get_all_ingredients(tenant_id=tenant_id)
            products = all_products[:10]

        if not products:
            return BusinessRulesAnalysisResponse(
                success=False,
                message="No products found for analysis",
                tenant_id=tenant_id,
                products_analyzed=0,
                total_insights_generated=0,
                total_insights_posted=0,
                insights_by_product={},
                errors=["No products found"]
            )

        # Initialize sales client to fetch historical data
        sales_client = SalesServiceClient(config=settings, calling_service_name="forecasting")

        # Calculate date range
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=request_data.lookback_days)

        # Process each product
        total_insights_generated = 0
        total_insights_posted = 0
        insights_by_product = {}
        errors = []

        for product in products:
            # Assign identifiers before the try block so the except clause
            # below can always reference them safely
            product_id = str(product.get('id', 'unknown'))
            product_name = product.get('name', 'Unknown')
            try:
                logger.info(f"Analyzing product {product_name} ({product_id})")

                # Fetch sales data for product
                sales_data = await sales_client.get_sales_data(
                    tenant_id=tenant_id,
                    product_id=product_id,
                    start_date=start_date.strftime('%Y-%m-%d'),
                    end_date=end_date.strftime('%Y-%m-%d')
                )

                if not sales_data:
                    logger.warning(f"No sales data for product {product_id}")
                    continue

                # Convert to DataFrame
                sales_df = pd.DataFrame(sales_data)

                if len(sales_df) < request_data.min_samples:
                    logger.warning(
                        f"Insufficient data for product {product_id}: "
                        f"{len(sales_df)} samples < {request_data.min_samples} required"
                    )
                    continue

                # Check what columns are available and map to expected format
                logger.debug(f"Sales data columns for product {product_id}: {sales_df.columns.tolist()}")

                # Map common field names to 'quantity' and 'date'
                if 'quantity' not in sales_df.columns:
                    if 'total_quantity' in sales_df.columns:
                        sales_df['quantity'] = sales_df['total_quantity']
                    elif 'amount' in sales_df.columns:
                        sales_df['quantity'] = sales_df['amount']
                    else:
                        logger.warning(f"No quantity field found for product {product_id}, skipping")
                        continue

                if 'date' not in sales_df.columns:
                    if 'sale_date' in sales_df.columns:
                        sales_df['date'] = sales_df['sale_date']
                    else:
                        logger.warning(f"No date field found for product {product_id}, skipping")
                        continue

                # Prepare sales data with required columns
                sales_df['date'] = pd.to_datetime(sales_df['date'])
                sales_df['quantity'] = sales_df['quantity'].astype(float)
                sales_df['day_of_week'] = sales_df['date'].dt.dayofweek

                # Run business rules analysis
                results = await orchestrator.analyze_and_post_business_rules_insights(
                    tenant_id=tenant_id,
                    inventory_product_id=product_id,
                    sales_data=sales_df,
                    min_samples=request_data.min_samples
                )

                # Track results
                total_insights_generated += results['insights_generated']
                total_insights_posted += results['insights_posted']
                insights_by_product[product_id] = {
                    'product_name': product_name,
                    'insights_posted': results['insights_posted'],
                    'rules_learned': len(results.get('rules', {}))
                }

                logger.info(
                    f"Product {product_id} business rules analysis complete",
                    insights_posted=results['insights_posted']
                )

            except Exception as e:
                error_msg = f"Error analyzing product {product_id}: {str(e)}"
                logger.error(error_msg, exc_info=True)
                errors.append(error_msg)

        # Close orchestrator
        await orchestrator.close()

        # Build response
        response = BusinessRulesAnalysisResponse(
            success=total_insights_posted > 0,
            message=f"Successfully generated {total_insights_posted} insights from {len(products)} products",
            tenant_id=tenant_id,
            products_analyzed=len(products),
            total_insights_generated=total_insights_generated,
            total_insights_posted=total_insights_posted,
            insights_by_product=insights_by_product,
            errors=errors
        )

        logger.info(
            "ML insights business rules analysis complete",
            tenant_id=tenant_id,
            total_insights=total_insights_posted
        )

        return response

    except Exception as e:
        logger.error(
            "ML insights business rules analysis failed",
            tenant_id=tenant_id,
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Business rules analysis failed: {str(e)}"
        )


@router.get("/health")
async def ml_insights_health():
    """Health check for ML insights endpoints"""
    return {
        "status": "healthy",
        "service": "forecasting-ml-insights",
        "endpoints": [
            "POST /ml/insights/generate-rules",
            "POST /ml/insights/analyze-demand",
            "POST /ml/insights/analyze-business-rules"
        ]
    }


# ================================================================
# INTERNAL ML INSIGHTS ENDPOINTS (for demo session service)
# ================================================================

internal_router = APIRouter(tags=["Internal ML"])


@internal_router.post("/api/v1/tenants/{tenant_id}/forecasting/internal/ml/generate-demand-insights")
async def trigger_demand_insights_internal(
    tenant_id: str,
    request: Request,
    db: AsyncSession = Depends(get_db)
):
    """
    Internal endpoint to trigger demand forecasting insights for a tenant.

    This endpoint is called by the demo-session service after cloning to generate
    AI insights from the seeded forecast data.

    Args:
        tenant_id: Tenant UUID
        request: FastAPI request object to access app state
        db: Database session

    Returns:
        Dict with insights generation results
    """
    logger.info(
        "Internal demand insights generation triggered",
        tenant_id=tenant_id
    )

    try:
        # Import ML orchestrator and clients
        from app.ml.demand_insights_orchestrator import DemandInsightsOrchestrator
        from shared.clients.sales_client import SalesServiceClient
        from shared.clients.inventory_client import InventoryServiceClient
        from app.core.config import settings

        # Get event publisher from app state
        event_publisher = getattr(request.app.state, 'event_publisher', None)

        # Initialize orchestrator and clients
        orchestrator = DemandInsightsOrchestrator(event_publisher=event_publisher)
        inventory_client = InventoryServiceClient(settings)

        # Get all products for tenant (limit to 10 for performance)
        all_products = await inventory_client.get_all_ingredients(tenant_id=tenant_id)
        products = all_products[:10] if all_products else []

        logger.info(
            "Retrieved products from inventory service",
            tenant_id=tenant_id,
            product_count=len(products)
        )

        if not products:
            return {
                "success": False,
                "message": "No products found for analysis",
                "tenant_id": tenant_id,
                "products_analyzed": 0,
                "insights_posted": 0
            }

        # Initialize sales client
        sales_client = SalesServiceClient(config=settings, calling_service_name="forecasting")

        # Calculate date range (90 days lookback)
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=90)

        # Process each product
        total_insights_generated = 0
        total_insights_posted = 0

        for product in products:
            # Assign identifiers before the try block so the except clause
            # below can always reference them safely
            product_id = str(product.get('id', 'unknown'))
            product_name = product.get('name', 'Unknown Product')
            try:
                logger.debug(
                    "Analyzing demand for product",
                    tenant_id=tenant_id,
                    product_id=product_id,
                    product_name=product_name
                )

                # Fetch historical sales data
                sales_data_raw = await sales_client.get_sales_data(
                    tenant_id=tenant_id,
                    product_id=product_id,
                    start_date=start_date.strftime('%Y-%m-%d'),
                    end_date=end_date.strftime('%Y-%m-%d')
                )

                if not sales_data_raw or len(sales_data_raw) < 10:
                    logger.debug(
                        "Insufficient sales data for product",
                        product_id=product_id,
                        sales_records=len(sales_data_raw) if sales_data_raw else 0
                    )
                    continue

                # Convert to DataFrame
                sales_df = pd.DataFrame(sales_data_raw)

                # Map field names to expected format
                if 'quantity' not in sales_df.columns:
                    if 'total_quantity' in sales_df.columns:
                        sales_df['quantity'] = sales_df['total_quantity']
                    elif 'quantity_sold' in sales_df.columns:
                        sales_df['quantity'] = sales_df['quantity_sold']
                    else:
                        logger.warning(
                            "No quantity field found for product",
                            product_id=product_id
                        )
                        continue

                if 'date' not in sales_df.columns:
                    if 'sale_date' in sales_df.columns:
                        sales_df['date'] = sales_df['sale_date']
                    else:
                        logger.warning(
                            "No date field found for product",
                            product_id=product_id
                        )
                        continue

                # Run demand insights orchestrator
                results = await orchestrator.analyze_and_post_demand_insights(
                    tenant_id=tenant_id,
                    inventory_product_id=product_id,
                    sales_data=sales_df,
                    forecast_horizon_days=30,
                    min_history_days=90
                )

                total_insights_generated += results['insights_generated']
                total_insights_posted += results['insights_posted']

                logger.info(
                    "Demand insights generated for product",
                    tenant_id=tenant_id,
                    product_id=product_id,
                    insights_posted=results['insights_posted']
                )

            except Exception as e:
                logger.warning(
                    "Failed to analyze product demand (non-fatal)",
                    tenant_id=tenant_id,
                    product_id=product_id,
                    error=str(e)
                )
                continue

        logger.info(
            "Internal demand insights generation complete",
            tenant_id=tenant_id,
            products_analyzed=len(products),
            insights_generated=total_insights_generated,
            insights_posted=total_insights_posted
        )

        return {
            "success": True,
            "message": f"Generated {total_insights_posted} demand forecasting insights",
            "tenant_id": tenant_id,
            "products_analyzed": len(products),
            "insights_posted": total_insights_posted
        }

    except Exception as e:
        logger.error(
            "Internal demand insights generation failed",
            tenant_id=tenant_id,
            error=str(e),
            exc_info=True
        )
        return {
            "success": False,
            "message": f"Demand insights generation failed: {str(e)}",
            "tenant_id": tenant_id,
            "products_analyzed": 0,
            "insights_posted": 0
        }
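
# Illustrative caller sketch (how the demo-session service might invoke
# this internal endpoint after cloning; FORECASTING_URL is a placeholder,
# not a real setting name):
#
#     async with httpx.AsyncClient() as client:
#         resp = await client.post(
#             f"{FORECASTING_URL}/api/v1/tenants/{tenant_id}"
#             "/forecasting/internal/ml/generate-demand-insights"
#         )
#         resp.raise_for_status()
#
# Note that the endpoint reports failures in its JSON body rather than
# via HTTP status codes, so callers should also check "success".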

287
services/forecasting/app/api/performance_monitoring.py
Normal file
@@ -0,0 +1,287 @@
# ================================================================
# services/forecasting/app/api/performance_monitoring.py
# ================================================================
"""
Performance Monitoring API - Track and analyze forecast accuracy over time
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, status
from typing import Dict, Any
from uuid import UUID
import structlog

from pydantic import BaseModel, Field
from app.services.performance_monitoring_service import PerformanceMonitoringService
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from app.core.database import get_db
from sqlalchemy.ext.asyncio import AsyncSession

route_builder = RouteBuilder('forecasting')
router = APIRouter(tags=["performance-monitoring"])
logger = structlog.get_logger()


# ================================================================
# Request/Response Schemas
# ================================================================

# Note: the GET endpoints below read these values from query parameters;
# the first three request models are currently unused and appear to be
# kept for API documentation.

class AccuracySummaryRequest(BaseModel):
    """Request model for accuracy summary"""
    days: int = Field(default=30, ge=1, le=365, description="Analysis period in days")


class DegradationAnalysisRequest(BaseModel):
    """Request model for degradation analysis"""
    lookback_days: int = Field(default=30, ge=7, le=365, description="Days to analyze")


class ModelAgeCheckRequest(BaseModel):
    """Request model for model age check"""
    max_age_days: int = Field(default=30, ge=1, le=90, description="Max acceptable model age")


class PerformanceReportRequest(BaseModel):
    """Request model for comprehensive performance report"""
    days: int = Field(default=30, ge=1, le=365, description="Analysis period in days")


# ================================================================
# Endpoints
# ================================================================

@router.get(
    route_builder.build_base_route("monitoring/accuracy-summary"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def get_accuracy_summary(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=1, le=365, description="Analysis period in days"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get the forecast accuracy summary for a recent period

    Returns overall metrics, validation coverage, and health status.
    """
    try:
        logger.info(
            "Getting accuracy summary",
            tenant_id=tenant_id,
            days=days,
            user_id=current_user.get("user_id")
        )

        service = PerformanceMonitoringService(db)

        summary = await service.get_accuracy_summary(
            tenant_id=tenant_id,
            days=days
        )

        return summary

    except Exception as e:
        logger.error(
            "Failed to get accuracy summary",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get accuracy summary: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("monitoring/degradation-analysis"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def analyze_performance_degradation(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    lookback_days: int = Query(30, ge=7, le=365, description="Days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Detect whether forecast performance is degrading over time

    Compares the first half of the period against the second half and
    identifies poor performers.
    """
    try:
        logger.info(
            "Analyzing performance degradation",
            tenant_id=tenant_id,
            lookback_days=lookback_days,
            user_id=current_user.get("user_id")
        )

        service = PerformanceMonitoringService(db)

        analysis = await service.detect_performance_degradation(
            tenant_id=tenant_id,
            lookback_days=lookback_days
        )

        return analysis

    except Exception as e:
        logger.error(
            "Failed to analyze degradation",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to analyze degradation: {str(e)}"
        )
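

# A hedged sketch of the halves comparison described in the docstring
# above (the real logic lives in PerformanceMonitoringService and may
# differ; the 10% threshold is illustrative, not the service's value):
#
#     first_half_mape = mean(run.mape for run in runs[: len(runs) // 2])
#     second_half_mape = mean(run.mape for run in runs[len(runs) // 2 :])
#     degrading = second_half_mape > first_half_mape * 1.10  # >10% worse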


@router.get(
    route_builder.build_base_route("monitoring/model-age"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def check_model_age(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    max_age_days: int = Query(30, ge=1, le=90, description="Max acceptable model age"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Check whether models are outdated and need retraining

    Returns the models in use and identifies those needing updates.
    """
    try:
        logger.info(
            "Checking model age",
            tenant_id=tenant_id,
            max_age_days=max_age_days,
            user_id=current_user.get("user_id")
        )

        service = PerformanceMonitoringService(db)

        analysis = await service.check_model_age(
            tenant_id=tenant_id,
            max_age_days=max_age_days
        )

        return analysis

    except Exception as e:
        logger.error(
            "Failed to check model age",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to check model age: {str(e)}"
        )


@router.post(
    route_builder.build_base_route("monitoring/performance-report"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def generate_performance_report(
    request: PerformanceReportRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Generate a comprehensive performance report

    Combines the accuracy summary, degradation analysis, and model age
    check with actionable recommendations.
    """
    try:
        logger.info(
            "Generating performance report",
            tenant_id=tenant_id,
            days=request.days,
            user_id=current_user.get("user_id")
        )

        service = PerformanceMonitoringService(db)

        report = await service.generate_performance_report(
            tenant_id=tenant_id,
            days=request.days
        )

        return report

    except Exception as e:
        logger.error(
            "Failed to generate performance report",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to generate performance report: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("monitoring/health"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def get_health_status(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get a quick health status for dashboards

    Returns simplified health metrics for UI display.
    """
    try:
        service = PerformanceMonitoringService(db)

        # Get 7-day summary for a quick health check
        summary = await service.get_accuracy_summary(
            tenant_id=tenant_id,
            days=7
        )

        if summary.get("status") == "no_data":
            return {
                "status": "unknown",
                "message": "No recent validation data available",
                "health_status": "unknown"
            }

        # Use .get() with a default so a missing "average_metrics" key
        # cannot raise a KeyError here
        average_metrics = summary.get("average_metrics", {})
        return {
            "status": "ok",
            "health_status": summary.get("health_status"),
            "current_mape": average_metrics.get("mape"),
            "accuracy_percentage": average_metrics.get("accuracy_percentage"),
            "validation_coverage": summary.get("coverage_percentage"),
            "last_7_days": {
                "validation_runs": summary.get("validation_runs"),
                "forecasts_evaluated": summary.get("total_forecasts_evaluated")
            }
        }

    except Exception as e:
        logger.error(
            "Failed to get health status",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get health status: {str(e)}"
        )

297
services/forecasting/app/api/retraining.py
Normal file
@@ -0,0 +1,297 @@
# ================================================================
# services/forecasting/app/api/retraining.py
# ================================================================
"""
Retraining API - Trigger and manage model retraining based on performance
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, status
from typing import Dict, Any, List
from uuid import UUID
import structlog

from pydantic import BaseModel, Field
from app.services.retraining_trigger_service import RetrainingTriggerService
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from app.core.database import get_db
from sqlalchemy.ext.asyncio import AsyncSession

route_builder = RouteBuilder('forecasting')
router = APIRouter(tags=["retraining"])
logger = structlog.get_logger()


# ================================================================
# Request/Response Schemas
# ================================================================

class EvaluateRetrainingRequest(BaseModel):
    """Request model for retraining evaluation"""
    auto_trigger: bool = Field(
        default=False,
        description="Automatically trigger retraining for poor performers"
    )


class TriggerProductRetrainingRequest(BaseModel):
    """Request model for single product retraining"""
    inventory_product_id: UUID = Field(..., description="Product to retrain")
    reason: str = Field(..., description="Reason for retraining")
    priority: str = Field(
        default="normal",
        description="Priority level: low, normal, high"
    )


class TriggerBulkRetrainingRequest(BaseModel):
    """Request model for bulk retraining"""
    product_ids: List[UUID] = Field(..., description="List of products to retrain")
    reason: str = Field(
        default="Bulk retraining requested",
        description="Reason for bulk retraining"
    )


class ScheduledRetrainingCheckRequest(BaseModel):
    """Request model for scheduled retraining check"""
    max_model_age_days: int = Field(
        default=30,
        ge=1,
        le=90,
        description="Maximum acceptable model age"
    )


# ================================================================
# Endpoints
# ================================================================

@router.post(
    route_builder.build_base_route("retraining/evaluate"),
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner'])
async def evaluate_retraining_needs(
    request: EvaluateRetrainingRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Evaluate performance and optionally trigger retraining

    Analyzes 30-day performance and identifies products needing retraining.
    If auto_trigger=true, automatically triggers retraining for poor performers.
    """
    try:
        logger.info(
            "Evaluating retraining needs",
            tenant_id=tenant_id,
            auto_trigger=request.auto_trigger,
            user_id=current_user.get("user_id")
        )

        service = RetrainingTriggerService(db)

        result = await service.evaluate_and_trigger_retraining(
            tenant_id=tenant_id,
            auto_trigger=request.auto_trigger
        )

        return result

    except Exception as e:
        logger.error(
            "Failed to evaluate retraining needs",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to evaluate retraining: {str(e)}"
        )
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("retraining/trigger-product"),
|
||||
status_code=status.HTTP_200_OK
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def trigger_product_retraining(
|
||||
request: TriggerProductRetrainingRequest,
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Trigger retraining for a specific product
|
||||
|
||||
Manually trigger model retraining for a single product.
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"Triggering product retraining",
|
||||
tenant_id=tenant_id,
|
||||
product_id=request.inventory_product_id,
|
||||
reason=request.reason,
|
||||
user_id=current_user.get("user_id")
|
||||
)
|
||||
|
||||
service = RetrainingTriggerService(db)
|
||||
|
||||
result = await service._trigger_product_retraining(
|
||||
tenant_id=tenant_id,
|
||||
inventory_product_id=request.inventory_product_id,
|
||||
reason=request.reason,
|
||||
priority=request.priority
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to trigger product retraining",
|
||||
tenant_id=tenant_id,
|
||||
product_id=request.inventory_product_id,
|
||||
error=str(e)
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Failed to trigger retraining: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("retraining/trigger-bulk"),
|
||||
status_code=status.HTTP_200_OK
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def trigger_bulk_retraining(
|
||||
request: TriggerBulkRetrainingRequest,
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Trigger retraining for multiple products
|
||||
|
||||
Bulk retraining operation for multiple products at once.
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"Triggering bulk retraining",
|
||||
tenant_id=tenant_id,
|
||||
product_count=len(request.product_ids),
|
||||
reason=request.reason,
|
||||
user_id=current_user.get("user_id")
|
||||
)
|
||||
|
||||
service = RetrainingTriggerService(db)
|
||||
|
||||
result = await service.trigger_bulk_retraining(
|
||||
tenant_id=tenant_id,
|
||||
product_ids=request.product_ids,
|
||||
reason=request.reason
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to trigger bulk retraining",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e)
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Failed to trigger bulk retraining: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("retraining/recommendations"),
|
||||
status_code=status.HTTP_200_OK
|
||||
)
|
||||
@require_user_role(['admin', 'owner', 'member'])
|
||||
async def get_retraining_recommendations(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Get retraining recommendations without triggering
|
||||
|
||||
Returns recommendations for manual review and decision-making.
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"Getting retraining recommendations",
|
||||
tenant_id=tenant_id,
|
||||
user_id=current_user.get("user_id")
|
||||
)
|
||||
|
||||
service = RetrainingTriggerService(db)
|
||||
|
||||
recommendations = await service.get_retraining_recommendations(
|
||||
tenant_id=tenant_id
|
||||
)
|
||||
|
||||
return recommendations
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to get recommendations",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e)
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Failed to get recommendations: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("retraining/check-scheduled"),
|
||||
status_code=status.HTTP_200_OK
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def check_scheduled_retraining(
|
||||
request: ScheduledRetrainingCheckRequest,
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Check for models needing scheduled retraining based on age
|
||||
|
||||
Identifies models that haven't been updated in max_model_age_days.
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"Checking scheduled retraining needs",
|
||||
tenant_id=tenant_id,
|
||||
max_model_age_days=request.max_model_age_days,
|
||||
user_id=current_user.get("user_id")
|
||||
)
|
||||
|
||||
service = RetrainingTriggerService(db)
|
||||
|
||||
result = await service.check_and_trigger_scheduled_retraining(
|
||||
tenant_id=tenant_id,
|
||||
max_model_age_days=request.max_model_age_days
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to check scheduled retraining",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e)
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Failed to check scheduled retraining: {str(e)}"
|
||||
)
|
||||
455
services/forecasting/app/api/scenario_operations.py
Normal file
@@ -0,0 +1,455 @@
"""
|
||||
Scenario Simulation Operations API - PROFESSIONAL/ENTERPRISE ONLY
|
||||
Business operations for "what-if" scenario testing and strategic planning
|
||||
"""
|
||||
|
||||
import structlog
|
||||
from fastapi import APIRouter, Depends, HTTPException, status, Path, Request
|
||||
from typing import List, Dict, Any
|
||||
from datetime import date, datetime, timedelta, timezone
|
||||
import uuid
|
||||
|
||||
from app.schemas.forecasts import (
|
||||
ScenarioSimulationRequest,
|
||||
ScenarioSimulationResponse,
|
||||
ScenarioComparisonRequest,
|
||||
ScenarioComparisonResponse,
|
||||
ScenarioType,
|
||||
ScenarioImpact,
|
||||
ForecastResponse,
|
||||
ForecastRequest
|
||||
)
|
||||
from app.services.forecasting_service import EnhancedForecastingService
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.database.base import create_database_manager
|
||||
from shared.monitoring.decorators import track_execution_time
|
||||
from shared.monitoring.metrics import get_metrics_collector
|
||||
from app.core.config import settings
|
||||
from shared.routing import RouteBuilder
|
||||
from shared.auth.access_control import require_user_role, analytics_tier_required
|
||||
from shared.clients.tenant_client import TenantServiceClient
|
||||
|
||||
route_builder = RouteBuilder('forecasting')
|
||||
logger = structlog.get_logger()
|
||||
router = APIRouter(tags=["scenario-simulation"])
|
||||
|
||||
|
||||
def get_enhanced_forecasting_service():
|
||||
"""Dependency injection for EnhancedForecastingService"""
|
||||
database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
|
||||
return EnhancedForecastingService(database_manager)
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_analytics_route("scenario-simulation"),
|
||||
response_model=ScenarioSimulationResponse
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
@analytics_tier_required
|
||||
@track_execution_time("scenario_simulation_duration_seconds", "forecasting-service")
|
||||
async def simulate_scenario(
|
||||
request: ScenarioSimulationRequest,
|
||||
tenant_id: str = Path(..., description="Tenant ID"),
|
||||
request_obj: Request = None,
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
forecasting_service: EnhancedForecastingService = Depends(get_enhanced_forecasting_service)
|
||||
):
|
||||
"""
|
||||
Run a "what-if" scenario simulation on forecasts
|
||||
|
||||
This endpoint allows users to test how different scenarios might impact demand:
|
||||
- Weather events (heatwaves, cold snaps, rain)
|
||||
- Competition (new competitors opening nearby)
|
||||
- Events (festivals, concerts, sports events)
|
||||
- Pricing changes
|
||||
- Promotions
|
||||
- Supply disruptions
|
||||
|
||||
**ENTERPRISE TIER ONLY - Admin+ role required**
|
||||
"""
|
||||
metrics = get_metrics_collector(request_obj)
|
||||
start_time = datetime.now(timezone.utc)
|
||||
|
||||
try:
|
||||
logger.info("Starting scenario simulation",
|
||||
tenant_id=tenant_id,
|
||||
scenario_name=request.scenario_name,
|
||||
scenario_type=request.scenario_type.value,
|
||||
products=len(request.inventory_product_ids))
|
||||
|
||||
if metrics:
|
||||
metrics.increment_counter(f"scenario_simulations_total")
|
||||
metrics.increment_counter(f"scenario_simulations_{request.scenario_type.value}_total")
|
||||
|
||||
# Generate simulation ID
|
||||
simulation_id = str(uuid.uuid4())
|
||||
end_date = request.start_date + timedelta(days=request.duration_days - 1)
|
||||
|
||||
# Step 1: Generate baseline forecasts
|
||||
baseline_forecasts = []
|
||||
if request.include_baseline:
|
||||
logger.info("Generating baseline forecasts", tenant_id=tenant_id)
|
||||
|
||||
# Get tenant location (city) from tenant service
|
||||
location = "default"
|
||||
try:
|
||||
tenant_client = TenantServiceClient(settings)
|
||||
tenant_info = await tenant_client.get_tenant(tenant_id)
|
||||
if tenant_info and tenant_info.get('city'):
|
||||
location = tenant_info['city']
|
||||
logger.info("Using tenant location for forecasts", tenant_id=tenant_id, location=location)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to get tenant location, using default", error=str(e), tenant_id=tenant_id)
|
||||
|
||||
for product_id in request.inventory_product_ids:
|
||||
forecast_request = ForecastRequest(
|
||||
inventory_product_id=product_id,
|
||||
forecast_date=request.start_date,
|
||||
forecast_days=request.duration_days,
|
||||
location=location
|
||||
)
|
||||
multi_day_result = await forecasting_service.generate_multi_day_forecast(
|
||||
tenant_id=tenant_id,
|
||||
request=forecast_request
|
||||
)
|
||||
# Convert forecast dictionaries to ForecastResponse objects
|
||||
forecast_dicts = multi_day_result.get("forecasts", [])
|
||||
for forecast_dict in forecast_dicts:
|
||||
if isinstance(forecast_dict, dict):
|
||||
baseline_forecasts.append(ForecastResponse(**forecast_dict))
|
||||
else:
|
||||
baseline_forecasts.append(forecast_dict)
|
||||
|
||||
# Step 2: Apply scenario adjustments to generate scenario forecasts
|
||||
scenario_forecasts = await _apply_scenario_adjustments(
|
||||
tenant_id=tenant_id,
|
||||
request=request,
|
||||
baseline_forecasts=baseline_forecasts if request.include_baseline else [],
|
||||
forecasting_service=forecasting_service
|
||||
)
|
||||
|
||||
# Step 3: Calculate impacts
|
||||
product_impacts = _calculate_product_impacts(
|
||||
baseline_forecasts,
|
||||
scenario_forecasts,
|
||||
request.inventory_product_ids
|
||||
)
|
||||
|
||||
# Step 4: Calculate totals
|
||||
total_baseline_demand = sum(f.predicted_demand for f in baseline_forecasts) if baseline_forecasts else 0
|
||||
total_scenario_demand = sum(f.predicted_demand for f in scenario_forecasts)
|
||||
overall_impact_percent = (
|
||||
((total_scenario_demand - total_baseline_demand) / total_baseline_demand * 100)
|
||||
if total_baseline_demand > 0 else 0
|
||||
)
|
||||
|
||||
# Step 5: Generate insights and recommendations
|
||||
insights, recommendations, risk_level = _generate_insights(
|
||||
request.scenario_type,
|
||||
request,
|
||||
product_impacts,
|
||||
overall_impact_percent
|
||||
)
|
||||
|
||||
# Calculate processing time
|
||||
processing_time_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
|
||||
|
||||
if metrics:
|
||||
metrics.increment_counter("scenario_simulations_success_total")
|
||||
metrics.observe_histogram("scenario_simulation_processing_time_ms", processing_time_ms)
|
||||
|
||||
logger.info("Scenario simulation completed successfully",
|
||||
tenant_id=tenant_id,
|
||||
simulation_id=simulation_id,
|
||||
overall_impact=f"{overall_impact_percent:.2f}%",
|
||||
processing_time_ms=processing_time_ms)
|
||||
|
||||
return ScenarioSimulationResponse(
|
||||
id=simulation_id,
|
||||
tenant_id=tenant_id,
|
||||
scenario_name=request.scenario_name,
|
||||
scenario_type=request.scenario_type,
|
||||
start_date=request.start_date,
|
||||
end_date=end_date,
|
||||
duration_days=request.duration_days,
|
||||
baseline_forecasts=baseline_forecasts if request.include_baseline else None,
|
||||
scenario_forecasts=scenario_forecasts,
|
||||
total_baseline_demand=total_baseline_demand,
|
||||
total_scenario_demand=total_scenario_demand,
|
||||
overall_impact_percent=overall_impact_percent,
|
||||
product_impacts=product_impacts,
|
||||
insights=insights,
|
||||
recommendations=recommendations,
|
||||
risk_level=risk_level,
|
||||
created_at=datetime.now(timezone.utc),
|
||||
processing_time_ms=processing_time_ms
|
||||
)
|
||||
|
||||
except ValueError as e:
|
||||
if metrics:
|
||||
metrics.increment_counter("scenario_simulation_validation_errors_total")
|
||||
logger.error("Scenario simulation validation error", error=str(e), tenant_id=tenant_id)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e)
|
||||
)
|
||||
except Exception as e:
|
||||
if metrics:
|
||||
metrics.increment_counter("scenario_simulations_errors_total")
|
||||
logger.error("Scenario simulation failed", error=str(e), tenant_id=tenant_id)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Scenario simulation failed"
|
||||
)
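

# Illustrative request payload for the endpoint above (an assumed example; the
# Pydantic models in app.schemas.forecasts are authoritative, and the nested
# weather_params field name is inferred from how it is read further below):
#
#   POST .../scenario-simulation
#   {
#       "scenario_name": "August heatwave",
#       "scenario_type": "weather",
#       "start_date": "2025-08-01",
#       "duration_days": 7,
#       "inventory_product_ids": ["..."],
#       "include_baseline": true,
#       "weather_params": {"temperature_change": 12}
#   }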


async def _apply_scenario_adjustments(
    tenant_id: str,
    request: ScenarioSimulationRequest,
    baseline_forecasts: List[ForecastResponse],
    forecasting_service: EnhancedForecastingService
) -> List[ForecastResponse]:
    """
    Apply scenario-specific adjustments to forecasts
    """
    scenario_forecasts = []

    # If no baseline, generate fresh forecasts
    if not baseline_forecasts:
        for product_id in request.inventory_product_ids:
            forecast_request = ForecastRequest(
                inventory_product_id=product_id,
                forecast_date=request.start_date,
                forecast_days=request.duration_days,
                location="default"
            )
            multi_day_result = await forecasting_service.generate_multi_day_forecast(
                tenant_id=tenant_id,
                request=forecast_request
            )
            # Accumulate across products (reassigning here would drop earlier
            # iterations) and normalize dicts to ForecastResponse objects, as
            # done in simulate_scenario above
            for forecast in multi_day_result.get("forecasts", []):
                if isinstance(forecast, dict):
                    baseline_forecasts.append(ForecastResponse(**forecast))
                else:
                    baseline_forecasts.append(forecast)

    # Apply multipliers based on scenario type; the multiplier depends only on
    # the request, so compute it once outside the loop
    multiplier = _get_scenario_multiplier(request)
    for forecast in baseline_forecasts:
        adjusted_forecast = forecast.copy()

        # Adjust predicted demand and confidence bounds by the same factor
        adjusted_forecast.predicted_demand *= multiplier
        adjusted_forecast.confidence_lower *= multiplier
        adjusted_forecast.confidence_upper *= multiplier

        scenario_forecasts.append(adjusted_forecast)

    return scenario_forecasts


def _get_scenario_multiplier(request: ScenarioSimulationRequest) -> float:
    """
    Calculate demand multiplier based on scenario type and parameters
    """
    if request.scenario_type == ScenarioType.WEATHER:
        if request.weather_params:
            # Heatwave increases demand for cold items, decreases for hot items
            if request.weather_params.temperature_change and request.weather_params.temperature_change > 10:
                return 1.25  # 25% increase during heatwave
            elif request.weather_params.temperature_change and request.weather_params.temperature_change < -10:
                return 0.85  # 15% decrease during cold snap
            elif request.weather_params.precipitation_change and request.weather_params.precipitation_change > 10:
                return 0.90  # 10% decrease during heavy rain
        return 1.0

    elif request.scenario_type == ScenarioType.COMPETITION:
        if request.competition_params:
            # New competition reduces demand based on market share loss
            return 1.0 - request.competition_params.estimated_market_share_loss
        return 0.85  # Default 15% reduction

    elif request.scenario_type == ScenarioType.EVENT:
        if request.event_params:
            # Events increase demand based on attendance and proximity
            if request.event_params.distance_km < 1.0:
                return 1.5  # 50% increase for very close events
            elif request.event_params.distance_km < 5.0:
                return 1.2  # 20% increase for nearby events
        return 1.15  # Default 15% increase

    elif request.scenario_type == ScenarioType.PRICING:
        if request.pricing_params:
            # Price elasticity: typically -0.5 to -2.0
            # 10% price increase = 5-20% demand decrease
            elasticity = -1.0  # Average elasticity
            return 1.0 + (request.pricing_params.price_change_percent / 100) * elasticity
        return 1.0

    elif request.scenario_type == ScenarioType.PROMOTION:
        if request.promotion_params:
            # Promotions increase traffic and conversion
            traffic_boost = 1.0 + request.promotion_params.expected_traffic_increase
            discount_boost = 1.0 + (request.promotion_params.discount_percent / 100) * 0.5
            return traffic_boost * discount_boost
        return 1.3  # Default 30% increase

    elif request.scenario_type == ScenarioType.SUPPLY_DISRUPTION:
        return 0.6  # 40% reduction due to limited supply

    elif request.scenario_type == ScenarioType.CUSTOM:
        if request.custom_multipliers and 'demand' in request.custom_multipliers:
            return request.custom_multipliers['demand']
        return 1.0

    return 1.0
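

# Worked examples for the multipliers above (plain arithmetic, for reference):
#   PRICING:   price_change_percent=+10 with elasticity=-1.0
#              -> 1.0 + (10 / 100) * -1.0 = 0.90 (a 10% demand drop)
#   EVENT:     distance_km=0.5 -> 1.5; distance_km=3.0 -> 1.2; otherwise 1.15
#   PROMOTION: expected_traffic_increase=0.2, discount_percent=20
#              -> (1.0 + 0.2) * (1.0 + 0.20 * 0.5) = 1.2 * 1.1 = 1.32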


def _calculate_product_impacts(
    baseline_forecasts: List[ForecastResponse],
    scenario_forecasts: List[ForecastResponse],
    product_ids: List[str]
) -> List[ScenarioImpact]:
    """
    Calculate per-product impact of the scenario
    """
    impacts = []

    for product_id in product_ids:
        baseline_total = sum(
            f.predicted_demand for f in baseline_forecasts
            if f.inventory_product_id == product_id
        )
        scenario_total = sum(
            f.predicted_demand for f in scenario_forecasts
            if f.inventory_product_id == product_id
        )

        if baseline_total > 0:
            change_percent = ((scenario_total - baseline_total) / baseline_total) * 100
        else:
            change_percent = 0

        # Get confidence ranges
        scenario_product_forecasts = [
            f for f in scenario_forecasts if f.inventory_product_id == product_id
        ]
        avg_lower = sum(f.confidence_lower for f in scenario_product_forecasts) / len(scenario_product_forecasts) if scenario_product_forecasts else 0
        avg_upper = sum(f.confidence_upper for f in scenario_product_forecasts) / len(scenario_product_forecasts) if scenario_product_forecasts else 0

        impacts.append(ScenarioImpact(
            inventory_product_id=product_id,
            baseline_demand=baseline_total,
            simulated_demand=scenario_total,
            demand_change_percent=change_percent,
            confidence_range=(avg_lower, avg_upper),
            impact_factors={"primary_driver": "scenario_adjustment"}
        ))

    return impacts
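

# Worked example for the change calculation above (illustrative numbers):
#   baseline_total=200.0, scenario_total=250.0
#   -> demand_change_percent = (250.0 - 200.0) / 200.0 * 100 = +25.0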


def _generate_insights(
    scenario_type: ScenarioType,
    request: ScenarioSimulationRequest,
    impacts: List[ScenarioImpact],
    overall_impact: float
) -> tuple[List[str], List[str], str]:
    """
    Generate AI-powered insights and recommendations
    """
    insights = []
    recommendations = []
    risk_level = "low"

    # Determine risk level
    if abs(overall_impact) > 30:
        risk_level = "high"
    elif abs(overall_impact) > 15:
        risk_level = "medium"

    # Generate scenario-specific insights
    if scenario_type == ScenarioType.WEATHER:
        if request.weather_params:
            if request.weather_params.temperature_change and request.weather_params.temperature_change > 10:
                insights.append(f"Heatwave of +{request.weather_params.temperature_change}°C expected to increase demand by {overall_impact:.1f}%")
                recommendations.append("Increase inventory of cold beverages and refrigerated items")
                recommendations.append("Extend operating hours to capture increased evening traffic")
            elif request.weather_params.temperature_change and request.weather_params.temperature_change < -10:
                insights.append(f"Cold snap of {request.weather_params.temperature_change}°C expected to decrease demand by {abs(overall_impact):.1f}%")
                recommendations.append("Increase production of warm comfort foods")
                recommendations.append("Reduce inventory of cold items")

    elif scenario_type == ScenarioType.COMPETITION:
        insights.append(f"New competitor expected to reduce demand by {abs(overall_impact):.1f}%")
        recommendations.append("Consider launching loyalty program to retain customers")
        recommendations.append("Differentiate with unique product offerings")
        recommendations.append("Focus on customer service excellence")

    elif scenario_type == ScenarioType.EVENT:
        insights.append(f"Local event expected to increase demand by {overall_impact:.1f}%")
        recommendations.append("Increase staffing for the event period")
        recommendations.append("Stock additional inventory of popular items")
        recommendations.append("Consider event-specific promotions")

    elif scenario_type == ScenarioType.PRICING:
        if overall_impact < 0:
            insights.append(f"Price increase expected to reduce demand by {abs(overall_impact):.1f}%")
            recommendations.append("Consider smaller price increases")
            recommendations.append("Communicate value proposition to customers")
        else:
            insights.append(f"Price decrease expected to increase demand by {overall_impact:.1f}%")
            recommendations.append("Ensure adequate inventory to meet increased demand")

    elif scenario_type == ScenarioType.PROMOTION:
        insights.append(f"Promotion expected to increase demand by {overall_impact:.1f}%")
        recommendations.append("Stock additional inventory before promotion starts")
        recommendations.append("Increase staffing during promotion period")
        recommendations.append("Prepare marketing materials and signage")

    # Add product-specific insights
    high_impact_products = [
        impact for impact in impacts
        if abs(impact.demand_change_percent) > 20
    ]
    if high_impact_products:
        insights.append(f"{len(high_impact_products)} products show significant impact (>20% change)")

    # Add general recommendation
    if risk_level == "high":
        recommendations.append("⚠️ High-impact scenario - review and adjust operational plans immediately")
    elif risk_level == "medium":
        recommendations.append("Monitor situation closely and prepare contingency plans")

    return insights, recommendations, risk_level


@router.post(
    route_builder.build_analytics_route("scenario-comparison"),
    response_model=ScenarioComparisonResponse
)
@require_user_role(['viewer', 'member', 'admin', 'owner'])
@analytics_tier_required
async def compare_scenarios(
    request: ScenarioComparisonRequest,
    tenant_id: str = Path(..., description="Tenant ID")
):
    """
    Compare multiple scenario simulations

    **PROFESSIONAL/ENTERPRISE ONLY**

    **STATUS**: Not yet implemented - requires scenario persistence layer

    **Future implementation would**:
    1. Retrieve saved scenarios by ID from database
    2. Use ScenarioPlanner.compare_scenarios() to analyze them
    3. Return comparison matrix with best/worst case analysis

    **Prerequisites**:
    - Scenario storage/retrieval database layer
    - Scenario CRUD endpoints
    - UI for scenario management
    """
    # NOTE: HTTP 501 Not Implemented is the correct response for unimplemented optional features
    # The ML logic exists in scenario_planner.py but requires a persistence layer
    raise HTTPException(
        status_code=status.HTTP_501_NOT_IMPLEMENTED,
        detail="Scenario comparison requires scenario persistence layer (future feature)"
    )
346
services/forecasting/app/api/validation.py
Normal file
@@ -0,0 +1,346 @@
# ================================================================
# services/forecasting/app/api/validation.py
# ================================================================
"""
Validation API - Forecast validation endpoints
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, status
from typing import Dict, Any, List, Optional
from uuid import UUID
from datetime import datetime, timedelta, timezone
import structlog

from pydantic import BaseModel, Field
from app.services.validation_service import ValidationService
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from app.core.database import get_db
from sqlalchemy.ext.asyncio import AsyncSession

route_builder = RouteBuilder('forecasting')
router = APIRouter(tags=["validation"])
logger = structlog.get_logger()


# ================================================================
# Request/Response Schemas
# ================================================================

class ValidationRequest(BaseModel):
    """Request model for validation"""
    start_date: datetime = Field(..., description="Start date for validation period")
    end_date: datetime = Field(..., description="End date for validation period")
    orchestration_run_id: Optional[UUID] = Field(None, description="Optional orchestration run ID")
    triggered_by: str = Field(default="manual", description="Trigger source")


class ValidationResponse(BaseModel):
    """Response model for validation results"""
    validation_run_id: str
    status: str
    forecasts_evaluated: int
    forecasts_with_actuals: int
    forecasts_without_actuals: int
    metrics_created: int
    overall_metrics: Optional[Dict[str, float]] = None
    total_predicted_demand: Optional[float] = None
    total_actual_demand: Optional[float] = None
    duration_seconds: Optional[float] = None
    message: Optional[str] = None


class ValidationRunResponse(BaseModel):
    """Response model for validation run details"""
    id: str
    tenant_id: str
    orchestration_run_id: Optional[str]
    validation_start_date: str
    validation_end_date: str
    started_at: str
    completed_at: Optional[str]
    duration_seconds: Optional[float]
    status: str
    total_forecasts_evaluated: int
    forecasts_with_actuals: int
    forecasts_without_actuals: int
    overall_mae: Optional[float]
    overall_mape: Optional[float]
    overall_rmse: Optional[float]
    overall_r2_score: Optional[float]
    overall_accuracy_percentage: Optional[float]
    total_predicted_demand: float
    total_actual_demand: float
    metrics_by_product: Optional[Dict[str, Any]]
    metrics_by_location: Optional[Dict[str, Any]]
    metrics_records_created: int
    error_message: Optional[str]
    triggered_by: str
    execution_mode: str


class AccuracyTrendResponse(BaseModel):
    """Response model for accuracy trends"""
    period_days: int
    total_runs: int
    average_mape: Optional[float]
    average_accuracy: Optional[float]
    trends: List[Dict[str, Any]]
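

# Reference sketch (illustrative, not part of this commit): the accuracy metrics
# named in the response models above are conventionally computed per
# predicted/actual pair as follows. ValidationService owns the real
# implementation; the formulas below only document the standard definitions.
#
#   mae  = mean(|actual - predicted|)
#   mape = mean(|actual - predicted| / actual) * 100        (for actual > 0)
#   rmse = sqrt(mean((actual - predicted) ** 2))
#   accuracy_percentage ~ max(0, 100 - mape)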


# ================================================================
# Endpoints
# ================================================================

@router.post(
    route_builder.build_base_route("validation/validate-date-range"),
    response_model=ValidationResponse,
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def validate_date_range(
    validation_request: ValidationRequest,
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Validate forecasts against actual sales for a date range

    This endpoint:
    - Fetches forecasts for the specified date range
    - Retrieves corresponding actual sales data
    - Calculates accuracy metrics (MAE, MAPE, RMSE, R², accuracy %)
    - Stores performance metrics in the database
    - Returns validation summary
    """
    try:
        logger.info(
            "Starting date range validation",
            tenant_id=tenant_id,
            start_date=validation_request.start_date.isoformat(),
            end_date=validation_request.end_date.isoformat(),
            user_id=current_user.get("user_id")
        )

        validation_service = ValidationService(db)

        result = await validation_service.validate_date_range(
            tenant_id=tenant_id,
            start_date=validation_request.start_date,
            end_date=validation_request.end_date,
            orchestration_run_id=validation_request.orchestration_run_id,
            triggered_by=validation_request.triggered_by
        )

        logger.info(
            "Date range validation completed",
            tenant_id=tenant_id,
            validation_run_id=result.get("validation_run_id"),
            forecasts_evaluated=result.get("forecasts_evaluated")
        )

        return ValidationResponse(**result)

    except Exception as e:
        logger.error(
            "Failed to validate date range",
            tenant_id=tenant_id,
            error=str(e),
            error_type=type(e).__name__
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to validate forecasts: {str(e)}"
        )


@router.post(
    route_builder.build_base_route("validation/validate-yesterday"),
    response_model=ValidationResponse,
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def validate_yesterday(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    orchestration_run_id: Optional[UUID] = Query(None, description="Optional orchestration run ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Validate yesterday's forecasts against actual sales

    Convenience endpoint for validating the most recent day's forecasts.
    This is typically called by the orchestrator as part of the daily workflow.
    """
    try:
        logger.info(
            "Starting yesterday validation",
            tenant_id=tenant_id,
            user_id=current_user.get("user_id")
        )

        validation_service = ValidationService(db)

        result = await validation_service.validate_yesterday(
            tenant_id=tenant_id,
            orchestration_run_id=orchestration_run_id,
            triggered_by="manual"
        )

        logger.info(
            "Yesterday validation completed",
            tenant_id=tenant_id,
            validation_run_id=result.get("validation_run_id"),
            forecasts_evaluated=result.get("forecasts_evaluated")
        )

        return ValidationResponse(**result)

    except Exception as e:
        logger.error(
            "Failed to validate yesterday",
            tenant_id=tenant_id,
            error=str(e),
            error_type=type(e).__name__
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to validate yesterday's forecasts: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("validation/runs/{validation_run_id}"),
    response_model=ValidationRunResponse,
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def get_validation_run(
    validation_run_id: UUID = Path(..., description="Validation run ID"),
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get details of a specific validation run

    Returns complete information about a validation execution including:
    - Summary statistics
    - Overall accuracy metrics
    - Breakdown by product and location
    - Execution metadata
    """
    try:
        validation_service = ValidationService(db)

        validation_run = await validation_service.get_validation_run(validation_run_id)

        if not validation_run:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Validation run {validation_run_id} not found"
            )

        if validation_run.tenant_id != tenant_id:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Access denied to this validation run"
            )

        return ValidationRunResponse(**validation_run.to_dict())

    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            "Failed to get validation run",
            validation_run_id=validation_run_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get validation run: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("validation/runs"),
    response_model=List[ValidationRunResponse],
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def get_validation_runs(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    limit: int = Query(50, ge=1, le=100, description="Number of records to return"),
    skip: int = Query(0, ge=0, description="Number of records to skip"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get validation runs for a tenant

    Returns a list of validation executions with pagination support.
    """
    try:
        validation_service = ValidationService(db)

        runs = await validation_service.get_validation_runs_by_tenant(
            tenant_id=tenant_id,
            limit=limit,
            skip=skip
        )

        return [ValidationRunResponse(**run.to_dict()) for run in runs]

    except Exception as e:
        logger.error(
            "Failed to get validation runs",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get validation runs: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("validation/trends"),
    response_model=AccuracyTrendResponse,
    status_code=status.HTTP_200_OK
)
@require_user_role(['admin', 'owner', 'member'])
async def get_accuracy_trends(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=1, le=365, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get accuracy trends over time

    Returns validation accuracy metrics over the specified time period.
    Useful for monitoring model performance degradation and improvement.
    """
    try:
        validation_service = ValidationService(db)

        trends = await validation_service.get_accuracy_trends(
            tenant_id=tenant_id,
            days=days
        )

        return AccuracyTrendResponse(**trends)

    except Exception as e:
        logger.error(
            "Failed to get accuracy trends",
            tenant_id=tenant_id,
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get accuracy trends: {str(e)}"
        )
174
services/forecasting/app/api/webhooks.py
Normal file
@@ -0,0 +1,174 @@
# ================================================================
# services/forecasting/app/api/webhooks.py
# ================================================================
"""
Webhooks API - Receive events from other services
"""

from fastapi import APIRouter, HTTPException, status, Header
from typing import Dict, Any, Optional
from uuid import UUID
from datetime import date
import structlog

from pydantic import BaseModel, Field
from app.jobs.sales_data_listener import (
    handle_sales_import_completion,
    handle_pos_sync_completion
)
from shared.routing import RouteBuilder

route_builder = RouteBuilder('forecasting')
router = APIRouter(tags=["webhooks"])
logger = structlog.get_logger()


# ================================================================
# Request Schemas
# ================================================================

class SalesImportWebhook(BaseModel):
    """Webhook payload for sales data import completion"""
    tenant_id: UUID = Field(..., description="Tenant ID")
    import_job_id: str = Field(..., description="Import job ID")
    start_date: date = Field(..., description="Start date of imported data")
    end_date: date = Field(..., description="End date of imported data")
    records_count: int = Field(..., ge=0, description="Number of records imported")
    import_source: str = Field(default="import", description="Source of import")


class POSSyncWebhook(BaseModel):
    """Webhook payload for POS sync completion"""
    tenant_id: UUID = Field(..., description="Tenant ID")
    sync_log_id: str = Field(..., description="POS sync log ID")
    sync_date: date = Field(..., description="Date of synced data")
    records_synced: int = Field(..., ge=0, description="Number of records synced")
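

# Minimal sketch of the signature check referenced by the endpoints below.
# This helper and WEBHOOK_SECRET are illustrative assumptions, not part of this
# commit; the commented-out calls in the handlers show where such a check would
# plug in once a shared secret is provisioned.
#
# import hashlib
# import hmac
#
# def verify_webhook_signature(signature: Optional[str], raw_body: bytes) -> bool:
#     """Constant-time HMAC-SHA256 comparison against a shared secret."""
#     if not signature:
#         return False
#     expected = hmac.new(WEBHOOK_SECRET.encode(), raw_body, hashlib.sha256).hexdigest()
#     return hmac.compare_digest(expected, signature)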


# ================================================================
# Endpoints
# ================================================================

@router.post(
    "/webhooks/sales-import-completed",
    status_code=status.HTTP_202_ACCEPTED
)
async def sales_import_completed_webhook(
    payload: SalesImportWebhook,
    x_webhook_signature: Optional[str] = Header(None, description="Webhook signature for verification")
):
    """
    Webhook endpoint for sales data import completion

    Called by the sales service when a data import completes.
    Triggers validation backfill for the imported date range.

    Note: In production, this should verify the webhook signature
    to ensure the request comes from a trusted source.
    """
    try:
        logger.info(
            "Received sales import completion webhook",
            tenant_id=payload.tenant_id,
            import_job_id=payload.import_job_id,
            date_range=f"{payload.start_date} to {payload.end_date}"
        )

        # In production, verify webhook signature here
        # if not verify_webhook_signature(x_webhook_signature, payload):
        #     raise HTTPException(status_code=401, detail="Invalid webhook signature")

        # Handle the import completion asynchronously
        result = await handle_sales_import_completion(
            tenant_id=payload.tenant_id,
            import_job_id=payload.import_job_id,
            start_date=payload.start_date,
            end_date=payload.end_date,
            records_count=payload.records_count,
            import_source=payload.import_source
        )

        return {
            "status": "accepted",
            "message": "Sales import completion event received and processing",
            "result": result
        }

    except Exception as e:
        logger.error(
            "Failed to process sales import webhook",
            payload=payload.dict(),
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to process webhook: {str(e)}"
        )


@router.post(
    "/webhooks/pos-sync-completed",
    status_code=status.HTTP_202_ACCEPTED
)
async def pos_sync_completed_webhook(
    payload: POSSyncWebhook,
    x_webhook_signature: Optional[str] = Header(None, description="Webhook signature for verification")
):
    """
    Webhook endpoint for POS sync completion

    Called by the POS service when data synchronization completes.
    Triggers validation for the synced date.
    """
    try:
        logger.info(
            "Received POS sync completion webhook",
            tenant_id=payload.tenant_id,
            sync_log_id=payload.sync_log_id,
            sync_date=payload.sync_date.isoformat()
        )

        # In production, verify webhook signature here
        # if not verify_webhook_signature(x_webhook_signature, payload):
        #     raise HTTPException(status_code=401, detail="Invalid webhook signature")

        # Handle the sync completion
        result = await handle_pos_sync_completion(
            tenant_id=payload.tenant_id,
            sync_log_id=payload.sync_log_id,
            sync_date=payload.sync_date,
            records_synced=payload.records_synced
        )

        return {
            "status": "accepted",
            "message": "POS sync completion event received and processing",
            "result": result
        }

    except Exception as e:
        logger.error(
            "Failed to process POS sync webhook",
            payload=payload.dict(),
            error=str(e)
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to process webhook: {str(e)}"
        )


@router.get(
    "/webhooks/health",
    status_code=status.HTTP_200_OK
)
async def webhook_health_check():
    """Health check endpoint for webhook receiver"""
    return {
        "status": "healthy",
        "service": "forecasting-webhooks",
        "endpoints": [
            "/webhooks/sales-import-completed",
            "/webhooks/pos-sync-completed"
        ]
    }