Add more services
599
services/suppliers/app/api/performance.py
Normal file
@@ -0,0 +1,599 @@
# ================================================================
# services/suppliers/app/api/performance.py
# ================================================================
"""
Supplier Performance Tracking API endpoints
"""

from datetime import datetime, timedelta, timezone
|
||||
from typing import List, Optional
|
||||
from uuid import UUID
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, Path, status
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep, get_current_tenant_id_dep
|
||||
from app.core.database import get_db
|
||||
from app.services.performance_service import PerformanceTrackingService, AlertService
|
||||
from app.services.dashboard_service import DashboardService
|
||||
from app.schemas.performance import (
|
||||
PerformanceMetric, PerformanceMetricCreate, PerformanceMetricUpdate,
|
||||
Alert, AlertCreate, AlertUpdate, Scorecard, ScorecardCreate, ScorecardUpdate,
|
||||
PerformanceDashboardSummary, SupplierPerformanceInsights, PerformanceAnalytics,
|
||||
BusinessModelInsights, AlertSummary, DashboardFilter, AlertFilter,
|
||||
PerformanceReportRequest, ExportDataResponse
|
||||
)
|
||||
from app.models.performance import PerformancePeriod, PerformanceMetricType, AlertType, AlertSeverity
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
router = APIRouter(prefix="/performance", tags=["performance"])
|
||||
|
||||
|
||||
# ===== Dependency Injection =====
|
||||
|
||||
async def get_performance_service() -> PerformanceTrackingService:
|
||||
"""Get performance tracking service"""
|
||||
return PerformanceTrackingService()
|
||||
|
||||
async def get_alert_service() -> AlertService:
|
||||
"""Get alert service"""
|
||||
return AlertService()
|
||||
|
||||
async def get_dashboard_service() -> DashboardService:
|
||||
"""Get dashboard service"""
|
||||
return DashboardService()
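
# NOTE: the three providers above construct stateless service objects per request, so
# they can be swapped out in tests through FastAPI's dependency_overrides. A minimal
# sketch (the app import path and the stub class are illustrative assumptions, not
# part of this commit):
#
#   from fastapi.testclient import TestClient
#   from app.main import app
#
#   class StubPerformanceService(PerformanceTrackingService):
#       async def calculate_supplier_performance(self, *args, **kwargs):
#           return None  # exercises the 404 branch of the calculate endpoint below
#
#   app.dependency_overrides[get_performance_service] = StubPerformanceService
#   client = TestClient(app)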
|
||||
|
||||
|
||||
# ===== Performance Metrics Endpoints =====
|
||||
|
||||
@router.post("/tenants/{tenant_id}/suppliers/{supplier_id}/calculate", response_model=PerformanceMetric)
|
||||
async def calculate_supplier_performance(
|
||||
tenant_id: UUID = Path(...),
|
||||
supplier_id: UUID = Path(...),
|
||||
period: PerformancePeriod = Query(...),
|
||||
period_start: datetime = Query(...),
|
||||
period_end: datetime = Query(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
performance_service: PerformanceTrackingService = Depends(get_performance_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Calculate performance metrics for a supplier"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
metric = await performance_service.calculate_supplier_performance(
|
||||
db, supplier_id, tenant_id, period, period_start, period_end
|
||||
)
|
||||
|
||||
if not metric:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="Unable to calculate performance metrics"
|
||||
)
|
||||
|
||||
logger.info("Performance metrics calculated",
|
||||
tenant_id=str(tenant_id),
|
||||
supplier_id=str(supplier_id),
|
||||
period=period.value)
|
||||
|
||||
return metric
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error calculating performance metrics",
|
||||
tenant_id=str(tenant_id),
|
||||
supplier_id=str(supplier_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to calculate performance metrics"
|
||||
)
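
# Example request shape for the calculate endpoint above, as a sketch: the host, the
# /api/v1 prefix and the access_token variable are placeholders, and the period value
# comes from the PerformancePeriod enum.
#
#   import httpx
#
#   response = httpx.post(
#       f"http://localhost:8000/api/v1/performance/tenants/{tenant_id}/suppliers/{supplier_id}/calculate",
#       params={
#           "period": "monthly",
#           "period_start": "2024-01-01T00:00:00Z",
#           "period_end": "2024-01-31T23:59:59Z",
#       },
#       headers={"Authorization": f"Bearer {access_token}"},
#   )
#   response.raise_for_status()  # body deserializes into the PerformanceMetric schema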
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/suppliers/{supplier_id}/metrics", response_model=List[PerformanceMetric])
|
||||
async def get_supplier_performance_metrics(
|
||||
tenant_id: UUID = Path(...),
|
||||
supplier_id: UUID = Path(...),
|
||||
metric_type: Optional[PerformanceMetricType] = Query(None),
|
||||
period: Optional[PerformancePeriod] = Query(None),
|
||||
date_from: Optional[datetime] = Query(None),
|
||||
date_to: Optional[datetime] = Query(None),
|
||||
limit: int = Query(50, ge=1, le=500),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get performance metrics for a supplier"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
# TODO: Implement get_supplier_performance_metrics in service
|
||||
# For now, return empty list
|
||||
metrics = []
|
||||
|
||||
return metrics
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting performance metrics",
|
||||
tenant_id=str(tenant_id),
|
||||
supplier_id=str(supplier_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve performance metrics"
|
||||
)
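
# A minimal sketch of the query that the TODO above could delegate to once a
# get_supplier_performance_metrics method exists on PerformanceTrackingService.
# This helper is hypothetical and not wired into the endpoint yet.
async def _query_supplier_metrics_sketch(
    db: AsyncSession,
    tenant_id: UUID,
    supplier_id: UUID,
    metric_type: Optional[PerformanceMetricType] = None,
    period: Optional[PerformancePeriod] = None,
    date_from: Optional[datetime] = None,
    date_to: Optional[datetime] = None,
    limit: int = 50,
):
    from sqlalchemy import select
    from app.models.performance import SupplierPerformanceMetric

    stmt = (
        select(SupplierPerformanceMetric)
        .where(
            SupplierPerformanceMetric.tenant_id == tenant_id,
            SupplierPerformanceMetric.supplier_id == supplier_id,
        )
        .order_by(SupplierPerformanceMetric.period_start.desc())
        .limit(limit)
    )
    if metric_type is not None:
        stmt = stmt.where(SupplierPerformanceMetric.metric_type == metric_type)
    if period is not None:
        stmt = stmt.where(SupplierPerformanceMetric.period == period)
    if date_from is not None:
        stmt = stmt.where(SupplierPerformanceMetric.period_start >= date_from)
    if date_to is not None:
        stmt = stmt.where(SupplierPerformanceMetric.period_end <= date_to)

    result = await db.execute(stmt)
    return result.scalars().all()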
|
||||
|
||||
|
||||
# ===== Alert Management Endpoints =====
|
||||
|
||||
@router.post("/tenants/{tenant_id}/alerts/evaluate", response_model=List[Alert])
|
||||
async def evaluate_performance_alerts(
|
||||
tenant_id: UUID = Path(...),
|
||||
supplier_id: Optional[UUID] = Query(None, description="Specific supplier to evaluate"),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
alert_service: AlertService = Depends(get_alert_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Evaluate and create performance-based alerts"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
alerts = await alert_service.evaluate_performance_alerts(db, tenant_id, supplier_id)
|
||||
|
||||
logger.info("Performance alerts evaluated",
|
||||
tenant_id=str(tenant_id),
|
||||
alerts_created=len(alerts))
|
||||
|
||||
return alerts
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error evaluating performance alerts",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to evaluate performance alerts"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/alerts", response_model=List[Alert])
|
||||
async def get_supplier_alerts(
|
||||
tenant_id: UUID = Path(...),
|
||||
supplier_id: Optional[UUID] = Query(None),
|
||||
alert_type: Optional[AlertType] = Query(None),
|
||||
severity: Optional[AlertSeverity] = Query(None),
|
||||
date_from: Optional[datetime] = Query(None),
|
||||
date_to: Optional[datetime] = Query(None),
|
||||
limit: int = Query(50, ge=1, le=500),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get supplier alerts with filtering"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
# TODO: Implement get_supplier_alerts in service
|
||||
# For now, return empty list
|
||||
alerts = []
|
||||
|
||||
return alerts
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting supplier alerts",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve supplier alerts"
|
||||
)
|
||||
|
||||
|
||||
@router.patch("/tenants/{tenant_id}/alerts/{alert_id}", response_model=Alert)
|
||||
async def update_alert(
|
||||
alert_update: AlertUpdate,
|
||||
tenant_id: UUID = Path(...),
|
||||
alert_id: UUID = Path(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Update an alert (acknowledge, resolve, etc.)"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
# TODO: Implement update_alert in service
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="Alert update not yet implemented"
|
||||
)
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error updating alert",
|
||||
tenant_id=str(tenant_id),
|
||||
alert_id=str(alert_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to update alert"
|
||||
)
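
# A sketch of the service-side update that the 501 above is reserved for: load the
# alert, apply the patch fields, and stamp the lifecycle timestamps. Hypothetical
# helper; column names follow app.models.performance.SupplierAlert.
async def _apply_alert_update_sketch(
    db: AsyncSession,
    tenant_id: UUID,
    alert_id: UUID,
    alert_update: AlertUpdate,
    user_id: Optional[UUID] = None,
):
    from datetime import timezone
    from sqlalchemy import select
    from app.models.performance import SupplierAlert, AlertStatus

    result = await db.execute(
        select(SupplierAlert).where(
            SupplierAlert.id == alert_id,
            SupplierAlert.tenant_id == tenant_id,
        )
    )
    alert = result.scalar_one_or_none()
    if alert is None:
        return None

    if alert_update.status is not None:
        alert.status = alert_update.status
        now = datetime.now(timezone.utc)
        if alert_update.status == AlertStatus.ACKNOWLEDGED:
            alert.acknowledged_at, alert.acknowledged_by = now, user_id
        elif alert_update.status == AlertStatus.RESOLVED:
            alert.resolved_at, alert.resolved_by = now, user_id
    if alert_update.resolution_notes is not None:
        alert.resolution_notes = alert_update.resolution_notes
    if alert_update.actions_taken is not None:
        alert.actions_taken = alert_update.actions_taken
    if alert_update.escalated is not None:
        alert.escalated = alert_update.escalated

    await db.commit()
    await db.refresh(alert)
    return alert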
|
||||
|
||||
|
||||
# ===== Dashboard Endpoints =====
|
||||
|
||||
@router.get("/tenants/{tenant_id}/dashboard/summary", response_model=PerformanceDashboardSummary)
|
||||
async def get_performance_dashboard_summary(
|
||||
tenant_id: UUID = Path(...),
|
||||
date_from: Optional[datetime] = Query(None),
|
||||
date_to: Optional[datetime] = Query(None),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
dashboard_service: DashboardService = Depends(get_dashboard_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get comprehensive performance dashboard summary"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
summary = await dashboard_service.get_performance_dashboard_summary(
|
||||
db, tenant_id, date_from, date_to
|
||||
)
|
||||
|
||||
logger.info("Performance dashboard summary retrieved",
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return summary
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting dashboard summary",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve dashboard summary"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/suppliers/{supplier_id}/insights", response_model=SupplierPerformanceInsights)
|
||||
async def get_supplier_performance_insights(
|
||||
tenant_id: UUID = Path(...),
|
||||
supplier_id: UUID = Path(...),
|
||||
days_back: int = Query(30, ge=1, le=365),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
dashboard_service: DashboardService = Depends(get_dashboard_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get detailed performance insights for a specific supplier"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
insights = await dashboard_service.get_supplier_performance_insights(
|
||||
db, tenant_id, supplier_id, days_back
|
||||
)
|
||||
|
||||
logger.info("Supplier performance insights retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
supplier_id=str(supplier_id))
|
||||
|
||||
return insights
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting supplier insights",
|
||||
tenant_id=str(tenant_id),
|
||||
supplier_id=str(supplier_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve supplier insights"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/analytics", response_model=PerformanceAnalytics)
|
||||
async def get_performance_analytics(
|
||||
tenant_id: UUID = Path(...),
|
||||
period_days: int = Query(90, ge=1, le=365),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
dashboard_service: DashboardService = Depends(get_dashboard_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get advanced performance analytics"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
analytics = await dashboard_service.get_performance_analytics(
|
||||
db, tenant_id, period_days
|
||||
)
|
||||
|
||||
logger.info("Performance analytics retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
period_days=period_days)
|
||||
|
||||
return analytics
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting performance analytics",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve performance analytics"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/business-model", response_model=BusinessModelInsights)
|
||||
async def get_business_model_insights(
|
||||
tenant_id: UUID = Path(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
dashboard_service: DashboardService = Depends(get_dashboard_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get business model detection and insights"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
insights = await dashboard_service.get_business_model_insights(db, tenant_id)
|
||||
|
||||
logger.info("Business model insights retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
detected_model=insights.detected_model)
|
||||
|
||||
return insights
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting business model insights",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve business model insights"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/alerts/summary", response_model=List[AlertSummary])
|
||||
async def get_alert_summary(
|
||||
tenant_id: UUID = Path(...),
|
||||
date_from: Optional[datetime] = Query(None),
|
||||
date_to: Optional[datetime] = Query(None),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
dashboard_service: DashboardService = Depends(get_dashboard_service),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get alert summary by type and severity"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
summary = await dashboard_service.get_alert_summary(db, tenant_id, date_from, date_to)
|
||||
|
||||
return summary
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting alert summary",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve alert summary"
|
||||
)
|
||||
|
||||
|
||||
# ===== Export and Reporting Endpoints =====
|
||||
|
||||
@router.post("/tenants/{tenant_id}/reports/generate", response_model=ExportDataResponse)
|
||||
async def generate_performance_report(
|
||||
report_request: PerformanceReportRequest,
|
||||
tenant_id: UUID = Path(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Generate a performance report"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
# TODO: Implement report generation
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="Report generation not yet implemented"
|
||||
)
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error generating performance report",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to generate performance report"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/export")
|
||||
async def export_performance_data(
|
||||
tenant_id: UUID = Path(...),
|
||||
format: str = Query("json", description="Export format: json, csv, excel"),
|
||||
date_from: Optional[datetime] = Query(None),
|
||||
date_to: Optional[datetime] = Query(None),
|
||||
supplier_ids: Optional[List[UUID]] = Query(None),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Export performance data"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
if format.lower() not in ["json", "csv", "excel"]:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Unsupported export format. Use: json, csv, excel"
|
||||
)
|
||||
|
||||
# TODO: Implement data export
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="Data export not yet implemented"
|
||||
)
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error exporting performance data",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to export performance data"
|
||||
)
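
# A sketch of what the CSV branch of the export above could return once implemented.
# Hypothetical helper; the column list is illustrative.
def _metrics_to_csv_response_sketch(rows: List[dict]):
    import csv
    import io
    from fastapi.responses import StreamingResponse

    buffer = io.StringIO()
    writer = csv.DictWriter(
        buffer,
        fieldnames=["supplier_id", "metric_type", "period_start", "period_end", "metric_value"],
        extrasaction="ignore",
    )
    writer.writeheader()
    writer.writerows(rows)
    buffer.seek(0)

    return StreamingResponse(
        iter([buffer.getvalue()]),
        media_type="text/csv",
        headers={"Content-Disposition": "attachment; filename=performance_export.csv"},
    )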
|
||||
|
||||
|
||||
# ===== Configuration and Health Endpoints =====
|
||||
|
||||
@router.get("/tenants/{tenant_id}/config")
|
||||
async def get_performance_config(
|
||||
tenant_id: UUID = Path(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep)
|
||||
):
|
||||
"""Get performance tracking configuration"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
from app.core.config import settings
|
||||
|
||||
config = {
|
||||
"performance_tracking": {
|
||||
"enabled": settings.PERFORMANCE_TRACKING_ENABLED,
|
||||
"calculation_interval_minutes": settings.PERFORMANCE_CALCULATION_INTERVAL_MINUTES,
|
||||
"cache_ttl_seconds": settings.PERFORMANCE_CACHE_TTL
|
||||
},
|
||||
"thresholds": {
|
||||
"excellent_delivery_rate": settings.EXCELLENT_DELIVERY_RATE,
|
||||
"good_delivery_rate": settings.GOOD_DELIVERY_RATE,
|
||||
"acceptable_delivery_rate": settings.ACCEPTABLE_DELIVERY_RATE,
|
||||
"poor_delivery_rate": settings.POOR_DELIVERY_RATE,
|
||||
"excellent_quality_rate": settings.EXCELLENT_QUALITY_RATE,
|
||||
"good_quality_rate": settings.GOOD_QUALITY_RATE,
|
||||
"acceptable_quality_rate": settings.ACCEPTABLE_QUALITY_RATE,
|
||||
"poor_quality_rate": settings.POOR_QUALITY_RATE
|
||||
},
|
||||
"alerts": {
|
||||
"enabled": settings.ALERTS_ENABLED,
|
||||
"evaluation_interval_minutes": settings.ALERT_EVALUATION_INTERVAL_MINUTES,
|
||||
"retention_days": settings.ALERT_RETENTION_DAYS,
|
||||
"critical_delivery_delay_hours": settings.CRITICAL_DELIVERY_DELAY_HOURS,
|
||||
"critical_quality_rejection_rate": settings.CRITICAL_QUALITY_REJECTION_RATE
|
||||
},
|
||||
"dashboard": {
|
||||
"cache_ttl_seconds": settings.DASHBOARD_CACHE_TTL,
|
||||
"refresh_interval_seconds": settings.DASHBOARD_REFRESH_INTERVAL,
|
||||
"default_analytics_period_days": settings.DEFAULT_ANALYTICS_PERIOD_DAYS
|
||||
},
|
||||
"business_model": {
|
||||
"detection_enabled": settings.ENABLE_BUSINESS_MODEL_DETECTION,
|
||||
"central_bakery_threshold": settings.CENTRAL_BAKERY_THRESHOLD_SUPPLIERS,
|
||||
"individual_bakery_threshold": settings.INDIVIDUAL_BAKERY_THRESHOLD_SUPPLIERS
|
||||
}
|
||||
}
|
||||
|
||||
return config
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting performance config",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve performance configuration"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/health")
|
||||
async def get_performance_health(
|
||||
tenant_id: UUID = Path(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep)
|
||||
):
|
||||
"""Get performance service health status"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="Access denied to tenant data"
|
||||
)
|
||||
|
||||
return {
|
||||
"service": "suppliers-performance",
|
||||
"status": "healthy",
            "timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"tenant_id": str(tenant_id),
|
||||
"features": {
|
||||
"performance_tracking": "enabled",
|
||||
"alerts": "enabled",
|
||||
"dashboard_analytics": "enabled",
|
||||
"business_model_detection": "enabled"
|
||||
}
|
||||
}
|
||||
|
||||
    except HTTPException:
        raise
    except Exception as e:
|
||||
logger.error("Error getting performance health",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to get performance health status"
|
||||
)
|
||||
@@ -78,6 +78,56 @@ class Settings(BaseServiceSettings):
    # Business hours for supplier contact (24h format)
    BUSINESS_HOURS_START: int = 8
    BUSINESS_HOURS_END: int = 18

    # Performance Tracking Settings
    PERFORMANCE_TRACKING_ENABLED: bool = Field(default=True, env="PERFORMANCE_TRACKING_ENABLED")
    PERFORMANCE_CALCULATION_INTERVAL_MINUTES: int = Field(default=60, env="PERFORMANCE_CALCULATION_INTERVAL")
    PERFORMANCE_CACHE_TTL: int = Field(default=300, env="PERFORMANCE_CACHE_TTL")  # 5 minutes

    # Performance Thresholds
    EXCELLENT_DELIVERY_RATE: float = 95.0
    GOOD_DELIVERY_RATE: float = 90.0
    ACCEPTABLE_DELIVERY_RATE: float = 85.0
    POOR_DELIVERY_RATE: float = 80.0

    EXCELLENT_QUALITY_RATE: float = 98.0
    GOOD_QUALITY_RATE: float = 95.0
    ACCEPTABLE_QUALITY_RATE: float = 90.0
    POOR_QUALITY_RATE: float = 85.0

    # Alert Settings
    ALERTS_ENABLED: bool = Field(default=True, env="SUPPLIERS_ALERTS_ENABLED")
    ALERT_EVALUATION_INTERVAL_MINUTES: int = Field(default=15, env="ALERT_EVALUATION_INTERVAL")
    ALERT_RETENTION_DAYS: int = Field(default=365, env="ALERT_RETENTION_DAYS")

    # Critical alert thresholds
    CRITICAL_DELIVERY_DELAY_HOURS: int = 24
    CRITICAL_QUALITY_REJECTION_RATE: float = 10.0
    HIGH_COST_VARIANCE_PERCENTAGE: float = 15.0

    # Dashboard Settings
    DASHBOARD_CACHE_TTL: int = Field(default=180, env="SUPPLIERS_DASHBOARD_CACHE_TTL")  # 3 minutes
    DASHBOARD_REFRESH_INTERVAL: int = Field(default=300, env="DASHBOARD_REFRESH_INTERVAL")  # 5 minutes

    # Performance Analytics
    DEFAULT_ANALYTICS_PERIOD_DAYS: int = 30
    MAX_ANALYTICS_PERIOD_DAYS: int = 365
    SCORECARD_GENERATION_DAY: int = 1  # Day of month to generate scorecards

    # Notification Settings
    NOTIFICATION_EMAIL_ENABLED: bool = Field(default=True, env="NOTIFICATION_EMAIL_ENABLED")
    NOTIFICATION_WEBHOOK_ENABLED: bool = Field(default=False, env="NOTIFICATION_WEBHOOK_ENABLED")
    NOTIFICATION_WEBHOOK_URL: str = Field(default="", env="NOTIFICATION_WEBHOOK_URL")

    # Business Model Detection
    ENABLE_BUSINESS_MODEL_DETECTION: bool = Field(default=True, env="ENABLE_BUSINESS_MODEL_DETECTION")
    CENTRAL_BAKERY_THRESHOLD_SUPPLIERS: int = Field(default=20, env="CENTRAL_BAKERY_THRESHOLD_SUPPLIERS")
    INDIVIDUAL_BAKERY_THRESHOLD_SUPPLIERS: int = Field(default=10, env="INDIVIDUAL_BAKERY_THRESHOLD_SUPPLIERS")

    # Performance Report Settings
    AUTO_GENERATE_MONTHLY_REPORTS: bool = Field(default=True, env="AUTO_GENERATE_MONTHLY_REPORTS")
    AUTO_GENERATE_QUARTERLY_REPORTS: bool = Field(default=True, env="AUTO_GENERATE_QUARTERLY_REPORTS")
    REPORT_EXPORT_FORMATS: List[str] = ["pdf", "excel", "csv"]


# Global settings instance
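
# Example environment overrides for the settings added above; values are illustrative
# and the variable names follow the env= mappings declared on each field.
#
#   PERFORMANCE_TRACKING_ENABLED=true
#   PERFORMANCE_CALCULATION_INTERVAL=30
#   SUPPLIERS_ALERTS_ENABLED=true
#   ALERT_EVALUATION_INTERVAL=10
#   SUPPLIERS_DASHBOARD_CACHE_TTL=120
#   CENTRAL_BAKERY_THRESHOLD_SUPPLIERS=25
#   INDIVIDUAL_BAKERY_THRESHOLD_SUPPLIERS=8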
@@ -119,6 +119,10 @@ app.include_router(suppliers.router, prefix=settings.API_V1_STR)
app.include_router(purchase_orders.router, prefix=settings.API_V1_STR)
app.include_router(deliveries.router, prefix=settings.API_V1_STR)

# Include enhanced performance tracking router
from app.api.performance import router as performance_router
app.include_router(performance_router, prefix=settings.API_V1_STR)


# Root endpoint
@app.get("/")
@@ -153,7 +157,16 @@ async def service_info():
            "price_list_management",
            "invoice_tracking",
            "supplier_ratings",
            "procurement_workflow"
            "procurement_workflow",
            "performance_tracking",
            "performance_analytics",
            "supplier_scorecards",
            "performance_alerts",
            "business_model_detection",
            "dashboard_analytics",
            "cost_optimization",
            "risk_assessment",
            "benchmarking"
        ]
    }
@@ -1 +1,53 @@
# services/suppliers/app/models/__init__.py
# services/suppliers/app/models/__init__.py
"""
Models package for the Supplier service
"""

from .suppliers import (
    Supplier, SupplierPriceList, PurchaseOrder, PurchaseOrderItem,
    Delivery, DeliveryItem, SupplierQualityReview, SupplierInvoice,
    SupplierType, SupplierStatus, PaymentTerms, PurchaseOrderStatus,
    DeliveryStatus, QualityRating, DeliveryRating, InvoiceStatus
)

from .performance import (
    SupplierPerformanceMetric, SupplierAlert, SupplierScorecard,
    SupplierBenchmark, AlertRule, AlertSeverity, AlertType, AlertStatus,
    PerformanceMetricType, PerformancePeriod
)

__all__ = [
    # Supplier Models
    'Supplier',
    'SupplierPriceList',
    'PurchaseOrder',
    'PurchaseOrderItem',
    'Delivery',
    'DeliveryItem',
    'SupplierQualityReview',
    'SupplierInvoice',

    # Performance Models
    'SupplierPerformanceMetric',
    'SupplierAlert',
    'SupplierScorecard',
    'SupplierBenchmark',
    'AlertRule',

    # Supplier Enums
    'SupplierType',
    'SupplierStatus',
    'PaymentTerms',
    'PurchaseOrderStatus',
    'DeliveryStatus',
    'QualityRating',
    'DeliveryRating',
    'InvoiceStatus',

    # Performance Enums
    'AlertSeverity',
    'AlertType',
    'AlertStatus',
    'PerformanceMetricType',
    'PerformancePeriod'
]
392
services/suppliers/app/models/performance.py
Normal file
@@ -0,0 +1,392 @@
# ================================================================
# services/suppliers/app/models/performance.py
# ================================================================
"""
Supplier Performance Tracking and Alert Models for Suppliers Service
Comprehensive supplier performance metrics, KPIs, and alert management
"""

from sqlalchemy import Column, String, DateTime, Float, Integer, Text, Index, Boolean, Numeric, ForeignKey, Enum as SQLEnum
|
||||
from sqlalchemy.dialects.postgresql import UUID, JSONB
|
||||
from sqlalchemy.orm import relationship
|
||||
import uuid
|
||||
import enum
|
||||
from datetime import datetime, timezone
|
||||
from typing import Dict, Any, Optional, List
|
||||
from decimal import Decimal
|
||||
|
||||
from shared.database.base import Base
|
||||
|
||||
|
||||
class AlertSeverity(enum.Enum):
|
||||
"""Alert severity levels"""
|
||||
CRITICAL = "critical"
|
||||
HIGH = "high"
|
||||
MEDIUM = "medium"
|
||||
LOW = "low"
|
||||
INFO = "info"
|
||||
|
||||
|
||||
class AlertType(enum.Enum):
|
||||
"""Types of supplier alerts"""
|
||||
POOR_QUALITY = "poor_quality"
|
||||
LATE_DELIVERY = "late_delivery"
|
||||
PRICE_INCREASE = "price_increase"
|
||||
LOW_PERFORMANCE = "low_performance"
|
||||
CONTRACT_EXPIRY = "contract_expiry"
|
||||
COMPLIANCE_ISSUE = "compliance_issue"
|
||||
FINANCIAL_RISK = "financial_risk"
|
||||
COMMUNICATION_ISSUE = "communication_issue"
|
||||
CAPACITY_CONSTRAINT = "capacity_constraint"
|
||||
CERTIFICATION_EXPIRY = "certification_expiry"
|
||||
|
||||
|
||||
class AlertStatus(enum.Enum):
|
||||
"""Alert processing status"""
|
||||
ACTIVE = "active"
|
||||
ACKNOWLEDGED = "acknowledged"
|
||||
IN_PROGRESS = "in_progress"
|
||||
RESOLVED = "resolved"
|
||||
DISMISSED = "dismissed"
|
||||
|
||||
|
||||
class PerformanceMetricType(enum.Enum):
|
||||
"""Types of performance metrics"""
|
||||
DELIVERY_PERFORMANCE = "delivery_performance"
|
||||
QUALITY_SCORE = "quality_score"
|
||||
PRICE_COMPETITIVENESS = "price_competitiveness"
|
||||
COMMUNICATION_RATING = "communication_rating"
|
||||
ORDER_ACCURACY = "order_accuracy"
|
||||
RESPONSE_TIME = "response_time"
|
||||
COMPLIANCE_SCORE = "compliance_score"
|
||||
FINANCIAL_STABILITY = "financial_stability"
|
||||
|
||||
|
||||
class PerformancePeriod(enum.Enum):
|
||||
"""Performance measurement periods"""
|
||||
DAILY = "daily"
|
||||
WEEKLY = "weekly"
|
||||
MONTHLY = "monthly"
|
||||
QUARTERLY = "quarterly"
|
||||
YEARLY = "yearly"
|
||||
|
||||
|
||||
class SupplierPerformanceMetric(Base):
|
||||
"""Supplier performance metrics tracking"""
|
||||
__tablename__ = "supplier_performance_metrics"
|
||||
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
supplier_id = Column(UUID(as_uuid=True), ForeignKey('suppliers.id'), nullable=False, index=True)
|
||||
|
||||
# Metric details
|
||||
metric_type = Column(SQLEnum(PerformanceMetricType), nullable=False, index=True)
|
||||
period = Column(SQLEnum(PerformancePeriod), nullable=False, index=True)
|
||||
period_start = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
period_end = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
|
||||
# Performance values
|
||||
metric_value = Column(Float, nullable=False) # Main metric value (0-100 scale)
|
||||
target_value = Column(Float, nullable=True) # Target/benchmark value
|
||||
previous_value = Column(Float, nullable=True) # Previous period value for comparison
|
||||
|
||||
# Supporting data
|
||||
total_orders = Column(Integer, nullable=False, default=0)
|
||||
total_deliveries = Column(Integer, nullable=False, default=0)
|
||||
on_time_deliveries = Column(Integer, nullable=False, default=0)
|
||||
late_deliveries = Column(Integer, nullable=False, default=0)
|
||||
quality_issues = Column(Integer, nullable=False, default=0)
|
||||
total_amount = Column(Numeric(12, 2), nullable=False, default=0.0)
|
||||
|
||||
# Detailed metrics breakdown
|
||||
metrics_data = Column(JSONB, nullable=True) # Detailed breakdown of calculations
|
||||
|
||||
# Performance trends
|
||||
trend_direction = Column(String(20), nullable=True) # improving, declining, stable
|
||||
trend_percentage = Column(Float, nullable=True) # % change from previous period
|
||||
|
||||
# Contextual information
|
||||
notes = Column(Text, nullable=True)
|
||||
external_factors = Column(JSONB, nullable=True) # External factors affecting performance
|
||||
|
||||
# Audit fields
|
||||
calculated_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
|
||||
calculated_by = Column(UUID(as_uuid=True), nullable=True) # System or user ID
|
||||
|
||||
# Relationships
|
||||
supplier = relationship("Supplier")
|
||||
|
||||
# Indexes
|
||||
__table_args__ = (
|
||||
Index('ix_performance_metrics_tenant_supplier', 'tenant_id', 'supplier_id'),
|
||||
Index('ix_performance_metrics_type_period', 'metric_type', 'period'),
|
||||
Index('ix_performance_metrics_period_dates', 'period_start', 'period_end'),
|
||||
Index('ix_performance_metrics_value', 'metric_value'),
|
||||
)
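
# How trend_direction / trend_percentage above relate to previous_value, as a minimal
# sketch; the 1.0-point dead band used for "stable" is an illustrative assumption.
def _compute_trend_sketch(current_value: float, previous_value: Optional[float]):
    if previous_value is None or previous_value == 0:
        return None, None
    change_pct = (current_value - previous_value) / previous_value * 100.0
    if abs(change_pct) < 1.0:
        direction = "stable"
    elif change_pct > 0:
        direction = "improving"
    else:
        direction = "declining"
    return direction, round(change_pct, 2)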
|
||||
|
||||
|
||||
class SupplierAlert(Base):
|
||||
"""Supplier-related alerts and notifications"""
|
||||
__tablename__ = "supplier_alerts"
|
||||
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
supplier_id = Column(UUID(as_uuid=True), ForeignKey('suppliers.id'), nullable=False, index=True)
|
||||
|
||||
# Alert classification
|
||||
alert_type = Column(SQLEnum(AlertType), nullable=False, index=True)
|
||||
severity = Column(SQLEnum(AlertSeverity), nullable=False, index=True)
|
||||
status = Column(SQLEnum(AlertStatus), nullable=False, default=AlertStatus.ACTIVE, index=True)
|
||||
|
||||
# Alert content
|
||||
title = Column(String(255), nullable=False)
|
||||
message = Column(Text, nullable=False)
|
||||
description = Column(Text, nullable=True)
|
||||
|
||||
# Alert triggers and context
|
||||
trigger_value = Column(Float, nullable=True) # The value that triggered the alert
|
||||
threshold_value = Column(Float, nullable=True) # The threshold that was exceeded
|
||||
metric_type = Column(SQLEnum(PerformanceMetricType), nullable=True, index=True)
|
||||
|
||||
# Related entities
|
||||
purchase_order_id = Column(UUID(as_uuid=True), nullable=True, index=True)
|
||||
delivery_id = Column(UUID(as_uuid=True), nullable=True, index=True)
|
||||
performance_metric_id = Column(UUID(as_uuid=True), ForeignKey('supplier_performance_metrics.id'), nullable=True)
|
||||
|
||||
# Alert lifecycle
|
||||
triggered_at = Column(DateTime(timezone=True), nullable=False, default=lambda: datetime.now(timezone.utc))
|
||||
acknowledged_at = Column(DateTime(timezone=True), nullable=True)
|
||||
acknowledged_by = Column(UUID(as_uuid=True), nullable=True)
|
||||
resolved_at = Column(DateTime(timezone=True), nullable=True)
|
||||
resolved_by = Column(UUID(as_uuid=True), nullable=True)
|
||||
|
||||
# Actions and resolution
|
||||
recommended_actions = Column(JSONB, nullable=True) # Suggested actions
|
||||
actions_taken = Column(JSONB, nullable=True) # Actions that were taken
|
||||
resolution_notes = Column(Text, nullable=True)
|
||||
|
||||
# Auto-resolution
|
||||
auto_resolve = Column(Boolean, nullable=False, default=False)
|
||||
auto_resolve_condition = Column(JSONB, nullable=True) # Conditions for auto-resolution
|
||||
|
||||
# Escalation
|
||||
escalated = Column(Boolean, nullable=False, default=False)
|
||||
escalated_at = Column(DateTime(timezone=True), nullable=True)
|
||||
escalated_to = Column(UUID(as_uuid=True), nullable=True) # User/role escalated to
|
||||
|
||||
# Notification tracking
|
||||
notification_sent = Column(Boolean, nullable=False, default=False)
|
||||
notification_sent_at = Column(DateTime(timezone=True), nullable=True)
|
||||
notification_recipients = Column(JSONB, nullable=True) # List of recipients
|
||||
|
||||
# Additional metadata
|
||||
priority_score = Column(Integer, nullable=False, default=50) # 1-100 priority scoring
|
||||
business_impact = Column(String(50), nullable=True) # high, medium, low impact
|
||||
tags = Column(JSONB, nullable=True) # Categorization tags
|
||||
|
||||
# Audit fields
|
||||
created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
|
||||
updated_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc), onupdate=lambda: datetime.now(timezone.utc))
|
||||
created_by = Column(UUID(as_uuid=True), nullable=True)
|
||||
|
||||
# Relationships
|
||||
supplier = relationship("Supplier")
|
||||
performance_metric = relationship("SupplierPerformanceMetric")
|
||||
|
||||
# Indexes
|
||||
__table_args__ = (
|
||||
Index('ix_supplier_alerts_tenant_supplier', 'tenant_id', 'supplier_id'),
|
||||
Index('ix_supplier_alerts_type_severity', 'alert_type', 'severity'),
|
||||
Index('ix_supplier_alerts_status_triggered', 'status', 'triggered_at'),
|
||||
Index('ix_supplier_alerts_metric_type', 'metric_type'),
|
||||
Index('ix_supplier_alerts_priority', 'priority_score'),
|
||||
)
|
||||
|
||||
|
||||
class SupplierScorecard(Base):
|
||||
"""Comprehensive supplier scorecards for performance evaluation"""
|
||||
__tablename__ = "supplier_scorecards"
|
||||
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
supplier_id = Column(UUID(as_uuid=True), ForeignKey('suppliers.id'), nullable=False, index=True)
|
||||
|
||||
# Scorecard details
|
||||
scorecard_name = Column(String(255), nullable=False)
|
||||
period = Column(SQLEnum(PerformancePeriod), nullable=False, index=True)
|
||||
period_start = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
period_end = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
|
||||
# Overall performance scores
|
||||
overall_score = Column(Float, nullable=False) # Weighted overall score (0-100)
|
||||
quality_score = Column(Float, nullable=False) # Quality performance (0-100)
|
||||
delivery_score = Column(Float, nullable=False) # Delivery performance (0-100)
|
||||
cost_score = Column(Float, nullable=False) # Cost competitiveness (0-100)
|
||||
service_score = Column(Float, nullable=False) # Service quality (0-100)
|
||||
|
||||
# Performance rankings
|
||||
overall_rank = Column(Integer, nullable=True) # Rank among all suppliers
|
||||
category_rank = Column(Integer, nullable=True) # Rank within supplier category
|
||||
total_suppliers_evaluated = Column(Integer, nullable=True)
|
||||
|
||||
# Detailed performance breakdown
|
||||
on_time_delivery_rate = Column(Float, nullable=False) # % of on-time deliveries
|
||||
quality_rejection_rate = Column(Float, nullable=False) # % of quality rejections
|
||||
order_accuracy_rate = Column(Float, nullable=False) # % of accurate orders
|
||||
response_time_hours = Column(Float, nullable=False) # Average response time
|
||||
cost_variance_percentage = Column(Float, nullable=False) # Cost variance from budget
|
||||
|
||||
# Business metrics
|
||||
total_orders_processed = Column(Integer, nullable=False, default=0)
|
||||
total_amount_processed = Column(Numeric(12, 2), nullable=False, default=0.0)
|
||||
average_order_value = Column(Numeric(10, 2), nullable=False, default=0.0)
|
||||
cost_savings_achieved = Column(Numeric(10, 2), nullable=False, default=0.0)
|
||||
|
||||
# Performance trends
|
||||
score_trend = Column(String(20), nullable=True) # improving, declining, stable
|
||||
score_change_percentage = Column(Float, nullable=True) # % change from previous period
|
||||
|
||||
# Recommendations and actions
|
||||
strengths = Column(JSONB, nullable=True) # List of strengths
|
||||
improvement_areas = Column(JSONB, nullable=True) # Areas for improvement
|
||||
recommended_actions = Column(JSONB, nullable=True) # Recommended actions
|
||||
|
||||
# Scorecard status
|
||||
is_final = Column(Boolean, nullable=False, default=False)
|
||||
approved_by = Column(UUID(as_uuid=True), nullable=True)
|
||||
approved_at = Column(DateTime(timezone=True), nullable=True)
|
||||
|
||||
# Additional information
|
||||
notes = Column(Text, nullable=True)
|
||||
attachments = Column(JSONB, nullable=True) # Supporting documents
|
||||
|
||||
# Audit fields
|
||||
generated_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
|
||||
generated_by = Column(UUID(as_uuid=True), nullable=False)
|
||||
|
||||
# Relationships
|
||||
supplier = relationship("Supplier")
|
||||
|
||||
# Indexes
|
||||
__table_args__ = (
|
||||
Index('ix_scorecards_tenant_supplier', 'tenant_id', 'supplier_id'),
|
||||
Index('ix_scorecards_period_dates', 'period_start', 'period_end'),
|
||||
Index('ix_scorecards_overall_score', 'overall_score'),
|
||||
Index('ix_scorecards_period', 'period'),
|
||||
Index('ix_scorecards_final', 'is_final'),
|
||||
)
|
||||
|
||||
|
||||
class SupplierBenchmark(Base):
|
||||
"""Supplier performance benchmarks and industry standards"""
|
||||
__tablename__ = "supplier_benchmarks"
|
||||
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Benchmark details
|
||||
benchmark_name = Column(String(255), nullable=False)
|
||||
benchmark_type = Column(String(50), nullable=False, index=True) # industry, internal, custom
|
||||
supplier_category = Column(String(100), nullable=True, index=True) # Target supplier category
|
||||
|
||||
# Metric thresholds
|
||||
metric_type = Column(SQLEnum(PerformanceMetricType), nullable=False, index=True)
|
||||
excellent_threshold = Column(Float, nullable=False) # Excellent performance threshold
|
||||
good_threshold = Column(Float, nullable=False) # Good performance threshold
|
||||
acceptable_threshold = Column(Float, nullable=False) # Acceptable performance threshold
|
||||
poor_threshold = Column(Float, nullable=False) # Poor performance threshold
|
||||
|
||||
# Benchmark context
|
||||
data_source = Column(String(255), nullable=True) # Source of benchmark data
|
||||
sample_size = Column(Integer, nullable=True) # Sample size for benchmark
|
||||
confidence_level = Column(Float, nullable=True) # Statistical confidence level
|
||||
|
||||
# Validity and updates
|
||||
effective_date = Column(DateTime(timezone=True), nullable=False, default=lambda: datetime.now(timezone.utc))
|
||||
expiry_date = Column(DateTime(timezone=True), nullable=True)
|
||||
is_active = Column(Boolean, nullable=False, default=True)
|
||||
|
||||
# Additional information
|
||||
description = Column(Text, nullable=True)
|
||||
methodology = Column(Text, nullable=True)
|
||||
notes = Column(Text, nullable=True)
|
||||
|
||||
# Audit fields
|
||||
created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
|
||||
updated_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc), onupdate=lambda: datetime.now(timezone.utc))
|
||||
created_by = Column(UUID(as_uuid=True), nullable=False)
|
||||
|
||||
# Indexes
|
||||
__table_args__ = (
|
||||
Index('ix_benchmarks_tenant_type', 'tenant_id', 'benchmark_type'),
|
||||
Index('ix_benchmarks_metric_type', 'metric_type'),
|
||||
Index('ix_benchmarks_category', 'supplier_category'),
|
||||
Index('ix_benchmarks_active', 'is_active'),
|
||||
)
|
||||
|
||||
|
||||
class AlertRule(Base):
|
||||
"""Configurable alert rules for supplier performance monitoring"""
|
||||
__tablename__ = "alert_rules"
|
||||
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Rule identification
|
||||
rule_name = Column(String(255), nullable=False)
|
||||
rule_description = Column(Text, nullable=True)
|
||||
is_active = Column(Boolean, nullable=False, default=True)
|
||||
|
||||
# Alert configuration
|
||||
alert_type = Column(SQLEnum(AlertType), nullable=False, index=True)
|
||||
severity = Column(SQLEnum(AlertSeverity), nullable=False)
|
||||
metric_type = Column(SQLEnum(PerformanceMetricType), nullable=True, index=True)
|
||||
|
||||
# Trigger conditions
|
||||
trigger_condition = Column(String(50), nullable=False) # greater_than, less_than, equals, etc.
|
||||
threshold_value = Column(Float, nullable=False)
|
||||
consecutive_violations = Column(Integer, nullable=False, default=1) # How many consecutive violations before alert
|
||||
|
||||
# Scope and filters
|
||||
supplier_categories = Column(JSONB, nullable=True) # Which supplier categories this applies to
|
||||
supplier_ids = Column(JSONB, nullable=True) # Specific suppliers (if applicable)
|
||||
exclude_suppliers = Column(JSONB, nullable=True) # Suppliers to exclude
|
||||
|
||||
# Time constraints
|
||||
evaluation_period = Column(SQLEnum(PerformancePeriod), nullable=False)
|
||||
time_window_hours = Column(Integer, nullable=True) # Time window for evaluation
|
||||
business_hours_only = Column(Boolean, nullable=False, default=False)
|
||||
|
||||
# Auto-resolution
|
||||
auto_resolve = Column(Boolean, nullable=False, default=False)
|
||||
auto_resolve_threshold = Column(Float, nullable=True) # Value at which alert auto-resolves
|
||||
auto_resolve_duration_hours = Column(Integer, nullable=True) # How long condition must be met
|
||||
|
||||
# Notification settings
|
||||
notification_enabled = Column(Boolean, nullable=False, default=True)
|
||||
notification_recipients = Column(JSONB, nullable=True) # List of recipients
|
||||
escalation_minutes = Column(Integer, nullable=True) # Minutes before escalation
|
||||
escalation_recipients = Column(JSONB, nullable=True) # Escalation recipients
|
||||
|
||||
# Action triggers
|
||||
recommended_actions = Column(JSONB, nullable=True) # Actions to recommend
|
||||
auto_actions = Column(JSONB, nullable=True) # Actions to automatically trigger
|
||||
|
||||
# Rule metadata
|
||||
priority = Column(Integer, nullable=False, default=50) # Rule priority (1-100)
|
||||
tags = Column(JSONB, nullable=True) # Classification tags
|
||||
|
||||
# Audit fields
|
||||
created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
|
||||
updated_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc), onupdate=lambda: datetime.now(timezone.utc))
|
||||
created_by = Column(UUID(as_uuid=True), nullable=False)
|
||||
last_triggered = Column(DateTime(timezone=True), nullable=True)
|
||||
trigger_count = Column(Integer, nullable=False, default=0)
|
||||
|
||||
# Indexes
|
||||
__table_args__ = (
|
||||
Index('ix_alert_rules_tenant_active', 'tenant_id', 'is_active'),
|
||||
Index('ix_alert_rules_type_severity', 'alert_type', 'severity'),
|
||||
Index('ix_alert_rules_metric_type', 'metric_type'),
|
||||
Index('ix_alert_rules_priority', 'priority'),
|
||||
)
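
# A minimal sketch of how a rule's trigger_condition, threshold_value and
# consecutive_violations could be evaluated against recent metric values. The
# comparator names beyond those listed in the trigger_condition comment, and the
# helper itself, are illustrative assumptions.
def _rule_matches_sketch(rule: "AlertRule", recent_values: List[float]) -> bool:
    import operator

    comparators = {
        "greater_than": operator.gt,
        "greater_or_equal": operator.ge,
        "less_than": operator.lt,
        "less_or_equal": operator.le,
        "equals": operator.eq,
    }
    compare = comparators.get(rule.trigger_condition)
    if compare is None or not rule.is_active:
        return False

    window = recent_values[-rule.consecutive_violations:]
    return (
        len(window) >= rule.consecutive_violations
        and all(compare(value, rule.threshold_value) for value in window)
    )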
|
||||
385
services/suppliers/app/schemas/performance.py
Normal file
@@ -0,0 +1,385 @@
# ================================================================
# services/suppliers/app/schemas/performance.py
# ================================================================
"""
Performance Tracking and Alert Schemas for Suppliers Service
"""

from datetime import datetime
|
||||
from typing import List, Optional, Dict, Any
|
||||
from uuid import UUID
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from decimal import Decimal
|
||||
|
||||
from app.models.performance import (
|
||||
AlertSeverity, AlertType, AlertStatus, PerformanceMetricType,
|
||||
PerformancePeriod
|
||||
)
|
||||
|
||||
|
||||
# ===== Base Schemas =====
|
||||
|
||||
class PerformanceMetricBase(BaseModel):
|
||||
"""Base schema for performance metrics"""
|
||||
metric_type: PerformanceMetricType
|
||||
period: PerformancePeriod
|
||||
period_start: datetime
|
||||
period_end: datetime
|
||||
metric_value: float = Field(ge=0, le=100)
|
||||
target_value: Optional[float] = None
|
||||
total_orders: int = Field(ge=0, default=0)
|
||||
total_deliveries: int = Field(ge=0, default=0)
|
||||
on_time_deliveries: int = Field(ge=0, default=0)
|
||||
late_deliveries: int = Field(ge=0, default=0)
|
||||
quality_issues: int = Field(ge=0, default=0)
|
||||
total_amount: Decimal = Field(ge=0, default=0)
|
||||
notes: Optional[str] = None
|
||||
|
||||
|
||||
class PerformanceMetricCreate(PerformanceMetricBase):
|
||||
"""Schema for creating performance metrics"""
|
||||
supplier_id: UUID
|
||||
metrics_data: Optional[Dict[str, Any]] = None
|
||||
external_factors: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class PerformanceMetricUpdate(BaseModel):
|
||||
"""Schema for updating performance metrics"""
|
||||
metric_value: Optional[float] = Field(None, ge=0, le=100)
|
||||
target_value: Optional[float] = None
|
||||
notes: Optional[str] = None
|
||||
metrics_data: Optional[Dict[str, Any]] = None
|
||||
external_factors: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class PerformanceMetric(PerformanceMetricBase):
|
||||
"""Complete performance metric schema"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
supplier_id: UUID
|
||||
previous_value: Optional[float] = None
|
||||
trend_direction: Optional[str] = None
|
||||
trend_percentage: Optional[float] = None
|
||||
metrics_data: Optional[Dict[str, Any]] = None
|
||||
external_factors: Optional[Dict[str, Any]] = None
|
||||
calculated_at: datetime
|
||||
|
||||
class Config:
|
||||
orm_mode = True
|
||||
|
||||
|
||||
# ===== Alert Schemas =====
|
||||
|
||||
class AlertBase(BaseModel):
|
||||
"""Base schema for alerts"""
|
||||
alert_type: AlertType
|
||||
severity: AlertSeverity
|
||||
title: str = Field(max_length=255)
|
||||
message: str
|
||||
description: Optional[str] = None
|
||||
trigger_value: Optional[float] = None
|
||||
threshold_value: Optional[float] = None
|
||||
metric_type: Optional[PerformanceMetricType] = None
|
||||
recommended_actions: Optional[List[Dict[str, Any]]] = None
|
||||
auto_resolve: bool = False
|
||||
|
||||
|
||||
class AlertCreate(AlertBase):
|
||||
"""Schema for creating alerts"""
|
||||
supplier_id: UUID
|
||||
purchase_order_id: Optional[UUID] = None
|
||||
delivery_id: Optional[UUID] = None
|
||||
performance_metric_id: Optional[UUID] = None
|
||||
priority_score: int = Field(ge=1, le=100, default=50)
|
||||
business_impact: Optional[str] = None
|
||||
tags: Optional[List[str]] = None
|
||||
|
||||
|
||||
class AlertUpdate(BaseModel):
|
||||
"""Schema for updating alerts"""
|
||||
status: Optional[AlertStatus] = None
|
||||
actions_taken: Optional[List[Dict[str, Any]]] = None
|
||||
resolution_notes: Optional[str] = None
|
||||
escalated: Optional[bool] = None
|
||||
|
||||
|
||||
class Alert(AlertBase):
|
||||
"""Complete alert schema"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
supplier_id: UUID
|
||||
status: AlertStatus
|
||||
purchase_order_id: Optional[UUID] = None
|
||||
delivery_id: Optional[UUID] = None
|
||||
performance_metric_id: Optional[UUID] = None
|
||||
triggered_at: datetime
|
||||
acknowledged_at: Optional[datetime] = None
|
||||
acknowledged_by: Optional[UUID] = None
|
||||
resolved_at: Optional[datetime] = None
|
||||
resolved_by: Optional[UUID] = None
|
||||
actions_taken: Optional[List[Dict[str, Any]]] = None
|
||||
resolution_notes: Optional[str] = None
|
||||
escalated: bool = False
|
||||
escalated_at: Optional[datetime] = None
|
||||
notification_sent: bool = False
|
||||
priority_score: int
|
||||
business_impact: Optional[str] = None
|
||||
tags: Optional[List[str]] = None
|
||||
created_at: datetime
|
||||
|
||||
class Config:
|
||||
orm_mode = True
|
||||
|
||||
|
||||
# ===== Scorecard Schemas =====
|
||||
|
||||
class ScorecardBase(BaseModel):
|
||||
"""Base schema for supplier scorecards"""
|
||||
scorecard_name: str = Field(max_length=255)
|
||||
period: PerformancePeriod
|
||||
period_start: datetime
|
||||
period_end: datetime
|
||||
overall_score: float = Field(ge=0, le=100)
|
||||
quality_score: float = Field(ge=0, le=100)
|
||||
delivery_score: float = Field(ge=0, le=100)
|
||||
cost_score: float = Field(ge=0, le=100)
|
||||
service_score: float = Field(ge=0, le=100)
|
||||
on_time_delivery_rate: float = Field(ge=0, le=100)
|
||||
quality_rejection_rate: float = Field(ge=0, le=100)
|
||||
order_accuracy_rate: float = Field(ge=0, le=100)
|
||||
response_time_hours: float = Field(ge=0)
|
||||
cost_variance_percentage: float
|
||||
total_orders_processed: int = Field(ge=0, default=0)
|
||||
total_amount_processed: Decimal = Field(ge=0, default=0)
|
||||
average_order_value: Decimal = Field(ge=0, default=0)
|
||||
cost_savings_achieved: Decimal = Field(default=0)
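
# The models file describes overall_score as a weighted overall score; a minimal
# sketch with illustrative weights (the 40/30/15/15 split is an assumption, not a
# value taken from this commit).
def _weighted_overall_score_sketch(
    quality_score: float,
    delivery_score: float,
    cost_score: float,
    service_score: float,
) -> float:
    weights = {"quality": 0.40, "delivery": 0.30, "cost": 0.15, "service": 0.15}
    blended = (
        quality_score * weights["quality"]
        + delivery_score * weights["delivery"]
        + cost_score * weights["cost"]
        + service_score * weights["service"]
    )
    return round(blended, 2)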
|
||||
|
||||
|
||||
class ScorecardCreate(ScorecardBase):
|
||||
"""Schema for creating scorecards"""
|
||||
supplier_id: UUID
|
||||
strengths: Optional[List[str]] = None
|
||||
improvement_areas: Optional[List[str]] = None
|
||||
recommended_actions: Optional[List[Dict[str, Any]]] = None
|
||||
notes: Optional[str] = None
|
||||
|
||||
|
||||
class ScorecardUpdate(BaseModel):
|
||||
"""Schema for updating scorecards"""
|
||||
overall_score: Optional[float] = Field(None, ge=0, le=100)
|
||||
quality_score: Optional[float] = Field(None, ge=0, le=100)
|
||||
delivery_score: Optional[float] = Field(None, ge=0, le=100)
|
||||
cost_score: Optional[float] = Field(None, ge=0, le=100)
|
||||
service_score: Optional[float] = Field(None, ge=0, le=100)
|
||||
strengths: Optional[List[str]] = None
|
||||
improvement_areas: Optional[List[str]] = None
|
||||
recommended_actions: Optional[List[Dict[str, Any]]] = None
|
||||
notes: Optional[str] = None
|
||||
is_final: Optional[bool] = None
|
||||
|
||||
|
||||
class Scorecard(ScorecardBase):
|
||||
"""Complete scorecard schema"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
supplier_id: UUID
|
||||
overall_rank: Optional[int] = None
|
||||
category_rank: Optional[int] = None
|
||||
total_suppliers_evaluated: Optional[int] = None
|
||||
score_trend: Optional[str] = None
|
||||
score_change_percentage: Optional[float] = None
|
||||
strengths: Optional[List[str]] = None
|
||||
improvement_areas: Optional[List[str]] = None
|
||||
recommended_actions: Optional[List[Dict[str, Any]]] = None
|
||||
is_final: bool = False
|
||||
approved_by: Optional[UUID] = None
|
||||
approved_at: Optional[datetime] = None
|
||||
notes: Optional[str] = None
|
||||
attachments: Optional[List[Dict[str, Any]]] = None
|
||||
generated_at: datetime
|
||||
generated_by: UUID
|
||||
|
||||
class Config:
|
||||
orm_mode = True
|
||||
|
||||
|
||||
# ===== Dashboard Schemas =====
|
||||
|
||||
class PerformanceDashboardSummary(BaseModel):
|
||||
"""Performance dashboard summary schema"""
|
||||
total_suppliers: int
|
||||
active_suppliers: int
|
||||
suppliers_above_threshold: int
|
||||
suppliers_below_threshold: int
|
||||
average_overall_score: float
|
||||
average_delivery_rate: float
|
||||
average_quality_rate: float
|
||||
total_active_alerts: int
|
||||
critical_alerts: int
|
||||
high_priority_alerts: int
|
||||
recent_scorecards_generated: int
|
||||
cost_savings_this_month: Decimal
|
||||
|
||||
# Performance trends
|
||||
performance_trend: str # improving, declining, stable
|
||||
delivery_trend: str
|
||||
quality_trend: str
|
||||
|
||||
# Business model insights
|
||||
detected_business_model: str # individual_bakery, central_bakery, hybrid
|
||||
model_confidence: float
|
||||
business_model_metrics: Dict[str, Any]
|
||||
|
||||
|
||||
class SupplierPerformanceInsights(BaseModel):
|
||||
"""Supplier performance insights schema"""
|
||||
supplier_id: UUID
|
||||
supplier_name: str
|
||||
current_overall_score: float
|
||||
previous_score: Optional[float] = None
|
||||
score_change_percentage: Optional[float] = None
|
||||
performance_rank: Optional[int] = None
|
||||
|
||||
# Key performance indicators
|
||||
delivery_performance: float
|
||||
quality_performance: float
|
||||
cost_performance: float
|
||||
service_performance: float
|
||||
|
||||
# Recent metrics
|
||||
orders_last_30_days: int
|
||||
average_delivery_time: float
|
||||
quality_issues_count: int
|
||||
cost_variance: float
|
||||
|
||||
# Alert summary
|
||||
active_alerts: int
|
||||
resolved_alerts_last_30_days: int
|
||||
alert_trend: str
|
||||
|
||||
# Performance categorization
|
||||
performance_category: str # excellent, good, acceptable, needs_improvement, poor
|
||||
risk_level: str # low, medium, high, critical
|
||||
|
||||
# Recommendations
|
||||
top_strengths: List[str]
|
||||
improvement_priorities: List[str]
|
||||
recommended_actions: List[Dict[str, Any]]
|
||||
|
||||
|
||||
class PerformanceAnalytics(BaseModel):
|
||||
"""Advanced performance analytics schema"""
|
||||
period_start: datetime
|
||||
period_end: datetime
|
||||
total_suppliers_analyzed: int
|
||||
|
||||
# Performance distribution
|
||||
performance_distribution: Dict[str, int] # excellent, good, etc.
|
||||
score_ranges: Dict[str, List[float]] # min, max, avg per range
|
||||
|
||||
# Trend analysis
|
||||
overall_trend: Dict[str, float] # month-over-month changes
|
||||
delivery_trends: Dict[str, float]
|
||||
quality_trends: Dict[str, float]
|
||||
cost_trends: Dict[str, float]
|
||||
|
||||
# Comparative analysis
|
||||
top_performers: List[SupplierPerformanceInsights]
|
||||
underperformers: List[SupplierPerformanceInsights]
|
||||
most_improved: List[SupplierPerformanceInsights]
|
||||
biggest_declines: List[SupplierPerformanceInsights]
|
||||
|
||||
# Risk analysis
|
||||
high_risk_suppliers: List[Dict[str, Any]]
|
||||
contract_renewals_due: List[Dict[str, Any]]
|
||||
certification_expiries: List[Dict[str, Any]]
|
||||
|
||||
# Financial impact
|
||||
total_procurement_value: Decimal
|
||||
cost_savings_achieved: Decimal
|
||||
cost_avoidance: Decimal
|
||||
financial_risk_exposure: Decimal
|
||||
|
||||
|
||||
class AlertSummary(BaseModel):
|
||||
"""Alert summary schema"""
|
||||
alert_type: AlertType
|
||||
severity: AlertSeverity
|
||||
count: int
|
||||
avg_resolution_time_hours: Optional[float] = None
|
||||
oldest_alert_age_hours: Optional[float] = None
|
||||
trend_percentage: Optional[float] = None
|
||||
|
||||
|
||||
class DashboardFilter(BaseModel):
|
||||
"""Dashboard filter schema"""
|
||||
supplier_ids: Optional[List[UUID]] = None
|
||||
supplier_categories: Optional[List[str]] = None
|
||||
performance_categories: Optional[List[str]] = None
|
||||
date_from: Optional[datetime] = None
|
||||
date_to: Optional[datetime] = None
|
||||
include_inactive: bool = False
|
||||
|
||||
|
||||
class AlertFilter(BaseModel):
|
||||
"""Alert filter schema"""
|
||||
alert_types: Optional[List[AlertType]] = None
|
||||
severities: Optional[List[AlertSeverity]] = None
|
||||
statuses: Optional[List[AlertStatus]] = None
|
||||
supplier_ids: Optional[List[UUID]] = None
|
||||
date_from: Optional[datetime] = None
|
||||
date_to: Optional[datetime] = None
|
||||
metric_types: Optional[List[PerformanceMetricType]] = None
|
||||
|
||||
|
||||
# ===== Business Model Detection =====
|
||||
|
||||
class BusinessModelInsights(BaseModel):
|
||||
"""Business model detection and insights schema"""
|
||||
detected_model: str # individual_bakery, central_bakery, hybrid
|
||||
confidence_score: float
|
||||
model_characteristics: Dict[str, Any]
|
||||
|
||||
# Model-specific metrics
|
||||
supplier_diversity_score: float
|
||||
procurement_volume_patterns: Dict[str, Any]
|
||||
delivery_frequency_patterns: Dict[str, Any]
|
||||
order_size_patterns: Dict[str, Any]
|
||||
|
||||
# Recommendations
|
||||
optimization_opportunities: List[Dict[str, Any]]
|
||||
recommended_supplier_mix: Dict[str, Any]
|
||||
cost_optimization_potential: Decimal
|
||||
risk_mitigation_suggestions: List[str]
|
||||
|
||||
# Benchmarking
|
||||
industry_comparison: Dict[str, float]
|
||||
peer_comparison: Optional[Dict[str, float]] = None
|
||||
|
||||
|
||||
# ===== Export and Reporting =====
|
||||
|
||||
class PerformanceReportRequest(BaseModel):
|
||||
"""Performance report generation request"""
|
||||
report_type: str # scorecard, analytics, alerts, comprehensive
|
||||
format: str = Field(pattern="^(pdf|excel|csv|json)$")
|
||||
period: PerformancePeriod
|
||||
date_from: datetime
|
||||
date_to: datetime
|
||||
supplier_ids: Optional[List[UUID]] = None
|
||||
include_charts: bool = True
|
||||
include_recommendations: bool = True
|
||||
include_benchmarks: bool = True
|
||||
custom_metrics: Optional[List[str]] = None
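# Illustrative example (hypothetical values; PerformancePeriod.MONTHLY is assumed to exist
# as used elsewhere in this service):
#   PerformanceReportRequest(report_type="scorecard", format="pdf",
#                            period=PerformancePeriod.MONTHLY,
#                            date_from=datetime(2024, 1, 1),
#                            date_to=datetime(2024, 1, 31))
# is intended to validate, while format="docx" should fail the `format` pattern check.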
|
||||
|
||||
|
||||
class ExportDataResponse(BaseModel):
|
||||
"""Export data response schema"""
|
||||
export_id: UUID
|
||||
format: str
|
||||
file_url: Optional[str] = None
|
||||
file_size_bytes: Optional[int] = None
|
||||
generated_at: datetime
|
||||
expires_at: datetime
|
||||
status: str # generating, ready, expired, failed
|
||||
error_message: Optional[str] = None
|
||||
@@ -1 +1,19 @@
|
||||
# services/suppliers/app/services/__init__.py
|
||||
"""
|
||||
Services package for the Supplier service
|
||||
"""
|
||||
|
||||
from .supplier_service import SupplierService
|
||||
from .purchase_order_service import PurchaseOrderService
|
||||
from .delivery_service import DeliveryService
|
||||
from .performance_service import PerformanceTrackingService, AlertService
|
||||
from .dashboard_service import DashboardService
|
||||
|
||||
__all__ = [
|
||||
'SupplierService',
|
||||
'PurchaseOrderService',
|
||||
'DeliveryService',
|
||||
'PerformanceTrackingService',
|
||||
'AlertService',
|
||||
'DashboardService'
|
||||
]
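# Usage sketch (illustrative): consumers import services from the package root, e.g.
#   from app.services import DashboardService, PerformanceTrackingService
#   dashboard_service = DashboardService()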
|
||||
624
services/suppliers/app/services/dashboard_service.py
Normal file
@@ -0,0 +1,624 @@
|
||||
# ================================================================
|
||||
# services/suppliers/app/services/dashboard_service.py
|
||||
# ================================================================
|
||||
"""
|
||||
Supplier Dashboard and Analytics Service
|
||||
Comprehensive supplier performance dashboards and business intelligence
|
||||
"""
|
||||
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import List, Optional, Dict, Any
|
||||
from uuid import UUID
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, func, and_, or_, desc, asc, text, extract
|
||||
from decimal import Decimal
|
||||
import structlog
|
||||
|
||||
from app.models.suppliers import (
|
||||
Supplier, PurchaseOrder, Delivery, SupplierQualityReview,
|
||||
SupplierStatus, SupplierType, PurchaseOrderStatus, DeliveryStatus
|
||||
)
|
||||
from app.models.performance import (
|
||||
SupplierPerformanceMetric, SupplierScorecard, SupplierAlert,
|
||||
PerformanceMetricType, PerformancePeriod, AlertSeverity, AlertStatus
|
||||
)
|
||||
from app.schemas.performance import (
|
||||
PerformanceDashboardSummary, SupplierPerformanceInsights,
|
||||
PerformanceAnalytics, BusinessModelInsights, AlertSummary
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class DashboardService:
|
||||
"""Service for supplier performance dashboards and analytics"""
|
||||
|
||||
def __init__(self):
|
||||
self.logger = logger.bind(service="dashboard_service")
|
||||
|
||||
async def get_performance_dashboard_summary(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
date_from: Optional[datetime] = None,
|
||||
date_to: Optional[datetime] = None
|
||||
) -> PerformanceDashboardSummary:
|
||||
"""Get comprehensive performance dashboard summary"""
|
||||
try:
|
||||
# Default date range - last 30 days
|
||||
if not date_to:
|
||||
date_to = datetime.now(timezone.utc)
|
||||
if not date_from:
|
||||
date_from = date_to - timedelta(days=30)
|
||||
|
||||
self.logger.info("Generating dashboard summary",
|
||||
tenant_id=str(tenant_id),
|
||||
date_from=date_from.isoformat(),
|
||||
date_to=date_to.isoformat())
|
||||
|
||||
# Get supplier statistics
|
||||
supplier_stats = await self._get_supplier_statistics(db, tenant_id)
|
||||
|
||||
# Get performance statistics
|
||||
performance_stats = await self._get_performance_statistics(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get alert statistics
|
||||
alert_stats = await self._get_alert_statistics(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get financial statistics
|
||||
financial_stats = await self._get_financial_statistics(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get business model insights
|
||||
business_model = await self._detect_business_model(db, tenant_id)
|
||||
|
||||
# Calculate trends
|
||||
trends = await self._calculate_performance_trends(db, tenant_id, date_from, date_to)
|
||||
|
||||
return PerformanceDashboardSummary(
|
||||
total_suppliers=supplier_stats['total_suppliers'],
|
||||
active_suppliers=supplier_stats['active_suppliers'],
|
||||
suppliers_above_threshold=performance_stats['above_threshold'],
|
||||
suppliers_below_threshold=performance_stats['below_threshold'],
|
||||
average_overall_score=performance_stats['avg_overall_score'],
|
||||
average_delivery_rate=performance_stats['avg_delivery_rate'],
|
||||
average_quality_rate=performance_stats['avg_quality_rate'],
|
||||
total_active_alerts=alert_stats['total_active'],
|
||||
critical_alerts=alert_stats['critical_alerts'],
|
||||
high_priority_alerts=alert_stats['high_priority'],
|
||||
recent_scorecards_generated=performance_stats['recent_scorecards'],
|
||||
cost_savings_this_month=financial_stats['cost_savings'],
|
||||
performance_trend=trends['performance_trend'],
|
||||
delivery_trend=trends['delivery_trend'],
|
||||
quality_trend=trends['quality_trend'],
|
||||
detected_business_model=business_model['model'],
|
||||
model_confidence=business_model['confidence'],
|
||||
business_model_metrics=business_model['metrics']
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error generating dashboard summary", error=str(e))
|
||||
raise
|
||||
|
||||
async def get_supplier_performance_insights(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
supplier_id: UUID,
|
||||
days_back: int = 30
|
||||
) -> SupplierPerformanceInsights:
|
||||
"""Get detailed performance insights for a specific supplier"""
|
||||
try:
|
||||
date_to = datetime.now(timezone.utc)
|
||||
date_from = date_to - timedelta(days=days_back)
|
||||
|
||||
# Get supplier info
|
||||
supplier = await self._get_supplier_info(db, supplier_id, tenant_id)
|
||||
|
||||
# Get current performance metrics
|
||||
current_metrics = await self._get_current_performance_metrics(db, supplier_id, tenant_id)
|
||||
|
||||
# Get previous period metrics for comparison
|
||||
previous_metrics = await self._get_previous_performance_metrics(db, supplier_id, tenant_id, days_back)
|
||||
|
||||
# Get recent activity statistics
|
||||
activity_stats = await self._get_supplier_activity_stats(db, supplier_id, tenant_id, date_from, date_to)
|
||||
|
||||
# Get alert summary
|
||||
alert_summary = await self._get_supplier_alert_summary(db, supplier_id, tenant_id, date_from, date_to)
|
||||
|
||||
# Calculate performance categorization
|
||||
performance_category = self._categorize_performance(current_metrics.get('overall_score', 0))
|
||||
risk_level = self._assess_risk_level(current_metrics, alert_summary)
|
||||
|
||||
# Generate recommendations
|
||||
recommendations = await self._generate_supplier_recommendations(
|
||||
db, supplier_id, tenant_id, current_metrics, activity_stats, alert_summary
|
||||
)
|
||||
|
||||
return SupplierPerformanceInsights(
|
||||
supplier_id=supplier_id,
|
||||
supplier_name=supplier['name'],
|
||||
current_overall_score=current_metrics.get('overall_score', 0),
|
||||
previous_score=previous_metrics.get('overall_score'),
|
||||
score_change_percentage=self._calculate_change_percentage(
|
||||
current_metrics.get('overall_score', 0),
|
||||
previous_metrics.get('overall_score')
|
||||
),
|
||||
performance_rank=current_metrics.get('rank'),
|
||||
delivery_performance=current_metrics.get('delivery_performance', 0),
|
||||
quality_performance=current_metrics.get('quality_performance', 0),
|
||||
cost_performance=current_metrics.get('cost_performance', 0),
|
||||
service_performance=current_metrics.get('service_performance', 0),
|
||||
orders_last_30_days=activity_stats['orders_count'],
|
||||
average_delivery_time=activity_stats['avg_delivery_time'],
|
||||
quality_issues_count=activity_stats['quality_issues'],
|
||||
cost_variance=activity_stats['cost_variance'],
|
||||
active_alerts=alert_summary['active_count'],
|
||||
resolved_alerts_last_30_days=alert_summary['resolved_count'],
|
||||
alert_trend=alert_summary['trend'],
|
||||
performance_category=performance_category,
|
||||
risk_level=risk_level,
|
||||
top_strengths=recommendations['strengths'],
|
||||
improvement_priorities=recommendations['improvements'],
|
||||
recommended_actions=recommendations['actions']
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error generating supplier insights",
|
||||
supplier_id=str(supplier_id),
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def get_performance_analytics(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
period_days: int = 90
|
||||
) -> PerformanceAnalytics:
|
||||
"""Get advanced performance analytics"""
|
||||
try:
|
||||
date_to = datetime.now(timezone.utc)
|
||||
date_from = date_to - timedelta(days=period_days)
|
||||
|
||||
# Get performance distribution
|
||||
performance_distribution = await self._get_performance_distribution(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get trend analysis
|
||||
trends = await self._get_detailed_trends(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get comparative analysis
|
||||
comparative_analysis = await self._get_comparative_analysis(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get risk analysis
|
||||
risk_analysis = await self._get_risk_analysis(db, tenant_id, date_from, date_to)
|
||||
|
||||
# Get financial impact
|
||||
financial_impact = await self._get_financial_impact(db, tenant_id, date_from, date_to)
|
||||
|
||||
return PerformanceAnalytics(
|
||||
period_start=date_from,
|
||||
period_end=date_to,
|
||||
total_suppliers_analyzed=performance_distribution['total_suppliers'],
|
||||
performance_distribution=performance_distribution['distribution'],
|
||||
score_ranges=performance_distribution['score_ranges'],
|
||||
overall_trend=trends['overall'],
|
||||
delivery_trends=trends['delivery'],
|
||||
quality_trends=trends['quality'],
|
||||
cost_trends=trends['cost'],
|
||||
top_performers=comparative_analysis['top_performers'],
|
||||
underperformers=comparative_analysis['underperformers'],
|
||||
most_improved=comparative_analysis['most_improved'],
|
||||
biggest_declines=comparative_analysis['biggest_declines'],
|
||||
high_risk_suppliers=risk_analysis['high_risk'],
|
||||
contract_renewals_due=risk_analysis['contract_renewals'],
|
||||
certification_expiries=risk_analysis['certification_expiries'],
|
||||
total_procurement_value=financial_impact['total_value'],
|
||||
cost_savings_achieved=financial_impact['cost_savings'],
|
||||
cost_avoidance=financial_impact['cost_avoidance'],
|
||||
financial_risk_exposure=financial_impact['risk_exposure']
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error generating performance analytics", error=str(e))
|
||||
raise
|
||||
|
||||
async def get_business_model_insights(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID
|
||||
) -> BusinessModelInsights:
|
||||
"""Get business model detection and insights"""
|
||||
try:
|
||||
# Analyze supplier patterns
|
||||
supplier_patterns = await self._analyze_supplier_patterns(db, tenant_id)
|
||||
|
||||
# Detect business model
|
||||
business_model = await self._detect_business_model_detailed(db, tenant_id)
|
||||
|
||||
# Generate optimization recommendations
|
||||
optimization = await self._generate_optimization_recommendations(db, tenant_id, business_model)
|
||||
|
||||
# Get benchmarking data
|
||||
benchmarking = await self._get_benchmarking_data(db, tenant_id, business_model['model'])
|
||||
|
||||
return BusinessModelInsights(
|
||||
detected_model=business_model['model'],
|
||||
confidence_score=business_model['confidence'],
|
||||
model_characteristics=business_model['characteristics'],
|
||||
supplier_diversity_score=supplier_patterns['diversity_score'],
|
||||
procurement_volume_patterns=supplier_patterns['volume_patterns'],
|
||||
delivery_frequency_patterns=supplier_patterns['delivery_patterns'],
|
||||
order_size_patterns=supplier_patterns['order_size_patterns'],
|
||||
optimization_opportunities=optimization['opportunities'],
|
||||
recommended_supplier_mix=optimization['supplier_mix'],
|
||||
cost_optimization_potential=optimization['cost_potential'],
|
||||
risk_mitigation_suggestions=optimization['risk_mitigation'],
|
||||
industry_comparison=benchmarking['industry'],
|
||||
peer_comparison=benchmarking.get('peer')
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error generating business model insights", error=str(e))
|
||||
raise
|
||||
|
||||
async def get_alert_summary(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
date_from: Optional[datetime] = None,
|
||||
date_to: Optional[datetime] = None
|
||||
) -> List[AlertSummary]:
|
||||
"""Get alert summary by type and severity"""
|
||||
try:
|
||||
if not date_to:
|
||||
date_to = datetime.now(timezone.utc)
|
||||
if not date_from:
|
||||
date_from = date_to - timedelta(days=30)
|
||||
|
||||
query = select(
|
||||
SupplierAlert.alert_type,
|
||||
SupplierAlert.severity,
|
||||
func.count(SupplierAlert.id).label('count'),
|
||||
func.avg(
extract('epoch', SupplierAlert.resolved_at - SupplierAlert.triggered_at) / 3600
).label('avg_resolution_hours'),
func.max(
extract('epoch', func.current_timestamp() - SupplierAlert.triggered_at) / 3600
).label('oldest_age_hours')
|
||||
).where(
|
||||
and_(
|
||||
SupplierAlert.tenant_id == tenant_id,
|
||||
SupplierAlert.triggered_at >= date_from,
|
||||
SupplierAlert.triggered_at <= date_to
|
||||
)
|
||||
).group_by(SupplierAlert.alert_type, SupplierAlert.severity)
|
||||
|
||||
result = await db.execute(query)
|
||||
rows = result.all()
|
||||
|
||||
alert_summaries = []
|
||||
for row in rows:
|
||||
alert_summaries.append(AlertSummary(
|
||||
alert_type=row.alert_type,
|
||||
severity=row.severity,
|
||||
count=row.count,
|
||||
avg_resolution_time_hours=row.avg_resolution_hours,
|
||||
oldest_alert_age_hours=row.oldest_age_hours
|
||||
))
|
||||
|
||||
return alert_summaries
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error getting alert summary", error=str(e))
|
||||
raise
|
||||
|
||||
# === Private Helper Methods ===
|
||||
|
||||
async def _get_supplier_statistics(self, db: AsyncSession, tenant_id: UUID) -> Dict[str, int]:
|
||||
"""Get basic supplier statistics"""
|
||||
query = select(
|
||||
func.count(Supplier.id).label('total_suppliers'),
|
||||
func.count(Supplier.id).filter(Supplier.status == SupplierStatus.ACTIVE).label('active_suppliers')
|
||||
).where(Supplier.tenant_id == tenant_id)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
return {
|
||||
'total_suppliers': row.total_suppliers or 0,
|
||||
'active_suppliers': row.active_suppliers or 0
|
||||
}
|
||||
|
||||
async def _get_performance_statistics(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
date_from: datetime,
|
||||
date_to: datetime
|
||||
) -> Dict[str, Any]:
|
||||
"""Get performance statistics"""
|
||||
# Get recent performance metrics
|
||||
query = select(
|
||||
func.avg(SupplierPerformanceMetric.metric_value).label('avg_score'),
|
||||
func.count(SupplierPerformanceMetric.id).filter(
SupplierPerformanceMetric.metric_value >= settings.GOOD_DELIVERY_RATE
).label('above_threshold'),
func.count(SupplierPerformanceMetric.id).filter(
SupplierPerformanceMetric.metric_value < settings.GOOD_DELIVERY_RATE
).label('below_threshold')
|
||||
).where(
|
||||
and_(
|
||||
SupplierPerformanceMetric.tenant_id == tenant_id,
|
||||
SupplierPerformanceMetric.calculated_at >= date_from,
|
||||
SupplierPerformanceMetric.calculated_at <= date_to,
|
||||
SupplierPerformanceMetric.metric_type == PerformanceMetricType.DELIVERY_PERFORMANCE
|
||||
)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
# Get quality statistics
|
||||
quality_query = select(
|
||||
func.avg(SupplierPerformanceMetric.metric_value).label('avg_quality')
|
||||
).where(
|
||||
and_(
|
||||
SupplierPerformanceMetric.tenant_id == tenant_id,
|
||||
SupplierPerformanceMetric.calculated_at >= date_from,
|
||||
SupplierPerformanceMetric.calculated_at <= date_to,
|
||||
SupplierPerformanceMetric.metric_type == PerformanceMetricType.QUALITY_SCORE
|
||||
)
|
||||
)
|
||||
|
||||
quality_result = await db.execute(quality_query)
|
||||
quality_row = quality_result.first()
|
||||
|
||||
# Get scorecard count
|
||||
scorecard_query = select(func.count(SupplierScorecard.id)).where(
|
||||
and_(
|
||||
SupplierScorecard.tenant_id == tenant_id,
|
||||
SupplierScorecard.generated_at >= date_from,
|
||||
SupplierScorecard.generated_at <= date_to
|
||||
)
|
||||
)
|
||||
|
||||
scorecard_result = await db.execute(scorecard_query)
|
||||
scorecard_count = scorecard_result.scalar() or 0
|
||||
|
||||
return {
|
||||
'avg_overall_score': row.avg_score or 0,
|
||||
'above_threshold': row.above_threshold or 0,
|
||||
'below_threshold': row.below_threshold or 0,
|
||||
'avg_delivery_rate': row.avg_score or 0,
|
||||
'avg_quality_rate': quality_row.avg_quality or 0,
|
||||
'recent_scorecards': scorecard_count
|
||||
}
|
||||
|
||||
async def _get_alert_statistics(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
date_from: datetime,
|
||||
date_to: datetime
|
||||
) -> Dict[str, int]:
|
||||
"""Get alert statistics"""
|
||||
query = select(
|
||||
func.count(SupplierAlert.id).filter(SupplierAlert.status == AlertStatus.ACTIVE).label('total_active'),
func.count(SupplierAlert.id).filter(SupplierAlert.severity == AlertSeverity.CRITICAL).label('critical'),
func.count(SupplierAlert.id).filter(SupplierAlert.priority_score >= 70).label('high_priority')
|
||||
).where(
|
||||
and_(
|
||||
SupplierAlert.tenant_id == tenant_id,
|
||||
SupplierAlert.triggered_at >= date_from,
|
||||
SupplierAlert.triggered_at <= date_to
|
||||
)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
return {
|
||||
'total_active': row.total_active or 0,
|
||||
'critical_alerts': row.critical or 0,
|
||||
'high_priority': row.high_priority or 0
|
||||
}
|
||||
|
||||
async def _get_financial_statistics(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
date_from: datetime,
|
||||
date_to: datetime
|
||||
) -> Dict[str, Decimal]:
|
||||
"""Get financial statistics"""
|
||||
# For now, return placeholder values
|
||||
# TODO: Implement cost savings calculation when pricing data is available
|
||||
return {
|
||||
'cost_savings': Decimal('0')
|
||||
}
|
||||
|
||||
async def _detect_business_model(self, db: AsyncSession, tenant_id: UUID) -> Dict[str, Any]:
|
||||
"""Detect business model based on supplier patterns"""
|
||||
# Get supplier count by category
|
||||
query = select(
|
||||
func.count(Supplier.id).label('total_suppliers'),
|
||||
func.count(Supplier.id).filter(Supplier.supplier_type == SupplierType.INGREDIENTS).label('ingredient_suppliers')
|
||||
).where(
|
||||
and_(
|
||||
Supplier.tenant_id == tenant_id,
|
||||
Supplier.status == SupplierStatus.ACTIVE
|
||||
)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
total_suppliers = row.total_suppliers or 0
|
||||
ingredient_suppliers = row.ingredient_suppliers or 0
|
||||
|
||||
# Simple business model detection logic
|
||||
if total_suppliers >= settings.CENTRAL_BAKERY_THRESHOLD_SUPPLIERS:
|
||||
model = "central_bakery"
|
||||
confidence = 0.85
|
||||
elif total_suppliers >= settings.INDIVIDUAL_BAKERY_THRESHOLD_SUPPLIERS:
|
||||
model = "individual_bakery"
|
||||
confidence = 0.75
|
||||
else:
|
||||
model = "small_bakery"
|
||||
confidence = 0.60
|
||||
|
||||
return {
|
||||
'model': model,
|
||||
'confidence': confidence,
|
||||
'metrics': {
|
||||
'total_suppliers': total_suppliers,
|
||||
'ingredient_suppliers': ingredient_suppliers,
|
||||
'supplier_diversity': ingredient_suppliers / max(total_suppliers, 1)
|
||||
}
|
||||
}
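# Worked example (threshold values are illustrative assumptions, e.g.
# CENTRAL_BAKERY_THRESHOLD_SUPPLIERS=20, INDIVIDUAL_BAKERY_THRESHOLD_SUPPLIERS=5):
# a tenant with 8 active suppliers, 6 of them ingredient suppliers, is classified as
# "individual_bakery" with confidence 0.75 and supplier_diversity 6/8 = 0.75.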
|
||||
|
||||
async def _calculate_performance_trends(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
date_from: datetime,
|
||||
date_to: datetime
|
||||
) -> Dict[str, str]:
|
||||
"""Calculate performance trends"""
|
||||
# For now, return stable trends
|
||||
# TODO: Implement trend calculation based on historical data
|
||||
return {
|
||||
'performance_trend': 'stable',
|
||||
'delivery_trend': 'stable',
|
||||
'quality_trend': 'stable'
|
||||
}
|
||||
|
||||
def _categorize_performance(self, score: float) -> str:
|
||||
"""Categorize performance based on score"""
|
||||
if score >= settings.EXCELLENT_DELIVERY_RATE:
|
||||
return "excellent"
|
||||
elif score >= settings.GOOD_DELIVERY_RATE:
|
||||
return "good"
|
||||
elif score >= settings.ACCEPTABLE_DELIVERY_RATE:
|
||||
return "acceptable"
|
||||
elif score >= settings.POOR_DELIVERY_RATE:
|
||||
return "needs_improvement"
|
||||
else:
|
||||
return "poor"
|
||||
|
||||
def _assess_risk_level(self, metrics: Dict[str, Any], alerts: Dict[str, Any]) -> str:
|
||||
"""Assess risk level based on metrics and alerts"""
|
||||
if alerts.get('active_count', 0) > 3 or metrics.get('overall_score', 0) < 50:
|
||||
return "critical"
|
||||
elif alerts.get('active_count', 0) > 1 or metrics.get('overall_score', 0) < 70:
|
||||
return "high"
|
||||
elif alerts.get('active_count', 0) > 0 or metrics.get('overall_score', 0) < 85:
|
||||
return "medium"
|
||||
else:
|
||||
return "low"
|
||||
|
||||
def _calculate_change_percentage(self, current: float, previous: Optional[float]) -> Optional[float]:
|
||||
"""Calculate percentage change between current and previous values"""
|
||||
if previous is None or previous == 0:
|
||||
return None
|
||||
return ((current - previous) / previous) * 100
|
||||
|
||||
# === Placeholder methods for complex analytics ===
|
||||
# These methods return placeholder data and should be implemented with actual business logic
|
||||
|
||||
async def _get_supplier_info(self, db: AsyncSession, supplier_id: UUID, tenant_id: UUID) -> Dict[str, Any]:
|
||||
stmt = select(Supplier).where(and_(Supplier.id == supplier_id, Supplier.tenant_id == tenant_id))
|
||||
result = await db.execute(stmt)
|
||||
supplier = result.scalar_one_or_none()
|
||||
return {'name': supplier.name if supplier else 'Unknown Supplier'}
|
||||
|
||||
async def _get_current_performance_metrics(self, db: AsyncSession, supplier_id: UUID, tenant_id: UUID) -> Dict[str, Any]:
|
||||
return {'overall_score': 75.0, 'delivery_performance': 80.0, 'quality_performance': 85.0, 'cost_performance': 70.0, 'service_performance': 75.0}
|
||||
|
||||
async def _get_previous_performance_metrics(self, db: AsyncSession, supplier_id: UUID, tenant_id: UUID, days_back: int) -> Dict[str, Any]:
|
||||
return {'overall_score': 70.0}
|
||||
|
||||
async def _get_supplier_activity_stats(self, db: AsyncSession, supplier_id: UUID, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {'orders_count': 15, 'avg_delivery_time': 3.2, 'quality_issues': 2, 'cost_variance': 5.5}
|
||||
|
||||
async def _get_supplier_alert_summary(self, db: AsyncSession, supplier_id: UUID, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {'active_count': 1, 'resolved_count': 3, 'trend': 'improving'}
|
||||
|
||||
async def _generate_supplier_recommendations(self, db: AsyncSession, supplier_id: UUID, tenant_id: UUID, metrics: Dict[str, Any], activity: Dict[str, Any], alerts: Dict[str, Any]) -> Dict[str, Any]:
|
||||
return {
|
||||
'strengths': ['Consistent quality', 'Reliable delivery'],
|
||||
'improvements': ['Cost optimization', 'Communication'],
|
||||
'actions': [{'action': 'Negotiate better pricing', 'priority': 'high'}]
|
||||
}
|
||||
|
||||
async def _get_performance_distribution(self, db: AsyncSession, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {
|
||||
'total_suppliers': 25,
|
||||
'distribution': {'excellent': 5, 'good': 12, 'acceptable': 6, 'poor': 2},
|
||||
'score_ranges': {'excellent': [95, 100, 97.5], 'good': [80, 94, 87.0]}
|
||||
}
|
||||
|
||||
async def _get_detailed_trends(self, db: AsyncSession, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {
|
||||
'overall': {'month_over_month': 2.5},
|
||||
'delivery': {'month_over_month': 1.8},
|
||||
'quality': {'month_over_month': 3.2},
|
||||
'cost': {'month_over_month': -1.5}
|
||||
}
|
||||
|
||||
async def _get_comparative_analysis(self, db: AsyncSession, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {
|
||||
'top_performers': [],
|
||||
'underperformers': [],
|
||||
'most_improved': [],
|
||||
'biggest_declines': []
|
||||
}
|
||||
|
||||
async def _get_risk_analysis(self, db: AsyncSession, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {
|
||||
'high_risk': [],
|
||||
'contract_renewals': [],
|
||||
'certification_expiries': []
|
||||
}
|
||||
|
||||
async def _get_financial_impact(self, db: AsyncSession, tenant_id: UUID, date_from: datetime, date_to: datetime) -> Dict[str, Any]:
|
||||
return {
|
||||
'total_value': Decimal('150000'),
|
||||
'cost_savings': Decimal('5000'),
|
||||
'cost_avoidance': Decimal('2000'),
|
||||
'risk_exposure': Decimal('10000')
|
||||
}
|
||||
|
||||
async def _analyze_supplier_patterns(self, db: AsyncSession, tenant_id: UUID) -> Dict[str, Any]:
|
||||
return {
|
||||
'diversity_score': 75.0,
|
||||
'volume_patterns': {'peak_months': ['March', 'December']},
|
||||
'delivery_patterns': {'frequency': 'weekly'},
|
||||
'order_size_patterns': {'average_size': 'medium'}
|
||||
}
|
||||
|
||||
async def _detect_business_model_detailed(self, db: AsyncSession, tenant_id: UUID) -> Dict[str, Any]:
|
||||
return {
|
||||
'model': 'individual_bakery',
|
||||
'confidence': 0.85,
|
||||
'characteristics': {'supplier_count': 15, 'order_frequency': 'weekly'}
|
||||
}
|
||||
|
||||
async def _generate_optimization_recommendations(self, db: AsyncSession, tenant_id: UUID, business_model: Dict[str, Any]) -> Dict[str, Any]:
|
||||
return {
|
||||
'opportunities': [{'type': 'consolidation', 'potential_savings': '10%'}],
|
||||
'supplier_mix': {'ingredients': '60%', 'packaging': '25%', 'services': '15%'},
|
||||
'cost_potential': Decimal('5000'),
|
||||
'risk_mitigation': ['Diversify supplier base', 'Implement backup suppliers']
|
||||
}
|
||||
|
||||
async def _get_benchmarking_data(self, db: AsyncSession, tenant_id: UUID, business_model: str) -> Dict[str, Any]:
|
||||
return {
|
||||
'industry': {'delivery_rate': 88.5, 'quality_score': 91.2},
|
||||
'peer': {'delivery_rate': 86.8, 'quality_score': 89.5}
|
||||
}
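# Usage sketch (illustrative; assumes an AsyncSession `db` and UUIDs `tenant_id` and `supplier_id`):
#   service = DashboardService()
#   summary = await service.get_performance_dashboard_summary(db, tenant_id)
#   insights = await service.get_supplier_performance_insights(db, tenant_id, supplier_id)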
|
||||
662
services/suppliers/app/services/performance_service.py
Normal file
@@ -0,0 +1,662 @@
|
||||
# ================================================================
|
||||
# services/suppliers/app/services/performance_service.py
|
||||
# ================================================================
|
||||
"""
|
||||
Supplier Performance Tracking Service
|
||||
Comprehensive supplier performance calculation, tracking, and analytics
|
||||
"""
|
||||
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import List, Optional, Dict, Any, Tuple
|
||||
from uuid import UUID
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, func, and_, or_, desc, asc, cast, extract, Float
|
||||
from sqlalchemy.orm import selectinload
|
||||
import structlog
|
||||
from decimal import Decimal
|
||||
|
||||
from app.models.suppliers import (
|
||||
Supplier, PurchaseOrder, Delivery, SupplierQualityReview,
|
||||
SupplierStatus, PurchaseOrderStatus, DeliveryStatus, QualityRating, DeliveryRating
|
||||
)
|
||||
from app.models.performance import (
|
||||
SupplierPerformanceMetric, SupplierScorecard, SupplierAlert,
|
||||
PerformanceMetricType, PerformancePeriod, AlertType, AlertSeverity,
|
||||
AlertStatus
|
||||
)
|
||||
from app.schemas.performance import (
|
||||
PerformanceMetricCreate, ScorecardCreate, AlertCreate,
|
||||
PerformanceDashboardSummary, SupplierPerformanceInsights,
|
||||
PerformanceAnalytics, BusinessModelInsights
|
||||
)
|
||||
from app.core.config import settings
|
||||
from shared.database.transactions import transactional
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class PerformanceTrackingService:
|
||||
"""Service for tracking and calculating supplier performance metrics"""
|
||||
|
||||
def __init__(self):
|
||||
self.logger = logger.bind(service="performance_tracking")
|
||||
|
||||
@transactional
|
||||
async def calculate_supplier_performance(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
tenant_id: UUID,
|
||||
period: PerformancePeriod,
|
||||
period_start: datetime,
|
||||
period_end: datetime
|
||||
) -> Optional[SupplierPerformanceMetric]:
|
||||
"""Calculate comprehensive performance metrics for a supplier"""
|
||||
try:
|
||||
self.logger.info("Calculating supplier performance",
|
||||
supplier_id=str(supplier_id),
|
||||
period=period.value,
|
||||
period_start=period_start.isoformat(),
|
||||
period_end=period_end.isoformat())
|
||||
|
||||
# Get base data for calculations
|
||||
orders_data = await self._get_orders_data(db, supplier_id, tenant_id, period_start, period_end)
|
||||
deliveries_data = await self._get_deliveries_data(db, supplier_id, tenant_id, period_start, period_end)
|
||||
quality_data = await self._get_quality_data(db, supplier_id, tenant_id, period_start, period_end)
|
||||
|
||||
# Calculate delivery performance
|
||||
delivery_performance = await self._calculate_delivery_performance(
|
||||
orders_data, deliveries_data
|
||||
)
|
||||
|
||||
# Calculate quality performance
|
||||
quality_performance = await self._calculate_quality_performance(
|
||||
deliveries_data, quality_data
|
||||
)
|
||||
|
||||
# Calculate cost performance
|
||||
cost_performance = await self._calculate_cost_performance(
|
||||
orders_data, deliveries_data
|
||||
)
|
||||
|
||||
# Calculate service performance
|
||||
service_performance = await self._calculate_service_performance(
|
||||
orders_data, quality_data
|
||||
)
|
||||
|
||||
# Calculate overall performance (weighted average)
|
||||
overall_performance = (
|
||||
delivery_performance * 0.30 +
|
||||
quality_performance * 0.30 +
|
||||
cost_performance * 0.20 +
|
||||
service_performance * 0.20
|
||||
)
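# Worked example of the 30/30/20/20 weighting: delivery 80, quality 90, cost 75,
# service 70 gives 80*0.30 + 90*0.30 + 75*0.20 + 70*0.20 = 80.0.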
|
||||
|
||||
# Create performance metrics for each category
|
||||
performance_metrics = []
|
||||
|
||||
metrics_to_create = [
|
||||
(PerformanceMetricType.DELIVERY_PERFORMANCE, delivery_performance),
|
||||
(PerformanceMetricType.QUALITY_SCORE, quality_performance),
|
||||
(PerformanceMetricType.PRICE_COMPETITIVENESS, cost_performance),
|
||||
(PerformanceMetricType.COMMUNICATION_RATING, service_performance)
|
||||
]
|
||||
|
||||
for metric_type, value in metrics_to_create:
|
||||
# Get previous period value for trend calculation
|
||||
previous_value = await self._get_previous_period_value(
|
||||
db, supplier_id, tenant_id, metric_type, period, period_start
|
||||
)
|
||||
|
||||
# Calculate trend
|
||||
trend_direction, trend_percentage = self._calculate_trend(value, previous_value)
|
||||
|
||||
# Prepare detailed metrics data
|
||||
metrics_data = await self._prepare_detailed_metrics(
|
||||
metric_type, orders_data, deliveries_data, quality_data
|
||||
)
|
||||
|
||||
# Create performance metric
|
||||
metric_create = PerformanceMetricCreate(
|
||||
supplier_id=supplier_id,
|
||||
metric_type=metric_type,
|
||||
period=period,
|
||||
period_start=period_start,
|
||||
period_end=period_end,
|
||||
metric_value=value,
|
||||
target_value=self._get_target_value(metric_type),
|
||||
total_orders=orders_data.get('total_orders', 0),
|
||||
total_deliveries=deliveries_data.get('total_deliveries', 0),
|
||||
on_time_deliveries=deliveries_data.get('on_time_deliveries', 0),
|
||||
late_deliveries=deliveries_data.get('late_deliveries', 0),
|
||||
quality_issues=quality_data.get('quality_issues', 0),
|
||||
total_amount=orders_data.get('total_amount', Decimal('0')),
|
||||
metrics_data=metrics_data
|
||||
)
|
||||
|
||||
performance_metric = SupplierPerformanceMetric(
|
||||
tenant_id=tenant_id,
|
||||
supplier_id=supplier_id,
|
||||
metric_type=metric_create.metric_type,
|
||||
period=metric_create.period,
|
||||
period_start=metric_create.period_start,
|
||||
period_end=metric_create.period_end,
|
||||
metric_value=metric_create.metric_value,
|
||||
target_value=metric_create.target_value,
|
||||
previous_value=previous_value,
|
||||
total_orders=metric_create.total_orders,
|
||||
total_deliveries=metric_create.total_deliveries,
|
||||
on_time_deliveries=metric_create.on_time_deliveries,
|
||||
late_deliveries=metric_create.late_deliveries,
|
||||
quality_issues=metric_create.quality_issues,
|
||||
total_amount=metric_create.total_amount,
|
||||
metrics_data=metric_create.metrics_data,
|
||||
trend_direction=trend_direction,
|
||||
trend_percentage=trend_percentage,
|
||||
calculated_at=datetime.now(timezone.utc)
|
||||
)
|
||||
|
||||
db.add(performance_metric)
|
||||
performance_metrics.append(performance_metric)
|
||||
|
||||
await db.flush()
|
||||
|
||||
# Update supplier's overall performance ratings
|
||||
await self._update_supplier_ratings(db, supplier_id, overall_performance, quality_performance)
|
||||
|
||||
self.logger.info("Supplier performance calculated successfully",
|
||||
supplier_id=str(supplier_id),
|
||||
overall_performance=overall_performance)
|
||||
|
||||
# Return the first calculated metric (delivery performance); None if nothing was calculated
|
||||
return performance_metrics[0] if performance_metrics else None
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error calculating supplier performance",
|
||||
supplier_id=str(supplier_id),
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def _get_orders_data(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
tenant_id: UUID,
|
||||
period_start: datetime,
|
||||
period_end: datetime
|
||||
) -> Dict[str, Any]:
|
||||
"""Get orders data for performance calculation"""
|
||||
query = select(
|
||||
func.count(PurchaseOrder.id).label('total_orders'),
|
||||
func.sum(PurchaseOrder.total_amount).label('total_amount'),
|
||||
func.avg(PurchaseOrder.total_amount).label('avg_order_value'),
|
||||
func.count(PurchaseOrder.id).filter(
PurchaseOrder.status == PurchaseOrderStatus.COMPLETED
).label('completed_orders')
|
||||
).where(
|
||||
and_(
|
||||
PurchaseOrder.supplier_id == supplier_id,
|
||||
PurchaseOrder.tenant_id == tenant_id,
|
||||
PurchaseOrder.order_date >= period_start,
|
||||
PurchaseOrder.order_date <= period_end
|
||||
)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
return {
|
||||
'total_orders': row.total_orders or 0,
|
||||
'total_amount': row.total_amount or Decimal('0'),
|
||||
'avg_order_value': row.avg_order_value or Decimal('0'),
|
||||
'completed_orders': row.completed_orders or 0
|
||||
}
|
||||
|
||||
async def _get_deliveries_data(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
tenant_id: UUID,
|
||||
period_start: datetime,
|
||||
period_end: datetime
|
||||
) -> Dict[str, Any]:
|
||||
"""Get deliveries data for performance calculation"""
|
||||
# Get delivery statistics
|
||||
query = select(
|
||||
func.count(Delivery.id).label('total_deliveries'),
|
||||
func.count(Delivery.id).filter(
and_(
Delivery.actual_arrival <= Delivery.scheduled_date,
Delivery.status == DeliveryStatus.DELIVERED
)
).label('on_time_deliveries'),
func.count(Delivery.id).filter(
and_(
Delivery.actual_arrival > Delivery.scheduled_date,
Delivery.status == DeliveryStatus.DELIVERED
)
).label('late_deliveries'),
|
||||
func.avg(
|
||||
extract('epoch', Delivery.actual_arrival - Delivery.scheduled_date) / 3600
|
||||
).label('avg_delay_hours')
|
||||
).where(
|
||||
and_(
|
||||
Delivery.supplier_id == supplier_id,
|
||||
Delivery.tenant_id == tenant_id,
|
||||
Delivery.scheduled_date >= period_start,
|
||||
Delivery.scheduled_date <= period_end,
|
||||
Delivery.status.in_([DeliveryStatus.DELIVERED, DeliveryStatus.PARTIALLY_DELIVERED])
|
||||
)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
return {
|
||||
'total_deliveries': row.total_deliveries or 0,
|
||||
'on_time_deliveries': row.on_time_deliveries or 0,
|
||||
'late_deliveries': row.late_deliveries or 0,
|
||||
'avg_delay_hours': row.avg_delay_hours or 0
|
||||
}
|
||||
|
||||
async def _get_quality_data(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
tenant_id: UUID,
|
||||
period_start: datetime,
|
||||
period_end: datetime
|
||||
) -> Dict[str, Any]:
|
||||
"""Get quality data for performance calculation"""
|
||||
query = select(
|
||||
func.count(SupplierQualityReview.id).label('total_reviews'),
|
||||
func.avg(
cast(SupplierQualityReview.quality_rating, Float)
).label('avg_quality_rating'),
func.avg(
cast(SupplierQualityReview.delivery_rating, Float)
).label('avg_delivery_rating'),
func.avg(SupplierQualityReview.communication_rating).label('avg_communication_rating'),
func.count(SupplierQualityReview.id).filter(
SupplierQualityReview.quality_issues.isnot(None)
).label('quality_issues')
|
||||
).where(
|
||||
and_(
|
||||
SupplierQualityReview.supplier_id == supplier_id,
|
||||
SupplierQualityReview.tenant_id == tenant_id,
|
||||
SupplierQualityReview.review_date >= period_start,
|
||||
SupplierQualityReview.review_date <= period_end
|
||||
)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
|
||||
return {
|
||||
'total_reviews': row.total_reviews or 0,
|
||||
'avg_quality_rating': row.avg_quality_rating or 0,
|
||||
'avg_delivery_rating': row.avg_delivery_rating or 0,
|
||||
'avg_communication_rating': row.avg_communication_rating or 0,
|
||||
'quality_issues': row.quality_issues or 0
|
||||
}
|
||||
|
||||
async def _calculate_delivery_performance(
|
||||
self,
|
||||
orders_data: Dict[str, Any],
|
||||
deliveries_data: Dict[str, Any]
|
||||
) -> float:
|
||||
"""Calculate delivery performance score (0-100)"""
|
||||
total_deliveries = deliveries_data.get('total_deliveries', 0)
|
||||
if total_deliveries == 0:
|
||||
return 0.0
|
||||
|
||||
on_time_deliveries = deliveries_data.get('on_time_deliveries', 0)
|
||||
on_time_rate = (on_time_deliveries / total_deliveries) * 100
|
||||
|
||||
# Apply penalty for average delay
|
||||
avg_delay_hours = deliveries_data.get('avg_delay_hours', 0)
|
||||
delay_penalty = min(avg_delay_hours * 2, 20) # Max 20 point penalty
|
||||
|
||||
performance_score = max(on_time_rate - delay_penalty, 0)
|
||||
return min(performance_score, 100.0)
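# Worked example: 9 of 10 deliveries on time (90.0%) with an average delay of 4 hours
# incurs a penalty of min(4 * 2, 20) = 8, giving a delivery score of 82.0.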
|
||||
|
||||
async def _calculate_quality_performance(
|
||||
self,
|
||||
deliveries_data: Dict[str, Any],
|
||||
quality_data: Dict[str, Any]
|
||||
) -> float:
|
||||
"""Calculate quality performance score (0-100)"""
|
||||
total_reviews = quality_data.get('total_reviews', 0)
|
||||
if total_reviews == 0:
|
||||
return 50.0 # Default score when no reviews
|
||||
|
||||
# Base quality score from ratings
|
||||
avg_quality_rating = quality_data.get('avg_quality_rating', 0)
|
||||
base_score = (avg_quality_rating / 5.0) * 100
|
||||
|
||||
# Apply penalty for quality issues
|
||||
quality_issues = quality_data.get('quality_issues', 0)
|
||||
issue_penalty = min(quality_issues * 5, 30) # Max 30 point penalty
|
||||
|
||||
performance_score = max(base_score - issue_penalty, 0)
|
||||
return min(performance_score, 100.0)
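# Worked example: an average rating of 4.2/5 gives a base score of 84.0; two recorded
# quality issues subtract min(2 * 5, 30) = 10, giving a quality score of 74.0.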
|
||||
|
||||
async def _calculate_cost_performance(
|
||||
self,
|
||||
orders_data: Dict[str, Any],
|
||||
deliveries_data: Dict[str, Any]
|
||||
) -> float:
|
||||
"""Calculate cost performance score (0-100)"""
|
||||
# For now, return a baseline score
|
||||
# In future, implement price comparison with market rates
|
||||
return 75.0
|
||||
|
||||
async def _calculate_service_performance(
|
||||
self,
|
||||
orders_data: Dict[str, Any],
|
||||
quality_data: Dict[str, Any]
|
||||
) -> float:
|
||||
"""Calculate service performance score (0-100)"""
|
||||
total_reviews = quality_data.get('total_reviews', 0)
|
||||
if total_reviews == 0:
|
||||
return 50.0 # Default score when no reviews
|
||||
|
||||
avg_communication_rating = quality_data.get('avg_communication_rating', 0)
|
||||
return (avg_communication_rating / 5.0) * 100
|
||||
|
||||
def _calculate_trend(self, current_value: float, previous_value: Optional[float]) -> Tuple[Optional[str], Optional[float]]:
|
||||
"""Calculate performance trend"""
|
||||
if previous_value is None or previous_value == 0:
|
||||
return None, None
|
||||
|
||||
change_percentage = ((current_value - previous_value) / previous_value) * 100
|
||||
|
||||
if abs(change_percentage) < 2: # Less than 2% change considered stable
|
||||
trend_direction = "stable"
|
||||
elif change_percentage > 0:
|
||||
trend_direction = "improving"
|
||||
else:
|
||||
trend_direction = "declining"
|
||||
|
||||
return trend_direction, change_percentage
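# Worked example: current 78 vs previous 75 is a +4.0% change, which exceeds the 2%
# stability band, so the trend is reported as "improving".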
|
||||
|
||||
async def _get_previous_period_value(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
tenant_id: UUID,
|
||||
metric_type: PerformanceMetricType,
|
||||
period: PerformancePeriod,
|
||||
current_period_start: datetime
|
||||
) -> Optional[float]:
|
||||
"""Get the previous period's value for trend calculation"""
|
||||
# Calculate previous period dates
|
||||
if period == PerformancePeriod.DAILY:
|
||||
previous_start = current_period_start - timedelta(days=1)
|
||||
previous_end = current_period_start
|
||||
elif period == PerformancePeriod.WEEKLY:
|
||||
previous_start = current_period_start - timedelta(weeks=1)
|
||||
previous_end = current_period_start
|
||||
elif period == PerformancePeriod.MONTHLY:
|
||||
previous_start = current_period_start - timedelta(days=30)
|
||||
previous_end = current_period_start
|
||||
elif period == PerformancePeriod.QUARTERLY:
|
||||
previous_start = current_period_start - timedelta(days=90)
|
||||
previous_end = current_period_start
|
||||
else: # YEARLY
|
||||
previous_start = current_period_start - timedelta(days=365)
|
||||
previous_end = current_period_start
|
||||
|
||||
query = select(SupplierPerformanceMetric.metric_value).where(
|
||||
and_(
|
||||
SupplierPerformanceMetric.supplier_id == supplier_id,
|
||||
SupplierPerformanceMetric.tenant_id == tenant_id,
|
||||
SupplierPerformanceMetric.metric_type == metric_type,
|
||||
SupplierPerformanceMetric.period == period,
|
||||
SupplierPerformanceMetric.period_start >= previous_start,
|
||||
SupplierPerformanceMetric.period_start < previous_end
|
||||
)
|
||||
).order_by(desc(SupplierPerformanceMetric.period_start)).limit(1)
|
||||
|
||||
result = await db.execute(query)
|
||||
row = result.first()
|
||||
return row[0] if row else None
|
||||
|
||||
def _get_target_value(self, metric_type: PerformanceMetricType) -> float:
|
||||
"""Get target value for metric type"""
|
||||
targets = {
|
||||
PerformanceMetricType.DELIVERY_PERFORMANCE: settings.GOOD_DELIVERY_RATE,
|
||||
PerformanceMetricType.QUALITY_SCORE: settings.GOOD_QUALITY_RATE,
|
||||
PerformanceMetricType.PRICE_COMPETITIVENESS: 80.0,
|
||||
PerformanceMetricType.COMMUNICATION_RATING: 80.0,
|
||||
PerformanceMetricType.ORDER_ACCURACY: 95.0,
|
||||
PerformanceMetricType.RESPONSE_TIME: 90.0,
|
||||
PerformanceMetricType.COMPLIANCE_SCORE: 95.0,
|
||||
PerformanceMetricType.FINANCIAL_STABILITY: 85.0
|
||||
}
|
||||
return targets.get(metric_type, 80.0)
|
||||
|
||||
async def _prepare_detailed_metrics(
|
||||
self,
|
||||
metric_type: PerformanceMetricType,
|
||||
orders_data: Dict[str, Any],
|
||||
deliveries_data: Dict[str, Any],
|
||||
quality_data: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
"""Prepare detailed metrics breakdown"""
|
||||
if metric_type == PerformanceMetricType.DELIVERY_PERFORMANCE:
|
||||
return {
|
||||
"on_time_rate": (deliveries_data.get('on_time_deliveries', 0) /
|
||||
max(deliveries_data.get('total_deliveries', 1), 1)) * 100,
|
||||
"avg_delay_hours": deliveries_data.get('avg_delay_hours', 0),
|
||||
"late_delivery_count": deliveries_data.get('late_deliveries', 0)
|
||||
}
|
||||
elif metric_type == PerformanceMetricType.QUALITY_SCORE:
|
||||
return {
|
||||
"avg_quality_rating": quality_data.get('avg_quality_rating', 0),
|
||||
"quality_issues_count": quality_data.get('quality_issues', 0),
|
||||
"total_reviews": quality_data.get('total_reviews', 0)
|
||||
}
|
||||
else:
|
||||
return {}
|
||||
|
||||
async def _update_supplier_ratings(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
overall_performance: float,
|
||||
quality_performance: float
|
||||
) -> None:
|
||||
"""Update supplier's overall ratings"""
|
||||
stmt = select(Supplier).where(Supplier.id == supplier_id)
|
||||
result = await db.execute(stmt)
|
||||
supplier = result.scalar_one_or_none()
|
||||
|
||||
if supplier:
|
||||
supplier.quality_rating = quality_performance / 20  # convert 0-100 score to 0-5 rating
supplier.delivery_rating = overall_performance / 20  # convert 0-100 score to 0-5 rating
|
||||
db.add(supplier)
|
||||
|
||||
|
||||
class AlertService:
|
||||
"""Service for managing supplier alerts"""
|
||||
|
||||
def __init__(self):
|
||||
self.logger = logger.bind(service="alert_service")
|
||||
|
||||
@transactional
|
||||
async def evaluate_performance_alerts(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
tenant_id: UUID,
|
||||
supplier_id: Optional[UUID] = None
|
||||
) -> List[SupplierAlert]:
|
||||
"""Evaluate and create performance-based alerts"""
|
||||
try:
|
||||
alerts_created = []
|
||||
|
||||
# Get suppliers to evaluate
|
||||
if supplier_id:
|
||||
supplier_filter = and_(Supplier.id == supplier_id, Supplier.tenant_id == tenant_id)
|
||||
else:
|
||||
supplier_filter = and_(Supplier.tenant_id == tenant_id, Supplier.status == SupplierStatus.ACTIVE)
|
||||
|
||||
stmt = select(Supplier).where(supplier_filter)
|
||||
result = await db.execute(stmt)
|
||||
suppliers = result.scalars().all()
|
||||
|
||||
for supplier in suppliers:
|
||||
# Get recent performance metrics
|
||||
recent_metrics = await self._get_recent_performance_metrics(db, supplier.id, tenant_id)
|
||||
|
||||
# Evaluate delivery performance alerts
|
||||
delivery_alerts = await self._evaluate_delivery_alerts(db, supplier, recent_metrics)
|
||||
alerts_created.extend(delivery_alerts)
|
||||
|
||||
# Evaluate quality alerts
|
||||
quality_alerts = await self._evaluate_quality_alerts(db, supplier, recent_metrics)
|
||||
alerts_created.extend(quality_alerts)
|
||||
|
||||
# Evaluate cost variance alerts
|
||||
cost_alerts = await self._evaluate_cost_alerts(db, supplier, recent_metrics)
|
||||
alerts_created.extend(cost_alerts)
|
||||
|
||||
return alerts_created
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error("Error evaluating performance alerts", error=str(e))
|
||||
raise
|
||||
|
||||
async def _get_recent_performance_metrics(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier_id: UUID,
|
||||
tenant_id: UUID
|
||||
) -> Dict[PerformanceMetricType, SupplierPerformanceMetric]:
|
||||
"""Get recent performance metrics for a supplier"""
|
||||
query = select(SupplierPerformanceMetric).where(
|
||||
and_(
|
||||
SupplierPerformanceMetric.supplier_id == supplier_id,
|
||||
SupplierPerformanceMetric.tenant_id == tenant_id,
|
||||
SupplierPerformanceMetric.calculated_at >= datetime.now(timezone.utc) - timedelta(days=7)
|
||||
)
|
||||
).order_by(desc(SupplierPerformanceMetric.calculated_at))
|
||||
|
||||
result = await db.execute(query)
|
||||
metrics = result.scalars().all()
|
||||
|
||||
# Return the most recent metric for each type
|
||||
metrics_dict = {}
|
||||
for metric in metrics:
|
||||
if metric.metric_type not in metrics_dict:
|
||||
metrics_dict[metric.metric_type] = metric
|
||||
|
||||
return metrics_dict
|
||||
|
||||
async def _evaluate_delivery_alerts(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier: Supplier,
|
||||
metrics: Dict[PerformanceMetricType, SupplierPerformanceMetric]
|
||||
) -> List[SupplierAlert]:
|
||||
"""Evaluate delivery performance alerts"""
|
||||
alerts = []
|
||||
|
||||
delivery_metric = metrics.get(PerformanceMetricType.DELIVERY_PERFORMANCE)
|
||||
if not delivery_metric:
|
||||
return alerts
|
||||
|
||||
# Poor delivery performance alert
|
||||
if delivery_metric.metric_value < settings.POOR_DELIVERY_RATE:
|
||||
severity = AlertSeverity.CRITICAL if delivery_metric.metric_value < 70 else AlertSeverity.HIGH
|
||||
|
||||
alert = SupplierAlert(
|
||||
tenant_id=supplier.tenant_id,
|
||||
supplier_id=supplier.id,
|
||||
alert_type=AlertType.POOR_QUALITY,
|
||||
severity=severity,
|
||||
title=f"Poor Delivery Performance - {supplier.name}",
|
||||
message=f"Delivery performance has dropped to {delivery_metric.metric_value:.1f}%",
|
||||
description=f"Supplier {supplier.name} delivery performance is below acceptable threshold",
|
||||
trigger_value=delivery_metric.metric_value,
|
||||
threshold_value=settings.POOR_DELIVERY_RATE,
|
||||
metric_type=PerformanceMetricType.DELIVERY_PERFORMANCE,
|
||||
performance_metric_id=delivery_metric.id,
|
||||
priority_score=90 if severity == AlertSeverity.CRITICAL else 70,
|
||||
business_impact="high" if severity == AlertSeverity.CRITICAL else "medium",
|
||||
recommended_actions=[
|
||||
{"action": "Review delivery processes with supplier"},
|
||||
{"action": "Request delivery improvement plan"},
|
||||
{"action": "Consider alternative suppliers"}
|
||||
]
|
||||
)
|
||||
|
||||
db.add(alert)
|
||||
alerts.append(alert)
|
||||
|
||||
return alerts
|
||||
|
||||
async def _evaluate_quality_alerts(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier: Supplier,
|
||||
metrics: Dict[PerformanceMetricType, SupplierPerformanceMetric]
|
||||
) -> List[SupplierAlert]:
|
||||
"""Evaluate quality performance alerts"""
|
||||
alerts = []
|
||||
|
||||
quality_metric = metrics.get(PerformanceMetricType.QUALITY_SCORE)
|
||||
if not quality_metric:
|
||||
return alerts
|
||||
|
||||
# Poor quality performance alert
|
||||
if quality_metric.metric_value < settings.POOR_QUALITY_RATE:
|
||||
severity = AlertSeverity.CRITICAL if quality_metric.metric_value < 70 else AlertSeverity.HIGH
|
||||
|
||||
alert = SupplierAlert(
|
||||
tenant_id=supplier.tenant_id,
|
||||
supplier_id=supplier.id,
|
||||
alert_type=AlertType.POOR_QUALITY,
|
||||
severity=severity,
|
||||
title=f"Poor Quality Performance - {supplier.name}",
|
||||
message=f"Quality performance has dropped to {quality_metric.metric_value:.1f}%",
|
||||
description=f"Supplier {supplier.name} quality performance is below acceptable threshold",
|
||||
trigger_value=quality_metric.metric_value,
|
||||
threshold_value=settings.POOR_QUALITY_RATE,
|
||||
metric_type=PerformanceMetricType.QUALITY_SCORE,
|
||||
performance_metric_id=quality_metric.id,
|
||||
priority_score=95 if severity == AlertSeverity.CRITICAL else 75,
|
||||
business_impact="high" if severity == AlertSeverity.CRITICAL else "medium",
|
||||
recommended_actions=[
|
||||
{"action": "Conduct quality audit with supplier"},
|
||||
{"action": "Request quality improvement plan"},
|
||||
{"action": "Increase incoming inspection frequency"}
|
||||
]
|
||||
)
|
||||
|
||||
db.add(alert)
|
||||
alerts.append(alert)
|
||||
|
||||
return alerts
|
||||
|
||||
async def _evaluate_cost_alerts(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
supplier: Supplier,
|
||||
metrics: Dict[PerformanceMetricType, SupplierPerformanceMetric]
|
||||
) -> List[SupplierAlert]:
|
||||
"""Evaluate cost variance alerts"""
|
||||
alerts = []
|
||||
|
||||
# For now, return empty list - cost analysis requires market data
|
||||
# TODO: Implement cost variance analysis when price benchmarks are available
|
||||
|
||||
return alerts
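# Usage sketch (illustrative; assumes an AsyncSession `db`, tenant/supplier UUIDs, and a
# monthly reporting window bounded by `period_start` and `period_end`):
#   tracker = PerformanceTrackingService()
#   metric = await tracker.calculate_supplier_performance(
#       db, supplier_id, tenant_id, PerformancePeriod.MONTHLY, period_start, period_end
#   )
#   new_alerts = await AlertService().evaluate_performance_alerts(db, tenant_id, supplier_id)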
|
||||