Improve enterprise features: add VRP route optimization and forecast feedback APIs
services/distribution/app/api/vrp_optimization.py (new file, 341 lines)
@@ -0,0 +1,341 @@
"""
VRP Optimization API Endpoints
Endpoints for VRP optimization and metrics retrieval
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from typing import List, Dict, Any, Optional
from pydantic import BaseModel, Field
from datetime import datetime
import structlog

from app.services.vrp_optimization_service import VRPOptimizationService
from app.services.distribution_service import DistributionService
from shared.auth.tenant_access import verify_tenant_permission_dep
from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter()


# Pydantic models for request/response
class VRPOptimizationRequest(BaseModel):
    algorithm_version: str = Field(default="v2.1", description="VRP algorithm version to use")
    constraints: Optional[Dict[str, Any]] = Field(
        None,
        description="Optimization constraints: max_route_duration, max_route_distance, etc."
    )
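
    # Illustrative constraints payload (only max_route_duration and
    # max_route_distance are named above; any further keys are assumptions
    # defined by the optimizer):
    # {"max_route_duration": 480, "max_route_distance": 250.0}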


class VRPOptimizationResponse(BaseModel):
    success: bool
    route_id: str
    optimization_savings: Dict[str, Any]
    vrp_algorithm_version: str
    vrp_optimization_timestamp: str
    vrp_constraints_satisfied: bool
    vrp_objective_value: float


class RouteOptimizationMetrics(BaseModel):
    route_id: str
    route_number: str
    route_date: str
    vrp_optimization_savings: Optional[Dict[str, Any]]
    vrp_algorithm_version: Optional[str]
    vrp_optimization_timestamp: Optional[str]
    vrp_constraints_satisfied: Optional[bool]
    vrp_objective_value: Optional[float]
    total_distance_km: Optional[float]
    estimated_duration_minutes: Optional[int]


class NetworkOptimizationSummary(BaseModel):
    total_routes: int
    optimized_routes: int
    total_distance_saved_km: float
    total_time_saved_minutes: float
    total_fuel_saved_liters: float
    total_co2_saved_kg: float
    total_cost_saved_eur: float
    optimization_rate: float
    average_savings_per_route: Optional[Dict[str, Any]]


class OptimizationHistoryItem(BaseModel):
    optimization_id: str
    route_id: str
    timestamp: str
    algorithm_version: str
    distance_saved_km: float
    time_saved_minutes: float
    fuel_saved_liters: float
    co2_saved_kg: float
    cost_saved_eur: float
    constraints_satisfied: bool


async def get_vrp_optimization_service() -> VRPOptimizationService:
    """Dependency injection for VRPOptimizationService"""
    from app.core.database import database_manager
    from app.services.distribution_service import DistributionService as BusinessDistributionService
    from app.repositories.delivery_route_repository import DeliveryRouteRepository
    from app.repositories.shipment_repository import ShipmentRepository
    from app.repositories.delivery_schedule_repository import DeliveryScheduleRepository
    from shared.clients.tenant_client import TenantServiceClient
    from shared.clients.inventory_client import InventoryServiceClient
    from shared.clients.procurement_client import ProcurementServiceClient
    from app.services.routing_optimizer import RoutingOptimizer

    # Create the business distribution service with proper dependencies
    route_repository = DeliveryRouteRepository(database_manager.get_session())
    shipment_repository = ShipmentRepository(database_manager.get_session())
    schedule_repository = DeliveryScheduleRepository(database_manager.get_session())

    # Create client instances (these will be initialized with proper config)
    tenant_client = TenantServiceClient()
    inventory_client = InventoryServiceClient()
    procurement_client = ProcurementServiceClient()
    routing_optimizer = RoutingOptimizer()

    distribution_service = BusinessDistributionService(
        route_repository=route_repository,
        shipment_repository=shipment_repository,
        schedule_repository=schedule_repository,
        procurement_client=procurement_client,
        tenant_client=tenant_client,
        inventory_client=inventory_client,
        routing_optimizer=routing_optimizer
    )

    return VRPOptimizationService(distribution_service, database_manager)


@router.post("/tenants/{tenant_id}/routes/{route_id}/optimize",
             response_model=VRPOptimizationResponse,
             summary="Optimize delivery route with VRP")
async def optimize_route_with_vrp(
    tenant_id: str,
    route_id: str,
    optimization_request: VRPOptimizationRequest,
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Optimize a delivery route using the VRP algorithm

    This endpoint applies VRP optimization to a specific delivery route and stores
    the optimization metrics for analysis and reporting.
    """
    try:
        result = await vrp_service.optimize_route_with_vrp(
            route_id=route_id,
            algorithm_version=optimization_request.algorithm_version,
            constraints=optimization_request.constraints
        )

        if not result.get('success'):
            raise HTTPException(status_code=500, detail="Optimization failed")

        return VRPOptimizationResponse(
            success=True,
            route_id=result['route_id'],
            optimization_savings=result['optimization_savings'],
            vrp_algorithm_version=result['optimization_savings'].get('algorithm_version', optimization_request.algorithm_version),
            vrp_optimization_timestamp=result['optimization_savings'].get('timestamp', datetime.now().isoformat()),
            vrp_constraints_satisfied=result['optimization_savings'].get('constraints_satisfied', True),
            vrp_objective_value=result['optimization_savings'].get('objective_value', 0.0)
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error("VRP optimization failed", tenant_id=tenant_id, route_id=route_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"VRP optimization failed: {str(e)}")


@router.get("/tenants/{tenant_id}/routes/{route_id}/optimization-metrics",
            response_model=RouteOptimizationMetrics,
            summary="Get VRP optimization metrics for route")
async def get_route_optimization_metrics(
    tenant_id: str,
    route_id: str,
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get VRP optimization metrics for a specific route

    Retrieves stored optimization metrics including savings, algorithm version,
    and constraint satisfaction status.
    """
    try:
        metrics = await vrp_service.get_route_optimization_metrics(route_id)
        return RouteOptimizationMetrics(**metrics)

    except Exception as e:
        logger.error("Failed to get route optimization metrics", tenant_id=tenant_id, route_id=route_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get optimization metrics: {str(e)}")


@router.get("/tenants/{tenant_id}/vrp/optimization-summary",
            response_model=NetworkOptimizationSummary,
            summary="Get network-wide VRP optimization summary")
async def get_network_optimization_summary(
    tenant_id: str,
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get aggregated VRP optimization metrics across all routes

    Provides a network-wide summary of optimization benefits including
    total savings, optimization rate, and average improvements.
    """
    try:
        summary = await vrp_service.get_network_optimization_summary(tenant_id)
        return NetworkOptimizationSummary(**summary)

    except Exception as e:
        logger.error("Failed to get network optimization summary", tenant_id=tenant_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get optimization summary: {str(e)}")


@router.post("/tenants/{tenant_id}/vrp/batch-optimize",
             summary="Batch optimize multiple routes")
async def batch_optimize_routes(
    tenant_id: str,
    route_ids: List[str] = Query(..., description="List of route IDs to optimize"),
    algorithm_version: str = Query("v2.1", description="VRP algorithm version"),
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Batch optimize multiple delivery routes with VRP

    Applies VRP optimization to multiple routes in a single request.
    """
    try:
        result = await vrp_service.batch_optimize_routes(tenant_id, route_ids)

        return {
            'success': True,
            'total_routes_processed': result['total_routes_processed'],
            'successful_optimizations': result['successful_optimizations'],
            'failed_optimizations': result['failed_optimizations'],
            'results': result['results']
        }

    except Exception as e:
        logger.error("Batch optimization failed", tenant_id=tenant_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Batch optimization failed: {str(e)}")


@router.get("/tenants/{tenant_id}/routes/{route_id}/optimization-history",
            response_model=List[OptimizationHistoryItem],
            summary="Get optimization history for route")
async def get_optimization_history(
    tenant_id: str,
    route_id: str,
    limit: int = Query(10, description="Maximum number of historical records to return"),
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get historical optimization records for a route

    Retrieves past optimization runs and their results for analysis.
    """
    try:
        history = await vrp_service.get_optimization_history(route_id, limit)
        return [OptimizationHistoryItem(**item) for item in history]

    except Exception as e:
        logger.error("Failed to get optimization history", tenant_id=tenant_id, route_id=route_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get optimization history: {str(e)}")


@router.get("/tenants/{tenant_id}/vrp/constraints/validate",
            summary="Validate VRP constraints")
async def validate_vrp_constraints(
    tenant_id: str,
    route_id: str,
    max_route_duration: Optional[int] = Query(None, description="Maximum route duration in minutes"),
    max_route_distance: Optional[float] = Query(None, description="Maximum route distance in km"),
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Validate VRP constraints against a route

    Checks whether a route satisfies the specified VRP constraints.
    """
    try:
        from app.services.vrp_optimization_service import VRPConstraintValidator

        # Get route data
        route = await vrp_service.distribution_service.get_delivery_route(route_id)

        if not route:
            raise HTTPException(status_code=404, detail="Route not found")

        # Build constraints dict
        constraints = {}
        if max_route_duration is not None:
            constraints['max_route_duration'] = max_route_duration
        if max_route_distance is not None:
            constraints['max_route_distance'] = max_route_distance

        # Validate constraints
        validation_result = VRPConstraintValidator.validate_constraints(route, constraints)

        return {
            'success': True,
            'all_constraints_satisfied': validation_result['all_satisfied'],
            'constraint_violations': validation_result['constraint_violations']
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to validate VRP constraints", tenant_id=tenant_id, route_id=route_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to validate constraints: {str(e)}")


@router.post("/tenants/{tenant_id}/vrp/simulate",
             summary="Simulate VRP optimization")
async def simulate_vrp_optimization(
    tenant_id: str,
    route_id: str,
    vrp_service: VRPOptimizationService = Depends(get_vrp_optimization_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Simulate VRP optimization without saving results

    Useful for testing and previewing optimization results.
    """
    try:
        from app.services.vrp_optimization_service import VRPOptimizationSimulator

        # Get route data
        route = await vrp_service.distribution_service.get_delivery_route(route_id)

        if not route:
            raise HTTPException(status_code=404, detail="Route not found")

        # Simulate optimization
        simulation_result = VRPOptimizationSimulator.simulate_optimization(route)

        return {
            'success': True,
            'original_route': simulation_result['original_route'],
            'optimized_route': simulation_result['optimized_route'],
            'optimization_savings': simulation_result['optimization_savings'],
            'algorithm_version': simulation_result['algorithm_version'],
            'constraints_satisfied': simulation_result['constraints_satisfied'],
            'objective_value': simulation_result['objective_value']
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("VRP simulation failed", tenant_id=tenant_id, route_id=route_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"VRP simulation failed: {str(e)}")
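
A quick client sketch for the optimize endpoint above. The path comes from the @router.post decorator; the host, port, and tenant/route IDs are placeholders, and any auth headers required by verify_tenant_permission_dep are omitted:

import asyncio
import httpx

async def demo_optimize():
    payload = {"algorithm_version": "v2.1", "constraints": {"max_route_duration": 480}}
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post("/tenants/tenant-123/routes/route-456/optimize", json=payload)
        resp.raise_for_status()
        print(resp.json()["optimization_savings"])

asyncio.run(demo_optimize())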

@@ -9,6 +9,7 @@ from app.core.database import database_manager
from app.api.routes import router as distribution_router
from app.api.shipments import router as shipments_router
from app.api.internal_demo import router as internal_demo_router
from app.api.vrp_optimization import router as vrp_optimization_router
from shared.service_base import StandardFastAPIService


@@ -122,4 +123,5 @@ service.setup_standard_endpoints()
# Note: Routes now use RouteBuilder which includes full paths, so no prefix needed
service.add_router(distribution_router, tags=["distribution"])
service.add_router(shipments_router, tags=["shipments"])
service.add_router(internal_demo_router, tags=["internal-demo"])
service.add_router(vrp_optimization_router, tags=["vrp-optimization"])

@@ -58,6 +58,13 @@ class DeliveryRoute(Base):
    total_distance_km = Column(Float, nullable=True)
    estimated_duration_minutes = Column(Integer, nullable=True)

    # VRP Optimization metrics (Phase 2 enhancement)
    vrp_optimization_savings = Column(JSONB, nullable=True)  # {"distance_saved_km": 12.5, "time_saved_minutes": 25, "fuel_saved_liters": 8.2, "co2_saved_kg": 15.4, "cost_saved_eur": 12.50}
    vrp_algorithm_version = Column(String(50), nullable=True)  # Version of VRP algorithm used
    vrp_optimization_timestamp = Column(DateTime(timezone=True), nullable=True)  # When optimization was performed
    vrp_constraints_satisfied = Column(Boolean, nullable=True)  # Whether all constraints were satisfied
    vrp_objective_value = Column(Float, nullable=True)  # Objective function value from optimization

    # Route details
    route_sequence = Column(JSONB, nullable=True)  # Ordered array of stops with timing: [{"stop_number": 1, "location_id": "...", "estimated_arrival": "...", "actual_arrival": "..."}]
    notes = Column(Text, nullable=True)

@@ -231,4 +231,82 @@ class DeliveryRouteRepository:
        await self.db_session.commit()

        deleted_count = result.rowcount
        return deleted_count

    async def update_route_vrp_metrics(self, route_id: str, vrp_metrics: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Update VRP optimization metrics for a route
        """
        stmt = select(DeliveryRoute).where(DeliveryRoute.id == route_id)
        result = await self.db_session.execute(stmt)
        route = result.scalar_one_or_none()

        if not route:
            return None

        # Update VRP metrics fields
        route.vrp_optimization_savings = vrp_metrics.get('vrp_optimization_savings')
        route.vrp_algorithm_version = vrp_metrics.get('vrp_algorithm_version')
        route.vrp_optimization_timestamp = vrp_metrics.get('vrp_optimization_timestamp')
        route.vrp_constraints_satisfied = vrp_metrics.get('vrp_constraints_satisfied')
        route.vrp_objective_value = vrp_metrics.get('vrp_objective_value')

        await self.db_session.commit()
        await self.db_session.refresh(route)

        return {
            'id': str(route.id),
            'vrp_optimization_savings': route.vrp_optimization_savings,
            'vrp_algorithm_version': route.vrp_algorithm_version,
            'vrp_optimization_timestamp': route.vrp_optimization_timestamp,
            'vrp_constraints_satisfied': route.vrp_constraints_satisfied,
            'vrp_objective_value': route.vrp_objective_value
        }

    async def get_routes_by_tenant(self, tenant_id: str, limit: Optional[int] = None, offset: Optional[int] = None, order_by: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Get all routes for a specific tenant with pagination and ordering
        """
        stmt = select(DeliveryRoute).where(DeliveryRoute.tenant_id == tenant_id)

        # Apply ordering if specified
        if order_by:
            if 'vrp_optimization_timestamp' in order_by:
                if 'DESC' in order_by:
                    stmt = stmt.order_by(DeliveryRoute.vrp_optimization_timestamp.desc())
                else:
                    stmt = stmt.order_by(DeliveryRoute.vrp_optimization_timestamp.asc())
            elif 'route_date' in order_by:
                if 'DESC' in order_by:
                    stmt = stmt.order_by(DeliveryRoute.route_date.desc())
                else:
                    stmt = stmt.order_by(DeliveryRoute.route_date.asc())

        # Apply pagination if specified
        if limit is not None:
            stmt = stmt.limit(limit)
        if offset is not None:
            stmt = stmt.offset(offset)

        result = await self.db_session.execute(stmt)
        routes = result.scalars().all()

        return [{
            'id': str(route.id),
            'tenant_id': str(route.tenant_id),
            'route_number': route.route_number,
            'route_date': route.route_date,
            'vehicle_id': route.vehicle_id,
            'driver_id': route.driver_id,
            'total_distance_km': route.total_distance_km,
            'estimated_duration_minutes': route.estimated_duration_minutes,
            'route_sequence': route.route_sequence,
            'status': route.status.value if hasattr(route.status, 'value') else route.status,
            'created_at': route.created_at,
            'updated_at': route.updated_at,
            'vrp_optimization_savings': route.vrp_optimization_savings,
            'vrp_algorithm_version': route.vrp_algorithm_version,
            'vrp_optimization_timestamp': route.vrp_optimization_timestamp,
            'vrp_constraints_satisfied': route.vrp_constraints_satisfied,
            'vrp_objective_value': route.vrp_objective_value
        } for route in routes]
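
    # Usage sketch (illustrative; session and tenant wiring are placeholders):
    #   repo = DeliveryRouteRepository(session)
    #   recent = await repo.get_routes_by_tenant(
    #       "tenant-123", limit=20, offset=0,
    #       order_by="vrp_optimization_timestamp DESC")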

@@ -302,4 +302,23 @@ class DistributionService:

        except Exception as e:
            logger.error(f"Error creating delivery schedule: {e}")
            raise

    # VRP Optimization Service Methods
    async def get_route_by_id(self, route_id: str) -> Optional[Dict[str, Any]]:
        """
        Get a specific delivery route by ID
        """
        return await self.route_repository.get_route_by_id(route_id)

    async def update_route_vrp_metrics(self, route_id: str, vrp_metrics: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Update VRP optimization metrics for a route
        """
        return await self.route_repository.update_route_vrp_metrics(route_id, vrp_metrics)

    async def get_routes_by_tenant(self, tenant_id: str, limit: Optional[int] = None, offset: Optional[int] = None, order_by: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Get all routes for a specific tenant with pagination and ordering
        """
        return await self.route_repository.get_routes_by_tenant(tenant_id, limit, offset, order_by)

services/distribution/app/services/vrp_optimization_service.py (new file, 357 lines)
@@ -0,0 +1,357 @@
"""
VRP Optimization Service
Business logic for VRP optimization and metrics management
"""

from typing import List, Dict, Any, Optional
from datetime import datetime
import structlog
from sqlalchemy.ext.asyncio import AsyncSession

from app.repositories.delivery_route_repository import DeliveryRouteRepository
from app.services.routing_optimizer import RoutingOptimizer
from app.core.database import get_db

logger = structlog.get_logger()


class VRPOptimizationService:
    """
    Service for VRP optimization operations
    """

    def __init__(self, distribution_service: "DistributionService", database_manager: Any):
        """
        Initialize the VRP optimization service

        Args:
            distribution_service: Distribution service instance
            database_manager: Database manager for session management
        """
        self.distribution_service = distribution_service
        self.database_manager = database_manager
        self.routing_optimizer = RoutingOptimizer()

    async def optimize_route(
        self,
        tenant_id: str,
        route_id: str,
        optimization_params: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Optimize a specific delivery route using VRP

        Args:
            tenant_id: Tenant ID
            route_id: Route ID to optimize
            optimization_params: Optimization parameters

        Returns:
            Optimization result with metrics
        """
        try:
            # Get the current route using the distribution service
            route = await self.distribution_service.get_route_by_id(route_id)
            if not route:
                raise ValueError(f"Route {route_id} not found")

            # Extract deliveries from the route sequence
            deliveries = self._extract_deliveries_from_route(route)

            # Perform VRP optimization
            depot_location = optimization_params.get('depot_location', (0.0, 0.0))
            vehicle_capacity = optimization_params.get('vehicle_capacity_kg', 1000.0)
            time_limit = optimization_params.get('time_limit_seconds', 30.0)

            optimization_result = await self.routing_optimizer.optimize_daily_routes(
                deliveries=deliveries,
                depot_location=depot_location,
                vehicle_capacity_kg=vehicle_capacity,
                time_limit_seconds=time_limit
            )

            # Update the route with optimization metrics
            vrp_metrics = {
                'vrp_optimization_savings': {
                    'distance_saved_km': optimization_result.get('distance_savings_km', 0.0),
                    'time_saved_minutes': optimization_result.get('time_savings_minutes', 0.0),
                    'cost_saved': optimization_result.get('cost_savings', 0.0)
                },
                'vrp_algorithm_version': 'or-tools-v1.0',
                'vrp_optimization_timestamp': datetime.utcnow(),
                'vrp_constraints_satisfied': optimization_result.get('constraints_satisfied', True),
                'vrp_objective_value': optimization_result.get('objective_value', 0.0)
            }

            # Persist the VRP metrics through the distribution service
            await self.distribution_service.update_route_vrp_metrics(route_id, vrp_metrics)

            return {
                'success': True,
                'route_id': route_id,
                'optimization_metrics': vrp_metrics,
                'optimized_route': optimization_result.get('optimized_route', [])
            }

        except Exception as e:
            logger.error("vrp_optimization_failed", error=str(e), route_id=route_id)
            raise

    def _extract_deliveries_from_route(self, route: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Extract deliveries from the route sequence

        Args:
            route: Delivery route record

        Returns:
            List of delivery dictionaries
        """
        deliveries = []
        route_sequence = route.get('route_sequence') or []

        for stop in route_sequence:
            deliveries.append({
                'id': stop.get('id', ''),
                'location': (stop.get('lat', 0.0), stop.get('lng', 0.0)),
                'weight_kg': stop.get('weight_kg', 0.0),
                'time_window': stop.get('time_window')
            })

        return deliveries

    async def get_route_optimization_metrics(
        self,
        tenant_id: str,
        route_id: str
    ) -> Dict[str, Any]:
        """
        Get VRP optimization metrics for a specific route

        Args:
            tenant_id: Tenant ID
            route_id: Route ID

        Returns:
            VRP optimization metrics
        """
        route = await self.distribution_service.get_route_by_id(route_id)
        if not route:
            raise ValueError(f"Route {route_id} not found")

        return {
            'vrp_optimization_savings': route.get('vrp_optimization_savings'),
            'vrp_algorithm_version': route.get('vrp_algorithm_version'),
            'vrp_optimization_timestamp': route.get('vrp_optimization_timestamp'),
            'vrp_constraints_satisfied': route.get('vrp_constraints_satisfied'),
            'vrp_objective_value': route.get('vrp_objective_value')
        }

    async def get_network_optimization_summary(
        self,
        tenant_id: str
    ) -> Dict[str, Any]:
        """
        Get the VRP optimization summary across all routes for a tenant

        Args:
            tenant_id: Tenant ID

        Returns:
            Network optimization summary
        """
        routes = await self.distribution_service.get_routes_by_tenant(tenant_id)

        total_optimized = 0
        total_distance_saved = 0.0
        total_time_saved = 0.0
        total_cost_saved = 0.0

        for route in routes:
            if route.get('vrp_optimization_timestamp'):
                total_optimized += 1
                savings = route.get('vrp_optimization_savings') or {}
                total_distance_saved += savings.get('distance_saved_km', 0.0)
                total_time_saved += savings.get('time_saved_minutes', 0.0)
                total_cost_saved += savings.get('cost_saved', 0.0)

        return {
            'total_routes': len(routes),
            'total_optimized_routes': total_optimized,
            'optimization_rate': total_optimized / len(routes) if routes else 0.0,
            'total_distance_saved_km': total_distance_saved,
            'total_time_saved_minutes': total_time_saved,
            'total_cost_saved': total_cost_saved,
            'average_savings_per_route': {
                'distance_km': total_distance_saved / total_optimized if total_optimized > 0 else 0.0,
                'time_minutes': total_time_saved / total_optimized if total_optimized > 0 else 0.0,
                'cost': total_cost_saved / total_optimized if total_optimized > 0 else 0.0
            }
        }

    async def batch_optimize_routes(
        self,
        tenant_id: str,
        route_ids: List[str],
        optimization_params: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Batch optimize multiple routes

        Args:
            tenant_id: Tenant ID
            route_ids: List of route IDs to optimize
            optimization_params: Optimization parameters

        Returns:
            Batch optimization results
        """
        results = []

        for route_id in route_ids:
            try:
                result = await self.optimize_route(tenant_id, route_id, optimization_params or {})
                results.append({
                    'route_id': route_id,
                    'success': True,
                    'metrics': result['optimization_metrics']
                })
            except Exception as e:
                results.append({
                    'route_id': route_id,
                    'success': False,
                    'error': str(e)
                })

        return {
            'total_routes_processed': len(route_ids),
            'successful_optimizations': sum(1 for r in results if r['success']),
            'failed_optimizations': sum(1 for r in results if not r['success']),
            'results': results
        }

    async def validate_optimization_constraints(
        self,
        tenant_id: str,
        route_id: str
    ) -> Dict[str, Any]:
        """
        Validate VRP optimization constraints for a route

        Args:
            tenant_id: Tenant ID
            route_id: Route ID

        Returns:
            Constraint validation results
        """
        route = await self.distribution_service.get_route_by_id(route_id)
        if not route:
            raise ValueError(f"Route {route_id} not found")

        # Check whether the route has been optimized
        if not route.get('vrp_optimization_timestamp'):
            return {
                'route_id': route_id,
                'is_optimized': False,
                'constraints_valid': False,
                'message': 'Route has not been optimized yet'
            }

        # Validate constraints
        constraints_valid = route.get('vrp_constraints_satisfied') or False

        return {
            'route_id': route_id,
            'is_optimized': True,
            'constraints_valid': constraints_valid,
            'vrp_algorithm_version': route.get('vrp_algorithm_version'),
            'optimization_timestamp': route.get('vrp_optimization_timestamp')
        }

    async def get_optimization_history(
        self,
        tenant_id: str,
        limit: int = 50,
        offset: int = 0
    ) -> Dict[str, Any]:
        """
        Get VRP optimization history for a tenant

        Args:
            tenant_id: Tenant ID
            limit: Maximum number of records to return
            offset: Pagination offset

        Returns:
            Optimization history
        """
        routes = await self.distribution_service.get_routes_by_tenant(
            tenant_id,
            limit=limit,
            offset=offset,
            order_by='vrp_optimization_timestamp DESC'
        )

        history = []
        for route in routes:
            if route.get('vrp_optimization_timestamp'):
                history.append({
                    'route_id': route['id'],
                    'route_number': route.get('route_number'),
                    'optimization_timestamp': route.get('vrp_optimization_timestamp'),
                    'algorithm_version': route.get('vrp_algorithm_version'),
                    'constraints_satisfied': route.get('vrp_constraints_satisfied'),
                    'objective_value': route.get('vrp_objective_value'),
                    'savings': route.get('vrp_optimization_savings')
                })

        return {
            'total_records': len(history),
            'history': history
        }

    async def simulate_optimization(
        self,
        tenant_id: str,
        route_data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Simulate VRP optimization without saving results

        Args:
            tenant_id: Tenant ID
            route_data: Route data for simulation

        Returns:
            Simulation results
        """
        try:
            deliveries = route_data.get('deliveries', [])
            depot_location = route_data.get('depot_location', (0.0, 0.0))
            vehicle_capacity = route_data.get('vehicle_capacity_kg', 1000.0)
            time_limit = route_data.get('time_limit_seconds', 30.0)

            simulation_result = await self.routing_optimizer.optimize_daily_routes(
                deliveries=deliveries,
                depot_location=depot_location,
                vehicle_capacity_kg=vehicle_capacity,
                time_limit_seconds=time_limit
            )

            return {
                'success': True,
                'simulation_results': simulation_result,
                'estimated_savings': {
                    'distance_km': simulation_result.get('distance_savings_km', 0.0),
                    'time_minutes': simulation_result.get('time_savings_minutes', 0.0),
                    'cost': simulation_result.get('cost_savings', 0.0)
                }
            }

        except Exception as e:
            logger.error("vrp_simulation_failed", error=str(e))
            return {
                'success': False,
                'error': str(e)
            }
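
A usage sketch for optimize_route (the service wiring and IDs are illustrative; the parameter keys mirror the .get() lookups in optimize_route above):

# params keys mirror the defaults read inside optimize_route
params = {
    "depot_location": (52.52, 13.405),   # (lat, lng) tuple
    "vehicle_capacity_kg": 1200.0,
    "time_limit_seconds": 15.0,
}
# Inside an async context, with vrp_service built as in the API dependency:
# result = await vrp_service.optimize_route("tenant-123", "route-456", params)
# result["optimization_metrics"]["vrp_optimization_savings"] -> savings dict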

@@ -41,6 +41,12 @@ def upgrade():
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
        sa.Column('created_by', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('updated_by', postgresql.UUID(as_uuid=True), nullable=False),
        # VRP Optimization Metrics (JSONB to match the DeliveryRoute model)
        sa.Column('vrp_optimization_savings', postgresql.JSONB(), nullable=True),
        sa.Column('vrp_algorithm_version', sa.String(length=50), nullable=True),
        sa.Column('vrp_optimization_timestamp', sa.DateTime(timezone=True), nullable=True),
        sa.Column('vrp_constraints_satisfied', sa.Boolean(), nullable=True),
        sa.Column('vrp_objective_value', sa.Float(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('route_number')
    )

@@ -53,6 +59,8 @@ def upgrade():
    op.create_index('ix_delivery_routes_driver_id', 'delivery_routes', ['driver_id'])
    op.create_index('ix_delivery_routes_tenant_date', 'delivery_routes', ['tenant_id', 'route_date'])
    op.create_index('ix_delivery_routes_date_tenant_status', 'delivery_routes', ['route_date', 'tenant_id', 'status'])
    # VRP Optimization Index
    op.create_index('ix_delivery_routes_vrp_optimization', 'delivery_routes', ['vrp_optimization_timestamp'], unique=False)


    # Create shipments table

@@ -156,6 +164,7 @@ def downgrade():
    op.drop_table('shipments')

    # Drop indexes for delivery_routes
    op.drop_index('ix_delivery_routes_vrp_optimization', table_name='delivery_routes')
    op.drop_index('ix_delivery_routes_date_tenant_status', table_name='delivery_routes')
    op.drop_index('ix_delivery_routes_tenant_date', table_name='delivery_routes')
    op.drop_index('ix_delivery_routes_driver_id', table_name='delivery_routes')


services/forecasting/app/api/forecast_feedback.py (new file, 417 lines)
@@ -0,0 +1,417 @@
# services/forecasting/app/api/forecast_feedback.py
"""
Forecast Feedback API - Endpoints for collecting and analyzing forecast feedback
"""

import structlog
from fastapi import APIRouter, Depends, HTTPException, status, Query, Path, Body
from typing import List, Optional, Dict, Any
from datetime import date, datetime, timedelta
import uuid
import enum
from pydantic import BaseModel, Field

from app.services.forecast_feedback_service import ForecastFeedbackService
from shared.database.base import create_database_manager
from app.core.config import settings
from shared.routing import RouteBuilder
from shared.auth.tenant_access import verify_tenant_permission_dep

route_builder = RouteBuilder('forecasting')
logger = structlog.get_logger()
router = APIRouter(tags=["forecast-feedback"])


# Enums for feedback types
class FeedbackType(str, enum.Enum):
    """Type of feedback on forecast accuracy"""
    TOO_HIGH = "too_high"
    TOO_LOW = "too_low"
    ACCURATE = "accurate"
    UNCERTAIN = "uncertain"


class FeedbackConfidence(str, enum.Enum):
    """Confidence level of the feedback provider"""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


# Pydantic models
class ForecastFeedbackRequest(BaseModel):
    """Request model for submitting forecast feedback"""
    feedback_type: FeedbackType = Field(..., description="Type of feedback on forecast accuracy")
    confidence: FeedbackConfidence = Field(..., description="Confidence level of the feedback provider")
    actual_value: Optional[float] = Field(None, description="Actual observed value")
    notes: Optional[str] = Field(None, description="Additional notes about the feedback")
    feedback_data: Optional[Dict[str, Any]] = Field(None, description="Additional feedback data")
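
    # Illustrative request body (values are placeholders):
    # {"feedback_type": "too_high", "confidence": "high",
    #  "actual_value": 142.0, "notes": "Holiday week ran below forecast"}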


class ForecastFeedbackResponse(BaseModel):
    """Response model for forecast feedback"""
    feedback_id: str = Field(..., description="Unique feedback ID")
    forecast_id: str = Field(..., description="Forecast ID this feedback relates to")
    tenant_id: str = Field(..., description="Tenant ID")
    feedback_type: FeedbackType = Field(..., description="Type of feedback")
    confidence: FeedbackConfidence = Field(..., description="Confidence level")
    actual_value: Optional[float] = Field(None, description="Actual value observed")
    notes: Optional[str] = Field(None, description="Feedback notes")
    feedback_data: Dict[str, Any] = Field(..., description="Additional feedback data")
    created_at: datetime = Field(..., description="When feedback was created")
    created_by: Optional[str] = Field(None, description="Who created the feedback")


class ForecastAccuracyMetrics(BaseModel):
    """Accuracy metrics for a forecast"""
    forecast_id: str = Field(..., description="Forecast ID")
    total_feedback_count: int = Field(..., description="Total feedback received")
    accuracy_score: float = Field(..., description="Calculated accuracy score (0-100)")
    feedback_distribution: Dict[str, int] = Field(..., description="Distribution of feedback types")
    average_confidence: float = Field(..., description="Average confidence score")
    last_feedback_date: Optional[datetime] = Field(None, description="Most recent feedback date")


class ForecasterPerformanceMetrics(BaseModel):
    """Performance metrics for the forecasting system"""
    overall_accuracy: float = Field(..., description="Overall system accuracy score")
    total_forecasts_with_feedback: int = Field(..., description="Total forecasts with feedback")
    accuracy_by_product: Dict[str, float] = Field(..., description="Accuracy by product type")
    accuracy_trend: str = Field(..., description="Trend direction: improving, declining, stable")
    improvement_suggestions: List[str] = Field(..., description="AI-generated improvement suggestions")


def get_forecast_feedback_service():
    """Dependency injection for ForecastFeedbackService"""
    database_manager = create_database_manager(settings.DATABASE_URL, "forecasting-service")
    return ForecastFeedbackService(database_manager)


@router.post(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "feedback"),
    response_model=ForecastFeedbackResponse,
    status_code=status.HTTP_201_CREATED
)
async def submit_forecast_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    feedback_request: ForecastFeedbackRequest = Body(...),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Submit feedback on forecast accuracy

    Allows users to provide feedback on whether forecasts were accurate, too high, or too low.
    This feedback is used to improve future forecast accuracy through continuous learning.
    """
    try:
        logger.info("Submitting forecast feedback",
                    tenant_id=tenant_id, forecast_id=forecast_id,
                    feedback_type=feedback_request.feedback_type)

        # Validate that the forecast exists
        forecast_exists = await forecast_feedback_service.forecast_exists(tenant_id, forecast_id)
        if not forecast_exists:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Forecast not found"
            )

        # Submit feedback
        feedback = await forecast_feedback_service.submit_feedback(
            tenant_id=tenant_id,
            forecast_id=forecast_id,
            feedback_type=feedback_request.feedback_type,
            confidence=feedback_request.confidence,
            actual_value=feedback_request.actual_value,
            notes=feedback_request.notes,
            feedback_data=feedback_request.feedback_data
        )

        return {
            'feedback_id': str(feedback.feedback_id),
            'forecast_id': str(feedback.forecast_id),
            'tenant_id': feedback.tenant_id,
            'feedback_type': feedback.feedback_type,
            'confidence': feedback.confidence,
            'actual_value': feedback.actual_value,
            'notes': feedback.notes,
            'feedback_data': feedback.feedback_data or {},
            'created_at': feedback.created_at,
            'created_by': feedback.created_by
        }

    except HTTPException:
        raise
    except ValueError as e:
        logger.error("Invalid forecast ID", error=str(e))
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid forecast ID format"
        )
    except Exception as e:
        logger.error("Failed to submit forecast feedback", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to submit feedback"
        )


@router.get(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "feedback"),
    response_model=List[ForecastFeedbackResponse]
)
async def get_forecast_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get all feedback for a specific forecast

    Retrieves historical feedback submissions for analysis and auditing.
    """
    try:
        logger.info("Getting forecast feedback", tenant_id=tenant_id, forecast_id=forecast_id)

        feedback_list = await forecast_feedback_service.get_feedback_for_forecast(
            tenant_id=tenant_id,
            forecast_id=forecast_id,
            limit=limit,
            offset=offset
        )

        return [
            ForecastFeedbackResponse(
                feedback_id=str(f.feedback_id),
                forecast_id=str(f.forecast_id),
                tenant_id=f.tenant_id,
                feedback_type=f.feedback_type,
                confidence=f.confidence,
                actual_value=f.actual_value,
                notes=f.notes,
                feedback_data=f.feedback_data or {},
                created_at=f.created_at,
                created_by=f.created_by
            ) for f in feedback_list
        ]

    except Exception as e:
        logger.error("Failed to get forecast feedback", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve feedback"
        )


@router.get(
    route_builder.build_nested_resource_route("forecasts", "forecast_id", "accuracy"),
    response_model=ForecastAccuracyMetrics
)
async def get_forecast_accuracy_metrics(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get accuracy metrics for a specific forecast

    Calculates accuracy scores based on feedback and actual vs. predicted values.
    """
    try:
        logger.info("Getting forecast accuracy metrics", tenant_id=tenant_id, forecast_id=forecast_id)

        metrics = await forecast_feedback_service.calculate_accuracy_metrics(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        if not metrics:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="No accuracy metrics available for this forecast"
            )

        return {
            'forecast_id': metrics.forecast_id,
            'total_feedback_count': metrics.total_feedback_count,
            'accuracy_score': metrics.accuracy_score,
            'feedback_distribution': metrics.feedback_distribution,
            'average_confidence': metrics.average_confidence,
            'last_feedback_date': metrics.last_feedback_date
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to get forecast accuracy metrics", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to calculate accuracy metrics"
        )


@router.get(
    route_builder.build_base_route("forecasts", "accuracy-summary"),
    response_model=ForecasterPerformanceMetrics
)
async def get_forecaster_performance_summary(
    tenant_id: str = Path(..., description="Tenant ID"),
    start_date: Optional[date] = Query(None, description="Start date filter"),
    end_date: Optional[date] = Query(None, description="End date filter"),
    product_id: Optional[str] = Query(None, description="Filter by product ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get overall forecaster performance summary

    Aggregates accuracy metrics across all forecasts to assess overall system performance
    and identify areas for improvement.
    """
    try:
        logger.info("Getting forecaster performance summary", tenant_id=tenant_id)

        metrics = await forecast_feedback_service.calculate_performance_summary(
            tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date,
            product_id=product_id
        )

        return {
            'overall_accuracy': metrics.overall_accuracy,
            'total_forecasts_with_feedback': metrics.total_forecasts_with_feedback,
            'accuracy_by_product': metrics.accuracy_by_product,
            'accuracy_trend': metrics.accuracy_trend,
            'improvement_suggestions': metrics.improvement_suggestions
        }

    except Exception as e:
        logger.error("Failed to get forecaster performance summary", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to calculate performance summary"
        )


@router.get(
    route_builder.build_base_route("forecasts", "feedback-trends")
)
async def get_feedback_trends(
    tenant_id: str = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=7, le=365, description="Number of days to analyze"),
    product_id: Optional[str] = Query(None, description="Filter by product ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get feedback trends over time

    Analyzes how forecast accuracy and feedback patterns change over time.
    """
    try:
        logger.info("Getting feedback trends", tenant_id=tenant_id, days=days)

        trends = await forecast_feedback_service.get_feedback_trends(
            tenant_id=tenant_id,
            days=days,
            product_id=product_id
        )

        return {
            'success': True,
            'trends': trends,
            'period': f'Last {days} days'
        }

    except Exception as e:
        logger.error("Failed to get feedback trends", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve feedback trends"
        )


@router.post(
    route_builder.build_resource_action_route("forecasts", "forecast_id", "retrain")
)
async def trigger_retraining_from_feedback(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Trigger model retraining based on feedback

    Initiates a retraining job using recent feedback to improve forecast accuracy.
    """
    try:
        logger.info("Triggering retraining from feedback", tenant_id=tenant_id, forecast_id=forecast_id)

        result = await forecast_feedback_service.trigger_retraining_from_feedback(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        return {
            'success': True,
            'message': 'Retraining job initiated successfully',
            'job_id': result.job_id,
            'forecasts_included': result.forecasts_included,
            'feedback_samples_used': result.feedback_samples_used
        }

    except Exception as e:
        logger.error("Failed to trigger retraining", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to initiate retraining"
        )


@router.get(
    route_builder.build_resource_action_route("forecasts", "forecast_id", "suggestions")
)
async def get_improvement_suggestions(
    tenant_id: str = Path(..., description="Tenant ID"),
    forecast_id: str = Path(..., description="Forecast ID"),
    forecast_feedback_service: ForecastFeedbackService = Depends(get_forecast_feedback_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get AI-generated improvement suggestions for a forecast

    Analyzes feedback patterns and suggests specific improvements for forecast accuracy.
    """
    try:
        logger.info("Getting improvement suggestions", tenant_id=tenant_id, forecast_id=forecast_id)

        suggestions = await forecast_feedback_service.get_improvement_suggestions(
            tenant_id=tenant_id,
            forecast_id=forecast_id
        )

        return {
            'success': True,
            'forecast_id': forecast_id,
            'suggestions': suggestions,
            'confidence_scores': [s.get('confidence', 0.8) for s in suggestions]
        }

    except Exception as e:
        logger.error("Failed to get improvement suggestions", error=str(e), tenant_id=tenant_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to generate suggestions"
        )
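
A minimal submission sketch for the feedback endpoint above. The concrete path produced by route_builder.build_nested_resource_route is an assumption here, as are the host, port, and IDs; auth headers required by verify_tenant_permission_dep are omitted:

import asyncio
import httpx

async def demo_submit_feedback():
    body = {"feedback_type": "accurate", "confidence": "medium", "actual_value": 142.0}
    async with httpx.AsyncClient(base_url="http://localhost:8001") as client:
        resp = await client.post(
            "/api/v1/tenants/tenant-123/forecasts/fc-789/feedback", json=body)
        resp.raise_for_status()
        print(resp.json()["feedback_id"])

asyncio.run(demo_submit_feedback())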

@@ -14,7 +14,7 @@ from app.services.forecasting_alert_service import ForecastingAlertService
from shared.service_base import StandardFastAPIService

# Import API routers
from app.api import forecasts, forecasting_operations, analytics, scenario_operations, audit, ml_insights, validation, historical_validation, webhooks, performance_monitoring, retraining, enterprise_forecasting, internal_demo
from app.api import forecasts, forecasting_operations, analytics, scenario_operations, audit, ml_insights, validation, historical_validation, webhooks, performance_monitoring, retraining, enterprise_forecasting, internal_demo, forecast_feedback


class ForecastingService(StandardFastAPIService):

@@ -200,6 +200,7 @@ service.add_router(webhooks.router)  # Webhooks endpoint
service.add_router(performance_monitoring.router)  # Performance monitoring endpoint
service.add_router(retraining.router)  # Retraining endpoint
service.add_router(enterprise_forecasting.router)  # Enterprise forecasting endpoint
service.add_router(forecast_feedback.router)  # Forecast feedback endpoint

if __name__ == "__main__":
    import uvicorn

services/forecasting/app/services/forecast_feedback_service.py (new file, 533 lines)
@@ -0,0 +1,533 @@
|
||||
# services/forecasting/app/services/forecast_feedback_service.py
|
||||
"""
|
||||
Forecast Feedback Service
|
||||
Business logic for collecting and analyzing forecast feedback
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any, Optional
|
||||
from datetime import datetime, timedelta, date
|
||||
import uuid
|
||||
import structlog
|
||||
from dataclasses import dataclass
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
@dataclass
|
||||
class ForecastFeedback:
|
||||
"""Data class for forecast feedback"""
|
||||
feedback_id: uuid.UUID
|
||||
forecast_id: uuid.UUID
|
||||
tenant_id: str
|
||||
feedback_type: str
|
||||
confidence: str
|
||||
actual_value: Optional[float]
|
||||
notes: Optional[str]
|
||||
feedback_data: Dict[str, Any]
|
||||
created_at: datetime
|
||||
created_by: Optional[str]
|
||||
|
||||
|
||||
@dataclass
|
||||
class ForecastAccuracyMetrics:
|
||||
"""Data class for forecast accuracy metrics"""
|
||||
forecast_id: str
|
||||
total_feedback_count: int
|
||||
accuracy_score: float
|
||||
feedback_distribution: Dict[str, int]
|
||||
average_confidence: float
|
||||
last_feedback_date: Optional[datetime]
|
||||
|
||||
|
||||
@dataclass
|
||||
class ForecasterPerformanceMetrics:
|
||||
"""Data class for forecaster performance metrics"""
|
||||
overall_accuracy: float
|
||||
total_forecasts_with_feedback: int
|
||||
accuracy_by_product: Dict[str, float]
|
||||
accuracy_trend: str
|
||||
improvement_suggestions: List[str]
|
||||
|
||||
|
||||
class ForecastFeedbackService:
|
||||
"""
|
||||
Service for managing forecast feedback and accuracy tracking
|
||||
"""
|
||||
|
||||
def __init__(self, database_manager):
|
||||
self.database_manager = database_manager
|
||||
|
||||
async def forecast_exists(self, tenant_id: str, forecast_id: str) -> bool:
|
||||
"""
|
||||
Check if a forecast exists
|
||||
"""
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
from app.models.forecasts import Forecast
|
||||
|
||||
result = await session.execute(
|
||||
"""
|
||||
SELECT 1 FROM forecasts
|
||||
WHERE tenant_id = :tenant_id AND id = :forecast_id
|
||||
""",
|
||||
{"tenant_id": tenant_id, "forecast_id": forecast_id}
|
||||
)
|
||||
return result.scalar() is not None
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to check forecast existence", error=str(e))
|
||||
raise Exception(f"Failed to check forecast existence: {str(e)}")
|
||||
|
||||

    async def submit_feedback(
        self,
        tenant_id: str,
        forecast_id: str,
        feedback_type: str,
        confidence: str,
        actual_value: Optional[float] = None,
        notes: Optional[str] = None,
        feedback_data: Optional[Dict[str, Any]] = None
    ) -> ForecastFeedback:
        """
        Submit feedback on forecast accuracy
        """
        try:
            async with self.database_manager.get_session() as session:
                # Create feedback record
                feedback_id = uuid.uuid4()
                created_at = datetime.now()

                # In a real implementation, this would insert into a forecast_feedback table
                # For demo purposes, we'll simulate the database operation

                feedback = ForecastFeedback(
                    feedback_id=feedback_id,
                    forecast_id=uuid.UUID(forecast_id),
                    tenant_id=tenant_id,
                    feedback_type=feedback_type,
                    confidence=confidence,
                    actual_value=actual_value,
                    notes=notes,
                    feedback_data=feedback_data or {},
                    created_at=created_at,
                    created_by="system"  # In a real implementation, this would be the user ID
                )

                # Simulate database insert
                logger.info("Feedback submitted",
                            feedback_id=str(feedback_id),
                            forecast_id=forecast_id,
                            feedback_type=feedback_type)

                return feedback

        except Exception as e:
            logger.error("Failed to submit feedback", error=str(e))
            raise Exception(f"Failed to submit feedback: {str(e)}")

    async def get_feedback_for_forecast(
        self,
        tenant_id: str,
        forecast_id: str,
        limit: int = 50,
        offset: int = 0
    ) -> List[ForecastFeedback]:
        """
        Get all feedback for a specific forecast
        """
        try:
            # In a real implementation, this would query the forecast_feedback table
            # For demo purposes, we'll return simulated data

            # Simulate some feedback data
            simulated_feedback = []

            for i in range(min(limit, 3)):  # Return up to 3 simulated feedback items
                feedback = ForecastFeedback(
                    feedback_id=uuid.uuid4(),
                    forecast_id=uuid.UUID(forecast_id),
                    tenant_id=tenant_id,
                    feedback_type=["too_high", "too_low", "accurate"][i % 3],
                    confidence=["medium", "high", "low"][i % 3],
                    actual_value=150.0 + i * 20 if i < 2 else None,
                    notes=f"Feedback sample {i + 1}" if i == 0 else None,
                    feedback_data={"sample": i + 1, "demo": True},
                    created_at=datetime.now() - timedelta(days=i),
                    created_by="demo_user"
                )
                simulated_feedback.append(feedback)

            return simulated_feedback

        except Exception as e:
            logger.error("Failed to get feedback for forecast", error=str(e))
            raise Exception(f"Failed to get feedback: {str(e)}")

    async def calculate_accuracy_metrics(
        self,
        tenant_id: str,
        forecast_id: str
    ) -> Optional[ForecastAccuracyMetrics]:
        """
        Calculate accuracy metrics for a forecast; returns None when no feedback exists
        """
        try:
            # Get feedback for this forecast
            feedback_list = await self.get_feedback_for_forecast(tenant_id, forecast_id)

            if not feedback_list:
                return None

            # Calculate metrics
            total_feedback = len(feedback_list)

            # Count feedback distribution
            feedback_distribution = {
                "too_high": 0,
                "too_low": 0,
                "accurate": 0,
                "uncertain": 0
            }

            confidence_scores = {
                "low": 1,
                "medium": 2,
                "high": 3
            }

            total_confidence = 0

            for feedback in feedback_list:
                feedback_distribution[feedback.feedback_type] += 1
                total_confidence += confidence_scores.get(feedback.confidence, 1)

            # Calculate accuracy score (simplified)
            accurate_count = feedback_distribution["accurate"]
            accuracy_score = (accurate_count / total_feedback) * 100

            # Adjust for confidence
            avg_confidence = total_confidence / total_feedback
            adjusted_accuracy = accuracy_score * (avg_confidence / 3)  # Normalize confidence to the 0-1 range

            return ForecastAccuracyMetrics(
                forecast_id=forecast_id,
                total_feedback_count=total_feedback,
                accuracy_score=round(adjusted_accuracy, 1),
                feedback_distribution=feedback_distribution,
                average_confidence=round(avg_confidence, 1),
                last_feedback_date=max(f.created_at for f in feedback_list)
            )

        except Exception as e:
            logger.error("Failed to calculate accuracy metrics", error=str(e))
            raise Exception(f"Failed to calculate metrics: {str(e)}")
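
    # Worked example (hypothetical numbers): three feedback items, one of them
    # "accurate", with confidences [high, medium, low] -> scores [3, 2, 1]:
    #   accuracy_score    = 1/3 * 100        = 33.3
    #   avg_confidence    = (3 + 2 + 1) / 3  = 2.0
    #   adjusted_accuracy = 33.3 * (2.0 / 3) = 22.2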

    async def calculate_performance_summary(
        self,
        tenant_id: str,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None,
        product_id: Optional[str] = None
    ) -> ForecasterPerformanceMetrics:
        """
        Calculate overall forecaster performance summary
        """
        try:
            # In a real implementation, this would aggregate data across multiple forecasts
            # For demo purposes, we'll return simulated metrics

            # Simulate performance data
            accuracy_by_product = {
                "baguette": 85.5,
                "croissant": 78.2,
                "pain_au_chocolat": 92.1
            }

            if product_id and product_id in accuracy_by_product:
                # Return metrics for the specific product only
                product_accuracy = accuracy_by_product[product_id]
                accuracy_by_product = {product_id: product_accuracy}

            # Calculate overall accuracy
            overall_accuracy = sum(accuracy_by_product.values()) / len(accuracy_by_product)

            # Determine trend (simulated)
            trend_data = [82.3, 84.1, 85.5, 86.8, 88.2]  # Last 5 periods
            if trend_data[-1] > trend_data[0]:
                trend = "improving"
            elif trend_data[-1] < trend_data[0]:
                trend = "declining"
            else:
                trend = "stable"

            # Generate improvement suggestions
            suggestions = []

            for product, accuracy in accuracy_by_product.items():
                if accuracy < 80:
                    suggestions.append(f"Improve {product} forecast accuracy (current: {accuracy}%)")
                elif accuracy < 90:
                    suggestions.append(f"Consider fine-tuning {product} forecast model (current: {accuracy}%)")

            if not suggestions:
                suggestions.append("Overall forecast accuracy is excellent - maintain current approach")

            return ForecasterPerformanceMetrics(
                overall_accuracy=round(overall_accuracy, 1),
                total_forecasts_with_feedback=42,
                accuracy_by_product=accuracy_by_product,
                accuracy_trend=trend,
                improvement_suggestions=suggestions
            )

        except Exception as e:
            logger.error("Failed to calculate performance summary", error=str(e))
            raise Exception(f"Failed to calculate summary: {str(e)}")

    async def get_feedback_trends(
        self,
        tenant_id: str,
        days: int = 30,
        product_id: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Get feedback trends over time
        """
        try:
            # Simulate trend data
            trends = []
            end_date = datetime.now()

            # Generate daily trend data
            for i in range(days):
                day = end_date - timedelta(days=i)  # named `day` to avoid shadowing the imported `date` type

                # Simulate varying accuracy with weekly pattern
                base_accuracy = 85.0
                weekly_variation = 3.0 * (i % 7 / 6 - 0.5)  # Weekly pattern
                daily_noise = (i % 3 - 1) * 1.5  # Daily noise

                accuracy = max(70, min(95, base_accuracy + weekly_variation + daily_noise))

                trends.append({
                    'date': day.strftime('%Y-%m-%d'),
                    'accuracy_score': round(accuracy, 1),
                    'feedback_count': max(1, int(5 + i % 10)),
                    'confidence_score': round(2.5 + (i % 5 - 2) * 0.2, 1)
                })

            # Sort by date (oldest first)
            trends.sort(key=lambda x: x['date'])

            return trends

        except Exception as e:
            logger.error("Failed to get feedback trends", error=str(e))
            raise Exception(f"Failed to get trends: {str(e)}")

    async def trigger_retraining_from_feedback(
        self,
        tenant_id: str,
        forecast_id: str
    ) -> Dict[str, Any]:
        """
        Trigger model retraining based on feedback
        """
        try:
            # In a real implementation, this would:
            # 1. Collect recent feedback data
            # 2. Prepare training dataset
            # 3. Submit retraining job to ML service
            # 4. Return job ID

            # For demo purposes, simulate a retraining job
            job_id = str(uuid.uuid4())

            logger.info("Retraining job triggered",
                        job_id=job_id,
                        tenant_id=tenant_id,
                        forecast_id=forecast_id)

            return {
                'job_id': job_id,
                'forecasts_included': 15,
                'feedback_samples_used': 42,
                'status': 'queued',
                'estimated_completion': (datetime.now() + timedelta(minutes=30)).isoformat()
            }

        except Exception as e:
            logger.error("Failed to trigger retraining", error=str(e))
            raise Exception(f"Failed to trigger retraining: {str(e)}")
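
    # A minimal sketch of what the real submission could look like, assuming a
    # hypothetical ml_client with a `submit_training_job` coroutine (none of these
    # names exist in this codebase; they only illustrate the four steps above):
    #
    #     feedback = await self.get_feedback_for_forecast(tenant_id, forecast_id)
    #     samples = [(str(f.forecast_id), f.actual_value) for f in feedback if f.actual_value is not None]
    #     job = await ml_client.submit_training_job(tenant_id=tenant_id, samples=samples)
    #     return {'job_id': job['id'], 'status': job['status']}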

    async def get_improvement_suggestions(
        self,
        tenant_id: str,
        forecast_id: str
    ) -> List[Dict[str, Any]]:
        """
        Get AI-generated improvement suggestions
        """
        try:
            # Get accuracy metrics for this forecast
            metrics = await self.calculate_accuracy_metrics(tenant_id, forecast_id)

            if not metrics:
                return [
                    {
                        'suggestion': 'Insufficient feedback data to generate suggestions',
                        'type': 'data',
                        'priority': 'low',
                        'confidence': 0.7
                    }
                ]

            # Generate suggestions based on metrics
            suggestions = []

            # Analyze feedback distribution
            feedback_dist = metrics.feedback_distribution
            total_feedback = metrics.total_feedback_count

            if feedback_dist['too_high'] > total_feedback * 0.4:
                suggestions.append({
                    'suggestion': 'Forecasts are consistently too high - consider adjusting demand estimation parameters',
                    'type': 'bias',
                    'priority': 'high',
                    'confidence': 0.9,
                    'details': {
                        'too_high_percentage': feedback_dist['too_high'] / total_feedback * 100,
                        'recommended_action': 'Reduce demand estimation by 10-15%'
                    }
                })

            if feedback_dist['too_low'] > total_feedback * 0.4:
                suggestions.append({
                    'suggestion': 'Forecasts are consistently too low - consider increasing demand estimation parameters',
                    'type': 'bias',
                    'priority': 'high',
                    'confidence': 0.9,
                    'details': {
                        'too_low_percentage': feedback_dist['too_low'] / total_feedback * 100,
                        'recommended_action': 'Increase demand estimation by 10-15%'
                    }
                })

            if metrics.accuracy_score < 70:
                suggestions.append({
                    'suggestion': 'Low overall accuracy - consider comprehensive model review and retraining',
                    'type': 'model',
                    'priority': 'critical',
                    'confidence': 0.85,
                    'details': {
                        'current_accuracy': metrics.accuracy_score,
                        'recommended_action': 'Full model retraining with expanded feature set'
                    }
                })
            elif metrics.accuracy_score < 85:
                suggestions.append({
                    'suggestion': 'Moderate accuracy - consider feature engineering improvements',
                    'type': 'features',
                    'priority': 'medium',
                    'confidence': 0.8,
                    'details': {
                        'current_accuracy': metrics.accuracy_score,
                        'recommended_action': 'Add weather data, promotions, and seasonal features'
                    }
                })

            if metrics.average_confidence < 2.0:  # Below "medium" (2) on the 1-3 confidence scale
                suggestions.append({
                    'suggestion': 'Low confidence in feedback - consider improving feedback collection process',
                    'type': 'process',
                    'priority': 'medium',
                    'confidence': 0.75,
                    'details': {
                        'average_confidence': metrics.average_confidence,
                        'recommended_action': 'Provide clearer guidance to users on feedback submission'
                    }
                })

            if not suggestions:
                suggestions.append({
                    'suggestion': 'Forecast accuracy is good - consider expanding to additional products',
                    'type': 'expansion',
                    'priority': 'low',
                    'confidence': 0.85,
                    'details': {
                        'current_accuracy': metrics.accuracy_score,
                        'recommended_action': 'Extend forecasting to new product categories'
                    }
                })

            return suggestions

        except Exception as e:
            logger.error("Failed to generate improvement suggestions", error=str(e))
            raise Exception(f"Failed to generate suggestions: {str(e)}")


# Helper class for feedback analysis
class FeedbackAnalyzer:
    """
    Helper class for analyzing feedback patterns
    """

    @staticmethod
    def detect_feedback_patterns(feedback_list: List[ForecastFeedback]) -> Dict[str, Any]:
        """
        Detect patterns in feedback data
        """
        if not feedback_list:
            return {'patterns': [], 'anomalies': []}

        patterns = []
        anomalies = []

        # Simple pattern detection (a real implementation would be more sophisticated)
        feedback_types = [f.feedback_type for f in feedback_list]

        if len(set(feedback_types)) == 1:
            patterns.append({
                'type': 'consistent_feedback',
                'pattern': f'All feedback is "{feedback_types[0]}"',
                'confidence': 0.9
            })

        return {'patterns': patterns, 'anomalies': anomalies}


# Helper class for accuracy calculation
class AccuracyCalculator:
    """
    Helper class for calculating forecast accuracy metrics
    """

    @staticmethod
    def calculate_mape(actual: float, predicted: float) -> float:
        """
        Calculate the absolute percentage error for a single observation
        (the per-sample term of MAPE)
        """
        if actual == 0:
            return 0.0  # Convention: undefined for zero actuals, treated as no error
        return abs((actual - predicted) / actual) * 100

    @staticmethod
    def calculate_rmse(actual: float, predicted: float) -> float:
        """
        Calculate the root squared error for a single observation
        (for one sample this equals the absolute error)
        """
        return ((actual - predicted) ** 2) ** 0.5

    @staticmethod
    def feedback_to_accuracy_score(feedback_type: str) -> float:
        """
        Convert feedback type to accuracy score
        """
        feedback_scores = {
            'accurate': 100,
            'too_high': 50,
            'too_low': 50,
            'uncertain': 75
        }
        return feedback_scores.get(feedback_type, 75)
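
A minimal usage sketch for the service above, assuming a stub database_manager (the real one is injected by the forecasting service); the underscore-prefixed names are illustrative stand-ins, not part of the codebase:

    import asyncio
    from contextlib import asynccontextmanager

    class _StubDB:
        @asynccontextmanager
        async def get_session(self):
            yield None  # the demo code paths never touch the session

    async def demo():
        service = ForecastFeedbackService(database_manager=_StubDB())
        fid = str(uuid.uuid4())
        await service.submit_feedback(
            tenant_id="tenant-1",
            forecast_id=fid,
            feedback_type="too_high",
            confidence="high",
            actual_value=120.0,
        )
        metrics = await service.calculate_accuracy_metrics("tenant-1", fid)
        print(metrics.accuracy_score if metrics else "no feedback yet")

    asyncio.run(demo())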
314
services/inventory/app/api/enterprise_inventory.py
Normal file
314
services/inventory/app/api/enterprise_inventory.py
Normal file
@@ -0,0 +1,314 @@
"""
Enterprise Inventory API Endpoints
APIs for enterprise-level inventory management across outlets
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from typing import List, Optional
from datetime import date
from pydantic import BaseModel, Field
import structlog

from app.services.enterprise_inventory_service import EnterpriseInventoryService
from shared.auth.tenant_access import verify_tenant_permission_dep
from shared.clients import get_inventory_client, get_tenant_client
from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter()

# Pydantic models for request/response
class InventoryCoverageResponse(BaseModel):
    outlet_id: str = Field(..., description="Outlet tenant ID")
    outlet_name: str = Field(..., description="Outlet name")
    overall_coverage: float = Field(..., description="Overall inventory coverage percentage (0-100)")
    critical_items_count: int = Field(..., description="Number of items at critical stock levels")
    high_risk_items_count: int = Field(..., description="Number of items at high risk of stockout")
    medium_risk_items_count: int = Field(..., description="Number of items at medium risk")
    low_risk_items_count: int = Field(..., description="Number of items at low risk")
    fulfillment_rate: float = Field(..., description="Order fulfillment rate percentage (0-100)")
    last_updated: str = Field(..., description="Last inventory update timestamp")
    status: str = Field(..., description="Overall status: normal, warning, critical")


class ProductCoverageDetail(BaseModel):
    product_id: str = Field(..., description="Product ID")
    product_name: str = Field(..., description="Product name")
    current_stock: int = Field(..., description="Current stock quantity")
    safety_stock: int = Field(..., description="Safety stock threshold")
    coverage_percentage: float = Field(..., description="Coverage percentage (current/safety)")
    risk_level: str = Field(..., description="Risk level: critical, high, medium, low")
    days_until_stockout: Optional[int] = Field(None, description="Estimated days until stockout")


class OutletInventoryDetailResponse(BaseModel):
    outlet_id: str = Field(..., description="Outlet tenant ID")
    outlet_name: str = Field(..., description="Outlet name")
    overall_coverage: float = Field(..., description="Overall inventory coverage percentage")
    products: List[ProductCoverageDetail] = Field(..., description="Product-level inventory details")
    last_updated: str = Field(..., description="Last update timestamp")


class NetworkInventorySummary(BaseModel):
    total_outlets: int = Field(..., description="Total number of outlets")
    average_coverage: float = Field(..., description="Network average inventory coverage")
    average_fulfillment_rate: float = Field(..., description="Network average fulfillment rate")
    critical_outlets: int = Field(..., description="Number of outlets with critical status")
    warning_outlets: int = Field(..., description="Number of outlets with warning status")
    normal_outlets: int = Field(..., description="Number of outlets with normal status")
    total_critical_items: int = Field(..., description="Total critical items across network")
    network_health_score: float = Field(..., description="Overall network health score (0-100)")


class InventoryAlert(BaseModel):
    alert_id: str = Field(..., description="Alert ID")
    outlet_id: str = Field(..., description="Outlet ID")
    outlet_name: str = Field(..., description="Outlet name")
    product_id: Optional[str] = Field(None, description="Product ID if applicable")
    product_name: Optional[str] = Field(None, description="Product name if applicable")
    alert_type: str = Field(..., description="Type of alert: stockout_risk, low_coverage, etc.")
    severity: str = Field(..., description="Severity: critical, high, medium, low")
    current_coverage: float = Field(..., description="Current inventory coverage percentage")
    threshold: float = Field(..., description="Threshold that triggered alert")
    timestamp: str = Field(..., description="Alert timestamp")
    message: str = Field(..., description="Alert message")


async def get_enterprise_inventory_service() -> "EnterpriseInventoryService":
    """Dependency injection for EnterpriseInventoryService"""
    inventory_client = get_inventory_client(settings, "inventory-service")
    tenant_client = get_tenant_client(settings, "inventory-service")
    return EnterpriseInventoryService(
        inventory_client=inventory_client,
        tenant_client=tenant_client
    )
@router.get("/tenants/{parent_id}/outlets/inventory-coverage",
|
||||
response_model=List[InventoryCoverageResponse],
|
||||
summary="Get inventory coverage for all outlets in network")
|
||||
async def get_outlet_inventory_coverage(
|
||||
parent_id: str,
|
||||
min_coverage: Optional[float] = Query(None, description="Filter outlets with coverage below this threshold"),
|
||||
risk_level: Optional[str] = Query(None, description="Filter by risk level: critical, high, medium, low"),
|
||||
enterprise_inventory_service: EnterpriseInventoryService = Depends(get_enterprise_inventory_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get inventory coverage metrics for all child outlets in a parent tenant's network
|
||||
|
||||
This endpoint provides a comprehensive view of inventory health across all outlets,
|
||||
enabling enterprise managers to identify stockout risks and prioritize inventory transfers.
|
||||
"""
|
||||
try:
|
||||
# Verify this is a parent tenant
|
||||
tenant_info = await enterprise_inventory_service.tenant_client.get_tenant(parent_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access outlet inventory coverage"
|
||||
)
|
||||
|
||||
# Get all child outlets for this parent
|
||||
child_outlets = await enterprise_inventory_service.get_child_outlets(parent_id)
|
||||
|
||||
if not child_outlets:
|
||||
return []
|
||||
|
||||
# Get inventory coverage for each outlet
|
||||
coverage_data = []
|
||||
for outlet in child_outlets:
|
||||
outlet_id = outlet['id']
|
||||
|
||||
# Get inventory coverage data
|
||||
coverage = await enterprise_inventory_service.get_inventory_coverage(outlet_id)
|
||||
|
||||
if coverage:
|
||||
# Apply filters if specified
|
||||
if min_coverage is not None and coverage['overall_coverage'] >= min_coverage:
|
||||
continue
|
||||
if risk_level is not None and coverage.get('status') != risk_level:
|
||||
continue
|
||||
|
||||
coverage_data.append(coverage)
|
||||
|
||||
# Sort by coverage (lowest first) to prioritize critical outlets
|
||||
coverage_data.sort(key=lambda x: x['overall_coverage'])
|
||||
|
||||
return coverage_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get outlet inventory coverage", error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get inventory coverage: {str(e)}")
|
||||
|
||||
|
||||
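
# Example request against the endpoint above (hypothetical host and IDs), returning
# only outlets whose coverage has fallen below 50%:
#
#     GET /tenants/parent-123/outlets/inventory-coverage?min_coverage=50
#
# The response is a JSON list of InventoryCoverageResponse objects, sorted worst-first.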
@router.get("/tenants/{parent_id}/outlets/inventory-summary",
|
||||
response_model=NetworkInventorySummary,
|
||||
summary="Get network-wide inventory summary")
|
||||
async def get_network_inventory_summary(
|
||||
parent_id: str,
|
||||
enterprise_inventory_service: EnterpriseInventoryService = Depends(get_enterprise_inventory_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get aggregated inventory summary across the entire network
|
||||
|
||||
Provides key metrics for network health monitoring and decision making.
|
||||
"""
|
||||
try:
|
||||
# Verify this is a parent tenant
|
||||
tenant_info = await enterprise_inventory_service.tenant_client.get_tenant(parent_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access network inventory summary"
|
||||
)
|
||||
|
||||
return await enterprise_inventory_service.get_network_inventory_summary(parent_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get network inventory summary", error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get inventory summary: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/tenants/{parent_id}/outlets/{outlet_id}/inventory-details",
|
||||
response_model=OutletInventoryDetailResponse,
|
||||
summary="Get detailed inventory for specific outlet")
|
||||
async def get_outlet_inventory_details(
|
||||
parent_id: str,
|
||||
outlet_id: str,
|
||||
product_id: Optional[str] = Query(None, description="Filter by specific product ID"),
|
||||
risk_level: Optional[str] = Query(None, description="Filter products by risk level"),
|
||||
enterprise_inventory_service: EnterpriseInventoryService = Depends(get_enterprise_inventory_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get detailed product-level inventory data for a specific outlet
|
||||
|
||||
Enables drill-down analysis of inventory issues at the product level.
|
||||
"""
|
||||
try:
|
||||
# Verify parent-child relationship
|
||||
await enterprise_inventory_service.verify_parent_child_relationship(parent_id, outlet_id)
|
||||
|
||||
return await enterprise_inventory_service.get_outlet_inventory_details(outlet_id, product_id, risk_level)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get outlet inventory details", error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get inventory details: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/tenants/{parent_id}/inventory-alerts",
|
||||
response_model=List[InventoryAlert],
|
||||
summary="Get real-time inventory alerts across network")
|
||||
async def get_network_inventory_alerts(
|
||||
parent_id: str,
|
||||
severity: Optional[str] = Query(None, description="Filter by severity: critical, high, medium, low"),
|
||||
alert_type: Optional[str] = Query(None, description="Filter by alert type"),
|
||||
limit: int = Query(50, description="Maximum number of alerts to return"),
|
||||
enterprise_inventory_service: EnterpriseInventoryService = Depends(get_enterprise_inventory_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get real-time inventory alerts across all outlets
|
||||
|
||||
Provides actionable alerts for inventory management and stockout prevention.
|
||||
"""
|
||||
try:
|
||||
# Verify this is a parent tenant
|
||||
tenant_info = await enterprise_inventory_service.tenant_client.get_tenant(parent_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access network inventory alerts"
|
||||
)
|
||||
|
||||
alerts = await enterprise_inventory_service.get_inventory_alerts(parent_id)
|
||||
|
||||
# Apply filters
|
||||
if severity:
|
||||
alerts = [alert for alert in alerts if alert.get('severity') == severity]
|
||||
if alert_type:
|
||||
alerts = [alert for alert in alerts if alert.get('alert_type') == alert_type]
|
||||
|
||||
# Sort by severity (critical first) and timestamp (newest first)
|
||||
severity_order = {'critical': 1, 'high': 2, 'medium': 3, 'low': 4}
|
||||
alerts.sort(key=lambda x: (severity_order.get(x.get('severity', 'low'), 5), -int(x.get('timestamp', 0))))
|
||||
|
||||
return alerts[:limit]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get inventory alerts", error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get inventory alerts: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/tenants/{parent_id}/inventory-transfers/recommend",
|
||||
summary="Get inventory transfer recommendations")
|
||||
async def get_inventory_transfer_recommendations(
|
||||
parent_id: str,
|
||||
urgency: str = Query("medium", description="Urgency level: low, medium, high, critical"),
|
||||
enterprise_inventory_service: EnterpriseInventoryService = Depends(get_enterprise_inventory_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get AI-powered inventory transfer recommendations
|
||||
|
||||
Analyzes inventory levels across outlets and suggests optimal transfers
|
||||
to prevent stockouts and balance inventory.
|
||||
"""
|
||||
try:
|
||||
# Verify this is a parent tenant
|
||||
tenant_info = await enterprise_inventory_service.tenant_client.get_tenant(parent_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can request transfer recommendations"
|
||||
)
|
||||
|
||||
recommendations = await enterprise_inventory_service.get_transfer_recommendations(parent_id, urgency)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'recommendations': recommendations,
|
||||
'message': f'Generated {len(recommendations)} transfer recommendations'
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get transfer recommendations", error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get recommendations: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/tenants/{parent_id}/inventory/coverage-trends",
|
||||
summary="Get inventory coverage trends over time")
|
||||
async def get_inventory_coverage_trends(
|
||||
parent_id: str,
|
||||
days: int = Query(30, description="Number of days to analyze"),
|
||||
enterprise_inventory_service: EnterpriseInventoryService = Depends(get_enterprise_inventory_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get historical inventory coverage trends
|
||||
|
||||
Enables analysis of inventory performance over time.
|
||||
"""
|
||||
try:
|
||||
# Verify this is a parent tenant
|
||||
tenant_info = await enterprise_inventory_service.tenant_client.get_tenant(parent_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access coverage trends"
|
||||
)
|
||||
|
||||
trends = await enterprise_inventory_service.get_coverage_trends(parent_id, days)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'trends': trends,
|
||||
'period': f'Last {days} days'
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get coverage trends", error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get coverage trends: {str(e)}")
|
||||
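
A minimal client-side sketch for exercising the endpoints above, assuming httpx, a hypothetical base URL, and a bearer token accepted by verify_tenant_permission_dep:

    import asyncio
    import httpx

    BASE = "http://localhost:8000"                 # hypothetical service address
    HEADERS = {"Authorization": "Bearer <token>"}  # placeholder credentials

    async def fetch_network_summary(parent_id: str) -> dict:
        async with httpx.AsyncClient(base_url=BASE, headers=HEADERS) as client:
            resp = await client.get(f"/tenants/{parent_id}/outlets/inventory-summary")
            resp.raise_for_status()
            return resp.json()  # matches the NetworkInventorySummary shape

    print(asyncio.run(fetch_network_summary("parent-123")))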
@@ -32,7 +32,8 @@ from app.api import (
     analytics,
     sustainability,
     audit,
-    ml_insights
+    ml_insights,
+    enterprise_inventory
 )
 from app.api.internal_alert_trigger import router as internal_alert_trigger_router
 from app.api.internal_demo import router as internal_demo_router
@@ -217,6 +218,7 @@ service.add_router(internal_demo.router, tags=["internal-demo"])
 service.add_router(ml_insights.router)  # ML insights endpoint
 service.add_router(ml_insights.internal_router)  # Internal ML insights endpoint for demo cloning
 service.add_router(internal_alert_trigger_router)  # Internal alert trigger for demo cloning
+service.add_router(enterprise_inventory.router)  # Enterprise inventory endpoints


 if __name__ == "__main__":
473
services/inventory/app/services/enterprise_inventory_service.py
Normal file
473
services/inventory/app/services/enterprise_inventory_service.py
Normal file
@@ -0,0 +1,473 @@
"""
Enterprise Inventory Service
Business logic for enterprise-level inventory management across outlets
"""

from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import uuid
import structlog
from fastapi import HTTPException  # raised by verify_parent_child_relationship below

logger = structlog.get_logger()


class EnterpriseInventoryService:
    """
    Service for managing inventory across enterprise networks
    """

    def __init__(self, inventory_client, tenant_client):
        self.inventory_client = inventory_client
        self.tenant_client = tenant_client

    async def get_child_outlets(self, parent_id: str) -> List[Dict[str, Any]]:
        """
        Get all child outlets for a parent tenant
        """
        try:
            # Get child tenants from tenant service
            children = await self.tenant_client.get_child_tenants(parent_id)

            # Enrich with location data
            enriched_outlets = []
            for child in children:
                # Get location data for this outlet
                locations = await self.tenant_client.get_tenant_locations(child['id'])

                outlet_info = {
                    'id': child['id'],
                    'name': child['name'],
                    'subdomain': child.get('subdomain'),
                    'location': locations[0] if locations else None
                }
                enriched_outlets.append(outlet_info)

            return enriched_outlets

        except Exception as e:
            logger.error("Failed to get child outlets", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get child outlets: {str(e)}")

    async def get_inventory_coverage(self, outlet_id: str) -> Optional[Dict[str, Any]]:
        """
        Get inventory coverage metrics for a specific outlet; returns None when
        the outlet has no inventory data
        """
        try:
            # Get current inventory data
            inventory_data = await self.inventory_client.get_current_inventory(outlet_id)

            if not inventory_data or not inventory_data.get('items'):
                return None

            # Calculate coverage metrics
            total_items = len(inventory_data['items'])
            critical_count = 0
            high_risk_count = 0
            medium_risk_count = 0
            low_risk_count = 0
            total_coverage = 0

            for item in inventory_data['items']:
                current_stock = item.get('current_stock', 0)
                safety_stock = item.get('safety_stock', 1)  # Avoid division by zero

                if safety_stock <= 0:
                    safety_stock = 1

                coverage = min(100, (current_stock / safety_stock) * 100)
                total_coverage += coverage

                # Determine risk level
                if coverage < 30:
                    critical_count += 1
                elif coverage < 50:
                    high_risk_count += 1
                elif coverage < 70:
                    medium_risk_count += 1
                else:
                    low_risk_count += 1

            # Calculate average coverage
            avg_coverage = total_coverage / total_items if total_items > 0 else 0

            # Get fulfillment rate (simplified - in a real implementation this would come from the orders service)
            fulfillment_rate = await self._calculate_fulfillment_rate(outlet_id)

            # Determine overall status
            status = self._determine_inventory_status(critical_count, high_risk_count, avg_coverage)

            return {
                'outlet_id': outlet_id,
                'outlet_name': inventory_data.get('tenant_name', f'Outlet {outlet_id}'),
                'overall_coverage': round(avg_coverage, 1),
                'critical_items_count': critical_count,
                'high_risk_items_count': high_risk_count,
                'medium_risk_items_count': medium_risk_count,
                'low_risk_items_count': low_risk_count,
                'fulfillment_rate': round(fulfillment_rate, 1),
                'last_updated': datetime.now().isoformat(),
                'status': status
            }

        except Exception as e:
            logger.error("Failed to get inventory coverage", outlet_id=outlet_id, error=str(e))
            raise Exception(f"Failed to get inventory coverage: {str(e)}")
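
    # Worked example (hypothetical stock figures): an item with current_stock=12 and
    # safety_stock=40 has coverage = min(100, 12/40 * 100) = 30.0, which lands in the
    # "high" risk band (30 <= coverage < 50); at current_stock=50 the coverage caps
    # at 100 and the item counts as "low" risk.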

    async def _calculate_fulfillment_rate(self, outlet_id: str) -> float:
        """
        Calculate fulfillment rate for an outlet (simplified)
        In a real implementation, this would query the orders service
        """
        # This is a placeholder - a real implementation would:
        # 1. Get recent orders from the orders service
        # 2. Calculate the % successfully fulfilled
        # 3. Return the rate

        # For demo purposes, return a reasonable default
        return 95.0

    def _determine_inventory_status(self, critical_count: int, high_risk_count: int, avg_coverage: float) -> str:
        """
        Determine overall inventory status based on risk factors
        """
        if critical_count > 5 or (critical_count > 0 and avg_coverage < 40):
            return 'critical'
        elif high_risk_count > 3 or (high_risk_count > 0 and avg_coverage < 60):
            return 'warning'
        else:
            return 'normal'

    async def get_network_inventory_summary(self, parent_id: str) -> Dict[str, Any]:
        """
        Get aggregated inventory summary across the entire network
        """
        try:
            # Get all child outlets
            child_outlets = await self.get_child_outlets(parent_id)

            if not child_outlets:
                return {
                    'total_outlets': 0,
                    'average_coverage': 0,
                    'average_fulfillment_rate': 0,
                    'critical_outlets': 0,
                    'warning_outlets': 0,
                    'normal_outlets': 0,
                    'total_critical_items': 0,
                    'network_health_score': 0
                }

            # Get coverage for each outlet
            coverage_data = []
            for outlet in child_outlets:
                coverage = await self.get_inventory_coverage(outlet['id'])
                if coverage:
                    coverage_data.append(coverage)

            if not coverage_data:
                return {
                    'total_outlets': len(child_outlets),
                    'average_coverage': 0,
                    'average_fulfillment_rate': 0,
                    'critical_outlets': 0,
                    'warning_outlets': 0,
                    'normal_outlets': len(child_outlets),
                    'total_critical_items': 0,
                    'network_health_score': 0
                }

            # Calculate network metrics
            total_coverage = sum(c['overall_coverage'] for c in coverage_data)
            total_fulfillment = sum(c['fulfillment_rate'] for c in coverage_data)

            avg_coverage = total_coverage / len(coverage_data)
            avg_fulfillment = total_fulfillment / len(coverage_data)

            critical_outlets = sum(1 for c in coverage_data if c['status'] == 'critical')
            warning_outlets = sum(1 for c in coverage_data if c['status'] == 'warning')
            normal_outlets = sum(1 for c in coverage_data if c['status'] == 'normal')

            total_critical_items = sum(c['critical_items_count'] for c in coverage_data)

            # Calculate network health score (weighted average of coverage and fulfillment)
            network_health = round(avg_coverage * 0.6 + avg_fulfillment * 0.4, 1)

            return {
                'total_outlets': len(child_outlets),
                'average_coverage': round(avg_coverage, 1),
                'average_fulfillment_rate': round(avg_fulfillment, 1),
                'critical_outlets': critical_outlets,
                'warning_outlets': warning_outlets,
                'normal_outlets': normal_outlets,
                'total_critical_items': total_critical_items,
                'network_health_score': network_health
            }

        except Exception as e:
            logger.error("Failed to get network inventory summary", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get network inventory summary: {str(e)}")
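
    # Worked example (hypothetical network): with avg_coverage = 80.0 and
    # avg_fulfillment = 95.0, the health score is 80.0 * 0.6 + 95.0 * 0.4
    # = 48.0 + 38.0 = 86.0.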

    async def get_outlet_inventory_details(self, outlet_id: str, product_id: Optional[str] = None, risk_level: Optional[str] = None) -> Dict[str, Any]:
        """
        Get detailed product-level inventory data for a specific outlet
        """
        try:
            # Get current inventory data
            inventory_data = await self.inventory_client.get_current_inventory(outlet_id)

            if not inventory_data or not inventory_data.get('items'):
                return {
                    'outlet_id': outlet_id,
                    'outlet_name': (inventory_data or {}).get('tenant_name', f'Outlet {outlet_id}'),
                    'overall_coverage': 0,
                    'products': [],
                    'last_updated': datetime.now().isoformat()
                }

            # Process product details
            products = []
            total_coverage = 0
            counted_items = 0  # items included in the coverage average (after the product filter)

            for item in inventory_data['items']:
                # Filter by product_id if specified
                if product_id and item.get('product_id') != product_id:
                    continue

                current_stock = item.get('current_stock', 0)
                safety_stock = item.get('safety_stock', 1)

                if safety_stock <= 0:
                    safety_stock = 1

                coverage = min(100, (current_stock / safety_stock) * 100)
                total_coverage += coverage
                counted_items += 1

                # Determine risk level
                if coverage < 30:
                    risk = 'critical'
                elif coverage < 50:
                    risk = 'high'
                elif coverage < 70:
                    risk = 'medium'
                else:
                    risk = 'low'

                # Filter by risk level if specified
                if risk_level and risk != risk_level:
                    continue

                # Calculate days until stockout (simplified)
                daily_usage = item.get('average_daily_usage', 1)
                days_until_stockout = None

                if daily_usage > 0:
                    # max(0, ...) already clamps negative values when stock is below safety stock
                    days_until_stockout = max(0, int((current_stock - safety_stock) / daily_usage))

                product_detail = {
                    'product_id': item.get('product_id'),
                    'product_name': item.get('product_name', 'Unknown Product'),
                    'current_stock': current_stock,
                    'safety_stock': safety_stock,
                    'coverage_percentage': round(coverage, 1),
                    'risk_level': risk,
                    'days_until_stockout': days_until_stockout
                }

                products.append(product_detail)

            # Calculate overall coverage over the items actually considered
            avg_coverage = total_coverage / counted_items if counted_items else 0

            return {
                'outlet_id': outlet_id,
                'outlet_name': inventory_data.get('tenant_name', f'Outlet {outlet_id}'),
                'overall_coverage': round(avg_coverage, 1),
                'products': products,
                'last_updated': datetime.now().isoformat()
            }

        except Exception as e:
            logger.error("Failed to get outlet inventory details", outlet_id=outlet_id, error=str(e))
            raise Exception(f"Failed to get outlet inventory details: {str(e)}")

    async def get_inventory_alerts(self, parent_id: str) -> List[Dict[str, Any]]:
        """
        Get real-time inventory alerts across all outlets
        """
        try:
            # Get all child outlets
            child_outlets = await self.get_child_outlets(parent_id)

            alerts = []

            for outlet in child_outlets:
                outlet_id = outlet['id']
                outlet_name = outlet['name']

                # Get inventory coverage for this outlet
                coverage = await self.get_inventory_coverage(outlet_id)

                if coverage:
                    # Create alerts for critical items
                    if coverage['critical_items_count'] > 0:
                        alerts.append({
                            'alert_id': str(uuid.uuid4()),
                            'outlet_id': outlet_id,
                            'outlet_name': outlet_name,
                            'product_id': None,
                            'product_name': None,
                            'alert_type': 'low_coverage',
                            'severity': 'critical',
                            'current_coverage': coverage['overall_coverage'],
                            'threshold': 30,
                            'timestamp': datetime.now().isoformat(),
                            'message': f"Critical inventory coverage: {coverage['overall_coverage']}% (threshold: 30%)"
                        })

                    # Create alerts for high risk items
                    if coverage['high_risk_items_count'] > 0:
                        alerts.append({
                            'alert_id': str(uuid.uuid4()),
                            'outlet_id': outlet_id,
                            'outlet_name': outlet_name,
                            'product_id': None,
                            'product_name': None,
                            'alert_type': 'stockout_risk',
                            'severity': 'high',
                            'current_coverage': coverage['overall_coverage'],
                            'threshold': 50,
                            'timestamp': datetime.now().isoformat(),
                            'message': f"High stockout risk: {coverage['overall_coverage']}% coverage"
                        })

            return alerts

        except Exception as e:
            logger.error("Failed to get inventory alerts", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get inventory alerts: {str(e)}")

    async def get_transfer_recommendations(self, parent_id: str, urgency: str = "medium") -> List[Dict[str, Any]]:
        """
        Get AI-powered inventory transfer recommendations
        """
        try:
            # Get inventory coverage for all outlets
            child_outlets = await self.get_child_outlets(parent_id)
            coverage_data = []

            for outlet in child_outlets:
                coverage = await self.get_inventory_coverage(outlet['id'])
                if coverage:
                    coverage_data.append(coverage)

            # Simple recommendation algorithm (a real implementation would be more sophisticated)
            recommendations = []

            # Find outlets with surplus and deficit
            surplus_outlets = [c for c in coverage_data if c['overall_coverage'] > 85]
            deficit_outlets = [c for c in coverage_data if c['overall_coverage'] < 60]

            # Generate transfer recommendations
            for deficit in deficit_outlets:
                for surplus in surplus_outlets:
                    # Transfer 2 coverage points per point below the 60% target, capped at 10
                    transfer_amount = min(10, (60 - deficit['overall_coverage']) * 2)

                    if transfer_amount > 0:
                        recommendations.append({
                            'recommendation_id': str(uuid.uuid4()),
                            'from_outlet_id': surplus['outlet_id'],
                            'from_outlet_name': surplus['outlet_name'],
                            'to_outlet_id': deficit['outlet_id'],
                            'to_outlet_name': deficit['outlet_name'],
                            'transfer_amount': transfer_amount,
                            'priority': self._calculate_priority(deficit, urgency),
                            'reason': f"Balance inventory: {surplus['outlet_name']} has {surplus['overall_coverage']}% coverage, {deficit['outlet_name']} has {deficit['overall_coverage']}% coverage",
                            'estimated_impact': f"Improve {deficit['outlet_name']} coverage by ~{transfer_amount}%"
                        })

            # Sort by priority
            recommendations.sort(key=lambda x: x['priority'], reverse=True)

            return recommendations

        except Exception as e:
            logger.error("Failed to get transfer recommendations", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get transfer recommendations: {str(e)}")

    def _calculate_priority(self, deficit_coverage: Dict[str, Any], urgency: str) -> int:
        """
        Calculate priority score for transfer recommendation
        """
        priority_scores = {
            'critical': 4,
            'high': 3,
            'medium': 2,
            'low': 1
        }

        urgency_score = priority_scores.get(urgency, 2)

        # Higher priority for lower coverage
        coverage_score = max(1, 5 - int(deficit_coverage['overall_coverage'] / 20))

        return urgency_score * coverage_score
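
    # Worked example (hypothetical request): urgency "high" scores 3; an outlet at
    # 35% coverage scores max(1, 5 - int(35 / 20)) = max(1, 5 - 1) = 4, giving a
    # combined priority of 3 * 4 = 12.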

    async def get_coverage_trends(self, parent_id: str, days: int = 30) -> List[Dict[str, Any]]:
        """
        Get historical inventory coverage trends
        """
        try:
            # In a real implementation, this would query historical data
            # For demo purposes, generate some sample trend data

            trends = []
            end_date = datetime.now()

            for i in range(days):
                day = end_date - timedelta(days=i)

                # Generate sample data with some variation
                base_coverage = 75
                variation = (i % 7) - 3  # Weekly pattern
                daily_variation = (i % 3) - 1  # Daily noise

                coverage = max(50, min(95, base_coverage + variation + daily_variation))

                trends.append({
                    'date': day.strftime('%Y-%m-%d'),
                    'average_coverage': round(coverage, 1),
                    'min_coverage': max(40, coverage - 15),
                    'max_coverage': min(95, coverage + 10)
                })

            # Sort by date (oldest first)
            trends.sort(key=lambda x: x['date'])

            return trends

        except Exception as e:
            logger.error("Failed to get coverage trends", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get coverage trends: {str(e)}")

    async def verify_parent_child_relationship(self, parent_id: str, child_id: str) -> bool:
        """
        Verify that a child tenant belongs to a parent tenant
        """
        try:
            # Get child tenant info
            child_info = await self.tenant_client.get_tenant(child_id)

            if child_info.get('parent_tenant_id') != parent_id:
                raise HTTPException(
                    status_code=403,
                    detail="Child tenant does not belong to specified parent"
                )

            return True

        except HTTPException:
            raise  # Let the 403 propagate instead of being rewrapped as a generic failure
        except Exception as e:
            logger.error("Failed to verify parent-child relationship", parent_id=parent_id, child_id=child_id, error=str(e))
            raise Exception(f"Failed to verify relationship: {str(e)}")
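
A minimal sketch of exercising this service with stub clients (the real clients come from shared.clients; the stub shapes below are assumptions for illustration):

    import asyncio

    class _StubInventoryClient:
        async def get_current_inventory(self, outlet_id):
            return {'tenant_name': 'Demo Outlet', 'items': [
                {'product_id': 'p1', 'current_stock': 12, 'safety_stock': 40},
                {'product_id': 'p2', 'current_stock': 90, 'safety_stock': 40},
            ]}

    class _StubTenantClient:
        async def get_child_tenants(self, parent_id):
            return [{'id': 'outlet-1', 'name': 'Demo Outlet'}]

        async def get_tenant_locations(self, tenant_id):
            return []

    async def demo():
        service = EnterpriseInventoryService(_StubInventoryClient(), _StubTenantClient())
        coverage = await service.get_inventory_coverage('outlet-1')
        print(coverage['overall_coverage'], coverage['status'])  # 65.0 normal

    asyncio.run(demo())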
445
services/tenant/app/api/network_alerts.py
Normal file
445
services/tenant/app/api/network_alerts.py
Normal file
@@ -0,0 +1,445 @@
"""
Network Alerts API
Endpoints for aggregating and managing alerts across enterprise networks
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from typing import List, Dict, Any, Optional
from datetime import datetime
from pydantic import BaseModel, Field
import structlog
import uuid  # used when backfilling missing alert IDs below

from app.services.network_alerts_service import NetworkAlertsService
from shared.auth.tenant_access import verify_tenant_permission_dep
from shared.clients import get_tenant_client, get_alerts_client
from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter()

# Pydantic models for request/response
class NetworkAlert(BaseModel):
    alert_id: str = Field(..., description="Unique alert ID")
    tenant_id: str = Field(..., description="Tenant ID where alert originated")
    tenant_name: str = Field(..., description="Tenant name")
    alert_type: str = Field(..., description="Type of alert: inventory, production, delivery, etc.")
    severity: str = Field(..., description="Severity: critical, high, medium, low")
    title: str = Field(..., description="Alert title")
    message: str = Field(..., description="Alert message")
    timestamp: str = Field(..., description="Alert timestamp")
    status: str = Field(..., description="Alert status: active, acknowledged, resolved")
    source_system: str = Field(..., description="System that generated the alert")
    related_entity_id: Optional[str] = Field(None, description="ID of related entity (product, route, etc.)")
    related_entity_type: Optional[str] = Field(None, description="Type of related entity")


class AlertSeveritySummary(BaseModel):
    critical_count: int = Field(..., description="Number of critical alerts")
    high_count: int = Field(..., description="Number of high severity alerts")
    medium_count: int = Field(..., description="Number of medium severity alerts")
    low_count: int = Field(..., description="Number of low severity alerts")
    total_alerts: int = Field(..., description="Total number of alerts")


class AlertTypeSummary(BaseModel):
    inventory_alerts: int = Field(..., description="Inventory-related alerts")
    production_alerts: int = Field(..., description="Production-related alerts")
    delivery_alerts: int = Field(..., description="Delivery-related alerts")
    equipment_alerts: int = Field(..., description="Equipment-related alerts")
    quality_alerts: int = Field(..., description="Quality-related alerts")
    other_alerts: int = Field(..., description="Other types of alerts")


class NetworkAlertsSummary(BaseModel):
    total_alerts: int = Field(..., description="Total alerts across network")
    active_alerts: int = Field(..., description="Currently active alerts")
    acknowledged_alerts: int = Field(..., description="Acknowledged alerts")
    resolved_alerts: int = Field(..., description="Resolved alerts")
    severity_summary: AlertSeveritySummary = Field(..., description="Alerts by severity")
    type_summary: AlertTypeSummary = Field(..., description="Alerts by type")
    most_recent_alert: Optional[NetworkAlert] = Field(None, description="Most recent alert")


class AlertCorrelation(BaseModel):
    correlation_id: str = Field(..., description="Correlation group ID")
    primary_alert: NetworkAlert = Field(..., description="Primary alert in the group")
    related_alerts: List[NetworkAlert] = Field(..., description="Alerts correlated with primary alert")
    correlation_type: str = Field(..., description="Type of correlation: causal, temporal, spatial")
    correlation_strength: float = Field(..., description="Correlation strength (0-1)")
    impact_analysis: str = Field(..., description="Analysis of combined impact")


async def get_network_alerts_service() -> NetworkAlertsService:
    """Dependency injection for NetworkAlertsService"""
    tenant_client = get_tenant_client(settings, "tenant-service")
    alerts_client = get_alerts_client(settings, "tenant-service")
    return NetworkAlertsService(tenant_client, alerts_client)
@router.get("/tenants/{parent_id}/network/alerts",
|
||||
response_model=List[NetworkAlert],
|
||||
summary="Get aggregated alerts across network")
|
||||
async def get_network_alerts(
|
||||
parent_id: str,
|
||||
severity: Optional[str] = Query(None, description="Filter by severity: critical, high, medium, low"),
|
||||
alert_type: Optional[str] = Query(None, description="Filter by alert type"),
|
||||
status: Optional[str] = Query(None, description="Filter by status: active, acknowledged, resolved"),
|
||||
limit: int = Query(100, description="Maximum number of alerts to return"),
|
||||
network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get aggregated alerts across all child tenants in a parent network
|
||||
|
||||
This endpoint provides a unified view of alerts across the entire enterprise network,
|
||||
enabling network managers to identify and prioritize issues that require attention.
|
||||
"""
|
||||
try:
|
||||
# Verify this is a parent tenant
|
||||
tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access network alerts"
|
||||
)
|
||||
|
||||
# Get all child tenants
|
||||
child_tenants = await network_alerts_service.get_child_tenants(parent_id)
|
||||
|
||||
if not child_tenants:
|
||||
return []
|
||||
|
||||
# Aggregate alerts from all child tenants
|
||||
all_alerts = []
|
||||
|
||||
for child in child_tenants:
|
||||
child_id = child['id']
|
||||
child_name = child['name']
|
||||
|
||||
# Get alerts for this child tenant
|
||||
child_alerts = await network_alerts_service.get_alerts_for_tenant(child_id)
|
||||
|
||||
# Enrich with tenant information and apply filters
|
||||
for alert in child_alerts:
|
||||
enriched_alert = {
|
||||
'alert_id': alert.get('alert_id', str(uuid.uuid4())),
|
||||
'tenant_id': child_id,
|
||||
'tenant_name': child_name,
|
||||
'alert_type': alert.get('alert_type', 'unknown'),
|
||||
'severity': alert.get('severity', 'medium'),
|
||||
'title': alert.get('title', 'No title'),
|
||||
'message': alert.get('message', 'No message'),
|
||||
'timestamp': alert.get('timestamp', datetime.now().isoformat()),
|
||||
'status': alert.get('status', 'active'),
|
||||
'source_system': alert.get('source_system', 'unknown'),
|
||||
'related_entity_id': alert.get('related_entity_id'),
|
||||
'related_entity_type': alert.get('related_entity_type')
|
||||
}
|
||||
|
||||
# Apply filters
|
||||
if severity and enriched_alert['severity'] != severity:
|
||||
continue
|
||||
if alert_type and enriched_alert['alert_type'] != alert_type:
|
||||
continue
|
||||
if status and enriched_alert['status'] != status:
|
||||
continue
|
||||
|
||||
all_alerts.append(enriched_alert)
|
||||
|
||||
# Sort by severity (critical first) and timestamp (newest first)
|
||||
severity_order = {'critical': 1, 'high': 2, 'medium': 3, 'low': 4}
|
||||
all_alerts.sort(key=lambda x: (severity_order.get(x['severity'], 5), -int(x['timestamp'] or 0)))
|
||||
|
||||
return all_alerts[:limit]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get network alerts", parent_id=parent_id, error=str(e))
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get network alerts: {str(e)}")
|
||||
|
||||
|
||||
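
# Note on the two-pass sort above: Python's list.sort is stable, so sorting by
# timestamp first and then by severity preserves the newest-first order within each
# severity band. Hypothetical example: two "critical" alerts keep their relative
# recency after the second pass, and both rank ahead of every "high" alert.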
@router.get("/tenants/{parent_id}/network/alerts/summary",
            response_model=NetworkAlertsSummary,
            summary="Get network alerts summary")
async def get_network_alerts_summary(
    parent_id: str,
    network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get a summary of alerts across the network.

    Provides aggregated metrics and statistics about alerts across all child tenants.
    """
    try:
        # Verify this is a parent tenant
        tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can access network alerts summary"
            )

        # Get all network alerts
        all_alerts = await network_alerts_service.get_network_alerts(parent_id)

        if not all_alerts:
            return NetworkAlertsSummary(
                total_alerts=0,
                active_alerts=0,
                acknowledged_alerts=0,
                resolved_alerts=0,
                severity_summary=AlertSeveritySummary(
                    critical_count=0,
                    high_count=0,
                    medium_count=0,
                    low_count=0,
                    total_alerts=0
                ),
                type_summary=AlertTypeSummary(
                    inventory_alerts=0,
                    production_alerts=0,
                    delivery_alerts=0,
                    equipment_alerts=0,
                    quality_alerts=0,
                    other_alerts=0
                ),
                most_recent_alert=None
            )

        # Calculate status counts
        active_alerts = sum(1 for a in all_alerts if a['status'] == 'active')
        acknowledged_alerts = sum(1 for a in all_alerts if a['status'] == 'acknowledged')
        resolved_alerts = sum(1 for a in all_alerts if a['status'] == 'resolved')

        # Calculate severity summary
        severity_summary = AlertSeveritySummary(
            critical_count=sum(1 for a in all_alerts if a['severity'] == 'critical'),
            high_count=sum(1 for a in all_alerts if a['severity'] == 'high'),
            medium_count=sum(1 for a in all_alerts if a['severity'] == 'medium'),
            low_count=sum(1 for a in all_alerts if a['severity'] == 'low'),
            total_alerts=len(all_alerts)
        )

        # Calculate type summary
        type_summary = AlertTypeSummary(
            inventory_alerts=sum(1 for a in all_alerts if a['alert_type'] == 'inventory'),
            production_alerts=sum(1 for a in all_alerts if a['alert_type'] == 'production'),
            delivery_alerts=sum(1 for a in all_alerts if a['alert_type'] == 'delivery'),
            equipment_alerts=sum(1 for a in all_alerts if a['alert_type'] == 'equipment'),
            quality_alerts=sum(1 for a in all_alerts if a['alert_type'] == 'quality'),
            other_alerts=sum(1 for a in all_alerts if a['alert_type'] not in ['inventory', 'production', 'delivery', 'equipment', 'quality'])
        )

        # ISO-8601 timestamps sort lexicographically, so max() yields the newest alert
        most_recent_alert = max(all_alerts, key=lambda x: x['timestamp'])

        return NetworkAlertsSummary(
            total_alerts=len(all_alerts),
            active_alerts=active_alerts,
            acknowledged_alerts=acknowledged_alerts,
            resolved_alerts=resolved_alerts,
            severity_summary=severity_summary,
            type_summary=type_summary,
            most_recent_alert=most_recent_alert
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to get network alerts summary", parent_id=parent_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get alerts summary: {str(e)}")
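The per-severity and per-type tallies above make one pass over all_alerts per bucket. A collections.Counter sketch that computes the same numbers in a single pass each (field names mirror the alert dicts above; the sample data is made up):

# Single-pass tallies of the same summary counts using collections.Counter.
from collections import Counter

alerts = [
    {'severity': 'critical', 'alert_type': 'inventory', 'status': 'active'},
    {'severity': 'high', 'alert_type': 'delivery', 'status': 'resolved'},
    {'severity': 'high', 'alert_type': 'custom', 'status': 'active'},
]
severity_counts = Counter(a['severity'] for a in alerts)
status_counts = Counter(a['status'] for a in alerts)
known_types = {'inventory', 'production', 'delivery', 'equipment', 'quality'}
type_counts = Counter(
    a['alert_type'] if a['alert_type'] in known_types else 'other' for a in alerts
)
print(severity_counts['high'], status_counts['active'], type_counts['other'])  # 2 2 1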
@router.get("/tenants/{parent_id}/network/alerts/correlations",
            response_model=List[AlertCorrelation],
            summary="Get correlated alert groups")
async def get_correlated_alerts(
    parent_id: str,
    min_correlation_strength: float = Query(0.7, ge=0.5, le=1.0, description="Minimum correlation strength"),
    network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get groups of correlated alerts.

    Identifies alerts that are related or have cascading effects across the network.
    """
    try:
        # Verify this is a parent tenant
        tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can access alert correlations"
            )

        # Get all network alerts
        all_alerts = await network_alerts_service.get_network_alerts(parent_id)

        if not all_alerts:
            return []

        # Detect correlations (simplified for demo)
        correlations = await network_alerts_service.detect_alert_correlations(
            all_alerts, min_correlation_strength
        )

        return correlations

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to get correlated alerts", parent_id=parent_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get alert correlations: {str(e)}")
@router.post("/tenants/{parent_id}/network/alerts/{alert_id}/acknowledge",
             summary="Acknowledge network alert")
async def acknowledge_network_alert(
    parent_id: str,
    alert_id: str,
    network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Acknowledge a network alert.

    Marks an alert as acknowledged to indicate it is being addressed.
    """
    try:
        # Verify this is a parent tenant
        tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can acknowledge network alerts"
            )

        # Acknowledge the alert
        result = await network_alerts_service.acknowledge_alert(parent_id, alert_id)

        return {
            'success': result.get('success', True),
            'alert_id': alert_id,
            'status': 'acknowledged',
            'message': 'Alert acknowledged successfully'
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to acknowledge alert", parent_id=parent_id, alert_id=alert_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to acknowledge alert: {str(e)}")
@router.post("/tenants/{parent_id}/network/alerts/{alert_id}/resolve",
             summary="Resolve network alert")
async def resolve_network_alert(
    parent_id: str,
    alert_id: str,
    resolution_notes: Optional[str] = Query(None, description="Notes about resolution"),
    network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Resolve a network alert.

    Marks an alert as resolved after the issue has been addressed.
    """
    try:
        # Verify this is a parent tenant
        tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can resolve network alerts"
            )

        # Resolve the alert
        result = await network_alerts_service.resolve_alert(parent_id, alert_id, resolution_notes)

        return {
            'success': result.get('success', True),
            'alert_id': alert_id,
            'status': 'resolved',
            'resolution_notes': resolution_notes,
            'message': 'Alert resolved successfully'
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to resolve alert", parent_id=parent_id, alert_id=alert_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to resolve alert: {str(e)}")
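A client-side sketch of the acknowledge/resolve flow using httpx. The base URL, tenant and alert IDs, and the bearer token are placeholders, and the exact auth header shape depends on how verify_tenant_permission_dep is wired:

# Hypothetical client for the acknowledge/resolve endpoints; URL, IDs, and
# auth header are placeholders, not values from this commit.
import asyncio
import httpx

async def main() -> None:
    base = "http://localhost:8000"  # assumed service address
    headers = {"Authorization": "Bearer <token>"}  # depends on the auth dependency
    async with httpx.AsyncClient(base_url=base, headers=headers) as client:
        ack = await client.post("/tenants/parent-1/network/alerts/alert-1/acknowledge")
        print(ack.status_code, ack.json())
        res = await client.post(
            "/tenants/parent-1/network/alerts/alert-1/resolve",
            params={"resolution_notes": "Restocked flour at the affected outlet"},
        )
        print(res.status_code, res.json())

asyncio.run(main())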
@router.get("/tenants/{parent_id}/network/alerts/trends",
            summary="Get alert trends over time")
async def get_alert_trends(
    parent_id: str,
    days: int = Query(30, ge=7, le=365, description="Number of days to analyze"),
    network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get alert trends over time.

    Analyzes how alert patterns change over time to identify systemic issues.
    """
    try:
        # Verify this is a parent tenant
        tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can access alert trends"
            )

        # Get alert trends
        trends = await network_alerts_service.get_alert_trends(parent_id, days)

        return {
            'success': True,
            'trends': trends,
            'period': f'Last {days} days'
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to get alert trends", parent_id=parent_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get alert trends: {str(e)}")
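Each trend row carries a date plus per-severity counts (see get_alert_trends in the service file below). A small sketch smoothing the daily totals with a 7-day moving average, using made-up rows of the same shape:

# 7-day moving average over trend rows; the rows here are illustrative,
# matching the shape produced by NetworkAlertsService.get_alert_trends.
from collections import deque

rows = [{'date': f'2024-05-{d:02d}', 'total_alerts': count}
        for d, count in enumerate([5, 7, 6, 9, 4, 5, 8, 10, 6], start=1)]

window: deque = deque(maxlen=7)
smoothed = []
for row in rows:  # rows are already sorted oldest-first
    window.append(row['total_alerts'])
    smoothed.append({'date': row['date'],
                     'avg_7d': round(sum(window) / len(window), 2)})
print(smoothed[-1])  # {'date': '2024-05-09', 'avg_7d': 6.86}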
@router.get("/tenants/{parent_id}/network/alerts/prioritization",
            summary="Get prioritized alerts")
async def get_prioritized_alerts(
    parent_id: str,
    limit: int = Query(10, description="Maximum number of alerts to return"),
    network_alerts_service: NetworkAlertsService = Depends(get_network_alerts_service),
    verified_tenant: str = Depends(verify_tenant_permission_dep)
):
    """
    Get prioritized alerts based on impact and urgency.

    Ranks alerts by severity and recency as a proxy for potential business impact.
    """
    try:
        # Verify this is a parent tenant
        tenant_info = await network_alerts_service.tenant_client.get_tenant(parent_id)
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(
                status_code=403,
                detail="Only parent tenants can access prioritized alerts"
            )

        # Get prioritized alerts
        prioritized_alerts = await network_alerts_service.get_prioritized_alerts(parent_id, limit)

        return {
            'success': True,
            'prioritized_alerts': prioritized_alerts,
            'message': f'Top {len(prioritized_alerts)} prioritized alerts'
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Failed to get prioritized alerts", parent_id=parent_id, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to get prioritized alerts: {str(e)}")
# Standard-library imports used by the endpoints above; they execute at module
# import time, so placing them at the bottom works, though top-of-file is conventional.
from datetime import datetime, timedelta
import uuid
@@ -7,7 +7,7 @@ from fastapi import FastAPI
 from sqlalchemy import text
 from app.core.config import settings
 from app.core.database import database_manager
-from app.api import tenants, tenant_members, tenant_operations, webhooks, plans, subscription, tenant_settings, whatsapp_admin, usage_forecast, enterprise_upgrade, tenant_locations, tenant_hierarchy, internal_demo
+from app.api import tenants, tenant_members, tenant_operations, webhooks, plans, subscription, tenant_settings, whatsapp_admin, usage_forecast, enterprise_upgrade, tenant_locations, tenant_hierarchy, internal_demo, network_alerts
 from shared.service_base import StandardFastAPIService

@@ -157,6 +157,7 @@ service.add_router(tenant_locations.router, tags=["tenant-locations"]) # Tenant
 service.add_router(internal_demo.router, tags=["internal-demo"]) # Internal demo data cloning
 service.add_router(tenant_hierarchy.router, tags=["tenant-hierarchy"]) # Tenant hierarchy endpoints
 service.add_router(internal_demo.router, tags=["internal-demo"]) # Internal demo data cloning
+service.add_router(network_alerts.router, tags=["network-alerts"]) # Network alerts aggregation endpoints

 if __name__ == "__main__":
     import uvicorn
365
services/tenant/app/services/network_alerts_service.py
Normal file
@@ -0,0 +1,365 @@
# services/tenant/app/services/network_alerts_service.py
"""
Network Alerts Service
Business logic for aggregating and managing alerts across enterprise networks
"""

from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import uuid
import structlog

logger = structlog.get_logger()


class NetworkAlertsService:
    """
    Service for aggregating and managing alerts across enterprise networks
    """

    def __init__(self, tenant_client, alerts_client):
        self.tenant_client = tenant_client
        self.alerts_client = alerts_client

    async def get_child_tenants(self, parent_id: str) -> List[Dict[str, Any]]:
        """
        Get all child tenants for a parent tenant
        """
        try:
            # Get child tenants from tenant service
            children = await self.tenant_client.get_child_tenants(parent_id)

            # Enrich with tenant details
            enriched_children = []
            for child in children:
                child_details = await self.tenant_client.get_tenant(child['id'])
                enriched_children.append({
                    'id': child['id'],
                    'name': child_details.get('name', f"Outlet {child['id']}"),
                    'subdomain': child_details.get('subdomain'),
                    'city': child_details.get('city')
                })

            return enriched_children

        except Exception as e:
            logger.error("Failed to get child tenants", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get child tenants: {str(e)}")
    async def get_alerts_for_tenant(self, tenant_id: str) -> List[Dict[str, Any]]:
        """
        Get alerts for a specific tenant
        """
        try:
            # In a real implementation, this would call the alert service via
            # self.alerts_client; for demo purposes, simulate alert data.
            simulated_alerts = []

            # Generate some sample alerts
            alert_types = ['inventory', 'production', 'delivery', 'equipment', 'quality']
            severities = ['critical', 'high', 'medium', 'low']

            for i in range(3):  # Generate 3 sample alerts per tenant
                alert = {
                    'alert_id': str(uuid.uuid4()),
                    'tenant_id': tenant_id,
                    'alert_type': alert_types[i % len(alert_types)],
                    'severity': severities[i % len(severities)],
                    'title': f"{alert_types[i % len(alert_types)].title()} Alert Detected",
                    'message': f"Sample {alert_types[i % len(alert_types)]} alert for tenant {tenant_id}",
                    'timestamp': (datetime.now() - timedelta(hours=i)).isoformat(),
                    'status': 'active' if i < 2 else 'resolved',
                    'source_system': f"{alert_types[i % len(alert_types)]}-service",
                    'related_entity_id': f"entity-{i+1}",
                    'related_entity_type': alert_types[i % len(alert_types)]
                }
                simulated_alerts.append(alert)

            return simulated_alerts

        except Exception as e:
            logger.error("Failed to get alerts for tenant", tenant_id=tenant_id, error=str(e))
            raise Exception(f"Failed to get alerts: {str(e)}")
    async def get_network_alerts(self, parent_id: str) -> List[Dict[str, Any]]:
        """
        Get all alerts across the network
        """
        try:
            # Get all child tenants
            child_tenants = await self.get_child_tenants(parent_id)

            # Aggregate alerts from all child tenants
            all_alerts = []

            for child in child_tenants:
                child_id = child['id']
                child_alerts = await self.get_alerts_for_tenant(child_id)
                all_alerts.extend(child_alerts)

            return all_alerts

        except Exception as e:
            logger.error("Failed to get network alerts", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get network alerts: {str(e)}")
    async def detect_alert_correlations(
        self,
        alerts: List[Dict[str, Any]],
        min_correlation_strength: float = 0.7
    ) -> List[Dict[str, Any]]:
        """
        Detect correlations between alerts
        """
        try:
            # Simple correlation detection; a real implementation would also
            # weigh time proximity, shared entities, and cross-tenant cascades.
            correlations = []

            # Group alerts by type
            alert_groups = {}

            for alert in alerts:
                alert_type = alert['alert_type']
                if alert_type not in alert_groups:
                    alert_groups[alert_type] = []
                alert_groups[alert_type].append(alert)

            # Create correlation groups
            for alert_type, group in alert_groups.items():
                if len(group) >= 2:  # Only create correlations for groups with 2+ alerts
                    primary_alert = group[0]
                    related_alerts = group[1:]

                    correlation = {
                        'correlation_id': str(uuid.uuid4()),
                        'primary_alert': primary_alert,
                        'related_alerts': related_alerts,
                        'correlation_type': 'temporal',
                        'correlation_strength': 0.85,  # fixed demo value
                        'impact_analysis': f"Multiple {alert_type} alerts detected within short timeframe"
                    }

                    if correlation['correlation_strength'] >= min_correlation_strength:
                        correlations.append(correlation)

            return correlations

        except Exception as e:
            logger.error("Failed to detect alert correlations", error=str(e))
            raise Exception(f"Failed to detect correlations: {str(e)}")
    async def acknowledge_alert(self, parent_id: str, alert_id: str) -> Dict[str, Any]:
        """
        Acknowledge an alert
        """
        try:
            # In a real implementation, this would update the alert status in
            # the alert store; for demo purposes, simulate the operation.
            logger.info("Alert acknowledged", parent_id=parent_id, alert_id=alert_id)

            return {
                'success': True,
                'alert_id': alert_id,
                'status': 'acknowledged'
            }

        except Exception as e:
            logger.error("Failed to acknowledge alert", parent_id=parent_id, alert_id=alert_id, error=str(e))
            raise Exception(f"Failed to acknowledge alert: {str(e)}")
    async def resolve_alert(self, parent_id: str, alert_id: str, resolution_notes: Optional[str] = None) -> Dict[str, Any]:
        """
        Resolve an alert
        """
        try:
            # In a real implementation, this would update the alert status in
            # the alert store; for demo purposes, simulate the operation.
            logger.info("Alert resolved", parent_id=parent_id, alert_id=alert_id, notes=resolution_notes)

            return {
                'success': True,
                'alert_id': alert_id,
                'status': 'resolved',
                'resolution_notes': resolution_notes
            }

        except Exception as e:
            logger.error("Failed to resolve alert", parent_id=parent_id, alert_id=alert_id, error=str(e))
            raise Exception(f"Failed to resolve alert: {str(e)}")
    async def get_alert_trends(self, parent_id: str, days: int = 30) -> List[Dict[str, Any]]:
        """
        Get alert trends over time
        """
        try:
            # Simulate trend data
            trends = []
            end_date = datetime.now()

            # Generate daily trend data
            for i in range(days):
                date = end_date - timedelta(days=i)

                # Simulate varying alert counts with a simple 7-day cycle plus noise
                base_count = 5
                weekly_variation = int((i % 7) * 1.5)
                daily_noise = (i % 3 - 1)

                alert_count = max(1, base_count + weekly_variation + daily_noise)

                trends.append({
                    'date': date.strftime('%Y-%m-%d'),
                    'total_alerts': alert_count,
                    'critical_alerts': max(0, int(alert_count * 0.1)),
                    'high_alerts': max(0, int(alert_count * 0.2)),
                    'medium_alerts': max(0, int(alert_count * 0.4)),
                    'low_alerts': max(0, int(alert_count * 0.3))
                })

            # Sort by date (oldest first)
            trends.sort(key=lambda x: x['date'])

            return trends

        except Exception as e:
            logger.error("Failed to get alert trends", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get alert trends: {str(e)}")
    async def get_prioritized_alerts(self, parent_id: str, limit: int = 10) -> List[Dict[str, Any]]:
        """
        Get prioritized alerts based on impact and urgency
        """
        try:
            # Get all network alerts
            all_alerts = await self.get_network_alerts(parent_id)

            if not all_alerts:
                return []

            # Simple prioritization (a real implementation could use ML):
            # priority is severity weighted by recency.
            severity_scores = {'critical': 4, 'high': 3, 'medium': 2, 'low': 1}

            for alert in all_alerts:
                severity_score = severity_scores.get(alert['severity'], 1)
                # Recency score: newer alerts get a higher multiplier
                # (3 for today, down to 1 for alerts two or more days old)
                timestamp = datetime.fromisoformat(alert['timestamp'])
                age_days = (datetime.now() - timestamp).days
                recency_score = max(1, 3 - age_days)

                alert['priority_score'] = severity_score * recency_score

            # Sort by priority score (highest first)
            all_alerts.sort(key=lambda x: x['priority_score'], reverse=True)

            # Return top N alerts, stripping the internal score
            prioritized = all_alerts[:limit]
            for alert in prioritized:
                alert.pop('priority_score', None)

            return prioritized

        except Exception as e:
            logger.error("Failed to get prioritized alerts", parent_id=parent_id, error=str(e))
            raise Exception(f"Failed to get prioritized alerts: {str(e)}")
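Because get_alerts_for_tenant only simulates data and detect_alert_correlations never touches the injected clients, the service can be smoke-tested without real dependencies. A sketch assuming the tenant service package is on the path so the module imports as app.services.network_alerts_service:

# Smoke-test sketch with stub clients; import path assumes the tenant service
# package layout from this commit. The demo code paths never call the clients.
import asyncio
from app.services.network_alerts_service import NetworkAlertsService

async def main() -> None:
    service = NetworkAlertsService(tenant_client=None, alerts_client=None)
    alerts = await service.get_alerts_for_tenant("outlet-1")  # simulated data
    print(f"{len(alerts)} alerts, first severity: {alerts[0]['severity']}")
    correlations = await service.detect_alert_correlations(alerts, 0.7)
    print(f"{len(correlations)} correlation groups")  # 0 here: all types distinct

asyncio.run(main())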
# Helper class for alert analysis
class AlertAnalyzer:
    """
    Helper class for analyzing alert patterns
    """

    @staticmethod
    def calculate_alert_severity_score(alert: Dict[str, Any]) -> float:
        """
        Calculate severity score for an alert
        """
        severity_scores = {'critical': 1.0, 'high': 0.75, 'medium': 0.5, 'low': 0.25}
        return severity_scores.get(alert['severity'], 0.25)

    @staticmethod
    def detect_alert_patterns(alerts: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Detect patterns in alert data
        """
        if not alerts:
            return {'patterns': [], 'anomalies': []}

        patterns = []
        anomalies = []

        # Simple pattern detection: count alerts by type
        alert_types = [a['alert_type'] for a in alerts]
        type_counts = {}

        for alert_type in alert_types:
            type_counts[alert_type] = type_counts.get(alert_type, 0) + 1

        # Detect if one type dominates
        total_alerts = len(alerts)
        for alert_type, count in type_counts.items():
            if count / total_alerts > 0.6:  # More than 60% of one type
                patterns.append({
                    'type': 'dominant_alert_type',
                    'pattern': f'{alert_type} alerts dominate ({count}/{total_alerts})',
                    'confidence': 0.85
                })

        return {'patterns': patterns, 'anomalies': anomalies}
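A quick check of the dominance rule: three of four alerts share a type (75% > 60%), so exactly one pattern is reported. The alert dicts are made up, and the import path assumes the same module as above:

# Exercising AlertAnalyzer.detect_alert_patterns with illustrative alerts.
from app.services.network_alerts_service import AlertAnalyzer

alerts = [
    {'alert_type': 'inventory', 'severity': 'high'},
    {'alert_type': 'inventory', 'severity': 'low'},
    {'alert_type': 'inventory', 'severity': 'medium'},
    {'alert_type': 'delivery', 'severity': 'critical'},
]
result = AlertAnalyzer.detect_alert_patterns(alerts)
print(result['patterns'][0]['pattern'])  # inventory alerts dominate (3/4)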
# Helper class for alert correlation
class AlertCorrelator:
    """
    Helper class for correlating alerts
    """

    @staticmethod
    def calculate_correlation_strength(alert1: Dict[str, Any], alert2: Dict[str, Any]) -> float:
        """
        Calculate correlation strength between two alerts
        """
        # Simple correlation based on type and time proximity
        same_type = 1.0 if alert1['alert_type'] == alert2['alert_type'] else 0.3

        time1 = datetime.fromisoformat(alert1['timestamp'])
        time2 = datetime.fromisoformat(alert2['timestamp'])
        time_diff_hours = abs((time2 - time1).total_seconds() / 3600)

        # Time proximity score (higher for closer times); decays to 0 over 24 hours
        time_proximity = max(0, 1.0 - min(1.0, time_diff_hours / 24))

        return same_type * time_proximity
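Two same-type alerts six hours apart score 1.0 × (1 − 6/24) = 0.75. A sketch with made-up timestamps, again assuming the module path from this commit:

# Pairwise correlation strength: same type, 6 hours apart -> 1.0 * 0.75 = 0.75.
from app.services.network_alerts_service import AlertCorrelator

a = {'alert_type': 'inventory', 'timestamp': '2024-05-01T06:00:00'}
b = {'alert_type': 'inventory', 'timestamp': '2024-05-01T12:00:00'}
print(AlertCorrelator.calculate_correlation_strength(a, b))  # 0.75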
# Helper class for alert prioritization
class AlertPrioritizer:
    """
    Helper class for prioritizing alerts
    """

    @staticmethod
    def calculate_priority_score(alert: Dict[str, Any]) -> float:
        """
        Calculate priority score for an alert
        """
        # Base score from severity
        severity_scores = {'critical': 100, 'high': 75, 'medium': 50, 'low': 25}
        base_score = severity_scores.get(alert['severity'], 25)

        # Add recency bonus (newer alerts get higher priority); decays to 0 over 50 hours
        timestamp = datetime.fromisoformat(alert['timestamp'])
        hours_old = (datetime.now() - timestamp).total_seconds() / 3600
        recency_bonus = max(0, 50 - hours_old)

        return base_score + recency_bonus