New alert system and control panel page
@@ -55,23 +55,26 @@ class HeadlineData(BaseModel):


 class HealthChecklistItem(BaseModel):
-    """Individual item in health checklist"""
-    icon: str = Field(..., description="Icon name: check, warning, alert")
+    """Individual item in tri-state health checklist"""
+    icon: str = Field(..., description="Icon name: check, warning, alert, ai_handled")
     text: Optional[str] = Field(None, description="Deprecated: Use textKey instead")
     textKey: Optional[str] = Field(None, description="i18n translation key")
     textParams: Optional[Dict[str, Any]] = Field(None, description="Parameters for i18n translation")
     actionRequired: bool = Field(..., description="Whether action is required")
+    status: str = Field(..., description="Tri-state status: good, ai_handled, needs_you")
     actionPath: Optional[str] = Field(None, description="Path to navigate for action")


 class BakeryHealthStatusResponse(BaseModel):
-    """Overall bakery health status"""
+    """Overall bakery health status with tri-state checklist"""
     status: str = Field(..., description="Health status: green, yellow, red")
     headline: HeadlineData = Field(..., description="i18n-ready status headline")
     lastOrchestrationRun: Optional[str] = Field(None, description="ISO timestamp of last orchestration")
     nextScheduledRun: str = Field(..., description="ISO timestamp of next scheduled run")
-    checklistItems: List[HealthChecklistItem] = Field(..., description="Status checklist")
+    checklistItems: List[HealthChecklistItem] = Field(..., description="Tri-state status checklist")
     criticalIssues: int = Field(..., description="Count of critical issues")
     pendingActions: int = Field(..., description="Count of pending actions")
+    aiPreventedIssues: int = Field(0, description="Count of issues AI prevented")


 class ReasoningInputs(BaseModel):
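Aside (not part of this commit): a minimal sketch of how a tri-state checklist item defined above might be constructed in the service layer; the key names and values below are illustrative assumptions.

item = HealthChecklistItem(
    icon="ai_handled",
    textKey="dashboard.health.low_stock_reordered",  # hypothetical i18n key
    textParams={"count": 2},
    actionRequired=False,
    status="ai_handled",
    actionPath="/inventory",
)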
@@ -207,10 +210,10 @@ async def get_bakery_health_status(
     db: AsyncSession = Depends(get_db)
 ) -> BakeryHealthStatusResponse:
     """
-    Get overall bakery health status
+    Get overall bakery health status with tri-state checklist

     This is the top-level indicator showing if the bakery is running smoothly
-    or if there are issues requiring attention.
+    or if there are issues requiring attention. Includes AI-prevented issues.
     """
     try:
         # Try to get from cache
@@ -227,11 +230,19 @@ async def get_bakery_health_status(

         async def fetch_alerts():
             try:
-                alerts_data = await alerts_client.get_alerts_summary(tenant_id) or {}
-                return alerts_data.get("critical_count", 0)
+                alerts_data = await alerts_client.get_alerts(tenant_id, limit=100) or {}
+                alerts_list = alerts_data.get("alerts", [])
+
+                # Count critical alerts
+                critical_count = sum(1 for a in alerts_list if a.get('priority_level') == 'CRITICAL')
+
+                # Count AI prevented issues
+                prevented_count = sum(1 for a in alerts_list if a.get('type_class') == 'prevented_issue')
+
+                return critical_count, prevented_count, alerts_list
             except Exception as e:
                 logger.warning(f"Failed to fetch alerts: {e}")
-                return 0
+                return 0, 0, []

         async def fetch_pending_pos():
             try:
@@ -260,24 +271,28 @@ async def get_bakery_health_status(
                 return 0

         # Execute all fetches in parallel
-        critical_alerts, pending_approvals, production_delays, out_of_stock_count = await asyncio.gather(
+        alerts_result, pending_approvals, production_delays, out_of_stock_count = await asyncio.gather(
             fetch_alerts(),
             fetch_pending_pos(),
             fetch_production_delays(),
             fetch_inventory()
         )

+        critical_alerts, ai_prevented_count, all_alerts = alerts_result
+
         # System errors (would come from monitoring system)
         system_errors = 0

-        # Calculate health status
+        # Calculate health status with tri-state checklist
         health_status = await dashboard_service.get_bakery_health_status(
             tenant_id=tenant_id,
             critical_alerts=critical_alerts,
             pending_approvals=pending_approvals,
             production_delays=production_delays,
             out_of_stock_count=out_of_stock_count,
-            system_errors=system_errors
+            system_errors=system_errors,
+            ai_prevented_count=ai_prevented_count,
+            action_needed_alerts=all_alerts
         )

         # Cache the result
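Aside: DashboardService.get_bakery_health_status itself is not shown in this diff. A rough sketch of how the tri-state overall status could be derived from the counts passed above; the helper name and thresholds are assumptions, not the actual implementation.

def derive_overall_status(critical_alerts: int, pending_approvals: int,
                          production_delays: int, out_of_stock_count: int,
                          system_errors: int, ai_prevented_count: int) -> str:
    # Hypothetical mapping: red when a human is needed now, yellow when items
    # are pending or the AI quietly handled something, green otherwise.
    if critical_alerts or out_of_stock_count or system_errors:
        return "red"
    if pending_approvals or production_delays or ai_prevented_count:
        return "yellow"
    return "green"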
@@ -501,6 +516,116 @@ async def get_production_timeline(
         raise HTTPException(status_code=500, detail=str(e))


+@router.get("/unified-action-queue")
+async def get_unified_action_queue(
+    tenant_id: str,
+    db: AsyncSession = Depends(get_db)
+) -> Dict[str, Any]:
+    """
+    Get unified action queue with time-based grouping
+
+    Combines all alerts (PO approvals, delivery tracking, production, etc.)
+    into URGENT (<6h), TODAY (<24h), and THIS WEEK (<7d) sections.
+    """
+    try:
+        dashboard_service = DashboardService(db)
+
+        # Fetch all alerts from alert processor
+        alerts_data = await alerts_client.get_alerts(tenant_id, limit=100) or {}
+        alerts = alerts_data.get("alerts", [])
+
+        # Build unified queue
+        action_queue = await dashboard_service.get_unified_action_queue(
+            tenant_id=tenant_id,
+            alerts=alerts
+        )
+
+        return action_queue
+
+    except Exception as e:
+        logger.error(f"Error getting unified action queue: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
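Aside: the time-based grouping happens inside DashboardService.get_unified_action_queue, which this diff does not show. A sketch of the bucketing described in the docstring, assuming each alert carries an ISO-formatted deadline field (the field name is an assumption):

from datetime import datetime, timezone

def bucket_by_deadline(alerts: list) -> dict:
    # Group alerts into URGENT (<6h), TODAY (<24h), THIS WEEK (<7d) buckets.
    now = datetime.now(timezone.utc)
    buckets = {"urgent": [], "today": [], "this_week": []}
    for alert in alerts:
        deadline = alert.get("deadline")  # assumed field name
        if not deadline:
            continue
        due = datetime.fromisoformat(deadline.replace('Z', '+00:00'))
        hours_left = (due - now).total_seconds() / 3600
        if hours_left < 6:
            buckets["urgent"].append(alert)
        elif hours_left < 24:
            buckets["today"].append(alert)
        elif hours_left < 24 * 7:
            buckets["this_week"].append(alert)
    return buckets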
@router.get("/execution-progress")
|
||||
async def get_execution_progress(
|
||||
tenant_id: str,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Get execution progress for today's plan
|
||||
|
||||
Shows plan vs actual for production batches, deliveries, and approvals
|
||||
"""
|
||||
try:
|
||||
dashboard_service = DashboardService(db)
|
||||
|
||||
# Fetch today's data in parallel
|
||||
async def fetch_todays_batches():
|
||||
try:
|
||||
batch_data = await production_client.get_todays_batches(tenant_id)
|
||||
if batch_data:
|
||||
return batch_data.get("batches", [])
|
||||
return []
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch today's batches: {e}")
|
||||
return []
|
||||
|
||||
async def fetch_expected_deliveries():
|
||||
try:
|
||||
# Get POs with expected deliveries today
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
|
||||
if pos_result and isinstance(pos_result, list):
|
||||
today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
today_end = today_start.replace(hour=23, minute=59, second=59)
|
||||
|
||||
deliveries_today = []
|
||||
for po in pos_result:
|
||||
expected_date = po.get("expected_delivery_date")
|
||||
if expected_date:
|
||||
if isinstance(expected_date, str):
|
||||
expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
|
||||
if today_start <= expected_date <= today_end:
|
||||
deliveries_today.append(po)
|
||||
|
||||
return deliveries_today
|
||||
return []
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch expected deliveries: {e}")
|
||||
return []
|
||||
|
||||
async def fetch_pending_approvals():
|
||||
try:
|
||||
po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100) or []
|
||||
return len(po_data) if isinstance(po_data, list) else 0
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch pending approvals: {e}")
|
||||
return 0
|
||||
|
||||
# Execute in parallel
|
||||
todays_batches, expected_deliveries, pending_approvals = await asyncio.gather(
|
||||
fetch_todays_batches(),
|
||||
fetch_expected_deliveries(),
|
||||
fetch_pending_approvals()
|
||||
)
|
||||
|
||||
# Calculate progress
|
||||
progress = await dashboard_service.get_execution_progress(
|
||||
tenant_id=tenant_id,
|
||||
todays_batches=todays_batches,
|
||||
expected_deliveries=expected_deliveries,
|
||||
pending_approvals=pending_approvals
|
||||
)
|
||||
|
||||
return progress
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting execution progress: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
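Aside: both new endpoints take tenant_id as a query parameter and return a plain JSON object. A client call might look like the sketch below; the base URL and any route prefix are placeholders, and httpx is assumed to be available.

import httpx

async def load_execution_progress(tenant_id: str) -> dict:
    # Query the new endpoint; adjust base_url and prefix to the deployment.
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.get("/execution-progress", params={"tenant_id": tenant_id})
        resp.raise_for_status()
        return resp.json()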
@router.get("/insights", response_model=InsightsResponse)
|
||||
async def get_insights(
|
||||
tenant_id: str,
|
||||
@@ -575,35 +700,32 @@ async def get_insights(

         async def fetch_savings():
             try:
-                # Get recent POs (last 7 days) and sum up optimization savings
-                seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
+                # Get prevented issue savings from alert analytics
+                analytics = await alerts_client.get_dashboard_analytics(tenant_id, days=7)

-                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=200)
-                if pos_result and isinstance(pos_result, list):
-                    weekly_savings = 0
-                    # Calculate savings from price optimization
-                    for po in pos_result:
-                        # Check if PO was created in last 7 days
-                        created_at = po.get("created_at")
-                        if created_at:
-                            if isinstance(created_at, str):
-                                created_at = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
-                            if created_at >= seven_days_ago:
-                                # Sum up savings from optimization
-                                optimization_data = po.get("optimization_data", {})
-                                if isinstance(optimization_data, dict):
-                                    savings = optimization_data.get("savings", 0) or 0
-                                    weekly_savings += float(savings)
+                if analytics:
+                    weekly_savings = analytics.get('estimated_savings_eur', 0)
+                    prevented_count = analytics.get('prevented_issues_count', 0)
+
+                    # Calculate trend from period comparison
+                    period_comparison = analytics.get('period_comparison', {})
+                    current_prevented = period_comparison.get('current_prevented', 0)
+                    previous_prevented = period_comparison.get('previous_prevented', 0)
+
+                    trend_percentage = 0
+                    if previous_prevented > 0:
+                        trend_percentage = ((current_prevented - previous_prevented) / previous_prevented) * 100

-                    # Default trend percentage (would need historical data for real trend)
                     return {
                         "weekly_savings": round(weekly_savings, 2),
-                        "trend_percentage": 12 if weekly_savings > 0 else 0
+                        "trend_percentage": round(trend_percentage, 1),
+                        "prevented_count": prevented_count
                     }
-                return {"weekly_savings": 0, "trend_percentage": 0}

+                return {"weekly_savings": 0, "trend_percentage": 0, "prevented_count": 0}
             except Exception as e:
                 logger.warning(f"Failed to calculate savings data: {e}")
-                return {"weekly_savings": 0, "trend_percentage": 0}
+                return {"weekly_savings": 0, "trend_percentage": 0, "prevented_count": 0}

         # Execute all fetches in parallel
         sustainability_data, inventory_data, delivery_data, savings_data = await asyncio.gather(
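A worked example of the new trend formula: if period_comparison reports previous_prevented = 4 and current_prevented = 5, then

trend_percentage = ((5 - 4) / 4) * 100  # = 25.0, returned as round(25.0, 1)

whereas the old code reported a hard-coded 12 whenever any savings were found.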