New alert service

This commit is contained in:
Urtzi Alfaro
2025-12-05 20:07:01 +01:00
parent 1fe3a73549
commit 667e6e0404
393 changed files with 26002 additions and 61033 deletions

View File

@@ -1,4 +1,3 @@
from .orchestration import router as orchestration_router
from .dashboard import router as dashboard_router
__all__ = ["orchestration_router", "dashboard_router"]
__all__ = ["orchestration_router"]

View File

@@ -1,800 +0,0 @@
# ================================================================
# services/orchestrator/app/api/dashboard.py
# ================================================================
"""
Dashboard API endpoints for JTBD-aligned bakery dashboard
"""
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
from datetime import datetime
import structlog
import asyncio
from app.core.database import get_db
from app.core.config import settings
from ..services.dashboard_service import DashboardService
from ..utils.cache import get_cached, set_cached, delete_pattern
from shared.clients import (
get_inventory_client,
get_production_client,
get_alerts_client,
ProductionServiceClient,
InventoryServiceClient,
AlertsServiceClient
)
from shared.clients.procurement_client import ProcurementServiceClient
# Module-level logger and downstream service clients shared by all endpoints.
logger = structlog.get_logger()
# Initialize service clients
# NOTE(review): clients are built once at import time; assumes `settings` is
# fully loaded before this module is imported — confirm startup order.
inventory_client = get_inventory_client(settings, "orchestrator")
production_client = get_production_client(settings, "orchestrator")
procurement_client = ProcurementServiceClient(settings)
alerts_client = get_alerts_client(settings, "orchestrator")
# All routes below are tenant-scoped: /api/v1/tenants/{tenant_id}/dashboard/...
router = APIRouter(prefix="/api/v1/tenants/{tenant_id}/dashboard", tags=["dashboard"])
# ============================================================
# Response Models
# ============================================================
class I18nData(BaseModel):
    """Generic i18n payload: a translation key plus optional interpolation params."""
    key: str = Field(..., description="i18n translation key")
    params: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Parameters for translation")
class HeadlineData(BaseModel):
    """i18n-ready headline data (same shape as I18nData but params is required)."""
    key: str = Field(..., description="i18n translation key")
    params: Dict[str, Any] = Field(default_factory=dict, description="Parameters for translation")
class HealthChecklistItem(BaseModel):
    """Individual item in the tri-state health checklist."""
    icon: str = Field(..., description="Icon name: check, warning, alert, ai_handled")
    # Legacy plain-text label; new clients should prefer textKey/textParams.
    text: Optional[str] = Field(None, description="Deprecated: Use textKey instead")
    textKey: Optional[str] = Field(None, description="i18n translation key")
    textParams: Optional[Dict[str, Any]] = Field(None, description="Parameters for i18n translation")
    actionRequired: bool = Field(..., description="Whether action is required")
    status: str = Field(..., description="Tri-state status: good, ai_handled, needs_you")
    actionPath: Optional[str] = Field(None, description="Path to navigate for action")
class BakeryHealthStatusResponse(BaseModel):
    """Overall bakery health status with tri-state checklist (top dashboard card)."""
    status: str = Field(..., description="Health status: green, yellow, red")
    headline: HeadlineData = Field(..., description="i18n-ready status headline")
    lastOrchestrationRun: Optional[str] = Field(None, description="ISO timestamp of last orchestration")
    nextScheduledRun: str = Field(..., description="ISO timestamp of next scheduled run")
    checklistItems: List[HealthChecklistItem] = Field(..., description="Tri-state status checklist")
    criticalIssues: int = Field(..., description="Count of critical issues")
    pendingActions: int = Field(..., description="Count of pending actions")
    aiPreventedIssues: int = Field(0, description="Count of issues AI prevented")
class ReasoningInputs(BaseModel):
    """Inputs the orchestrator used for decision making (transparency card)."""
    customerOrders: int = Field(..., description="Number of customer orders analyzed")
    historicalDemand: bool = Field(..., description="Whether historical data was used")
    inventoryLevels: bool = Field(..., description="Whether inventory levels were considered")
    aiInsights: bool = Field(..., description="Whether AI insights were used")
class PurchaseOrderSummary(BaseModel):
    """Compact purchase-order view for the orchestration summary card."""
    supplierName: str  # display name of the supplier
    itemCategories: List[str]  # representative ingredient names (caller truncates)
    totalAmount: float  # order total; currency not carried here
class ProductionBatchSummary(BaseModel):
    """Compact production-batch view for the orchestration summary card."""
    productName: str  # display name of the product
    quantity: float  # planned quantity (unit not carried here)
    readyByTime: str  # planned end time as a string (may be empty)
class OrchestrationSummaryResponse(BaseModel):
    """Narrative of what the orchestrator did in a run, for user transparency."""
    runTimestamp: Optional[str] = Field(None, description="When the orchestration ran")
    runNumber: Optional[str] = Field(None, description="Run number identifier")
    status: str = Field(..., description="Run status")
    purchaseOrdersCreated: int = Field(..., description="Number of POs created")
    purchaseOrdersSummary: List[PurchaseOrderSummary] = Field(default_factory=list)
    productionBatchesCreated: int = Field(..., description="Number of batches created")
    productionBatchesSummary: List[ProductionBatchSummary] = Field(default_factory=list)
    reasoningInputs: ReasoningInputs
    userActionsRequired: int = Field(..., description="Number of actions needing approval")
    durationSeconds: Optional[int] = Field(None, description="How long orchestration took")
    aiAssisted: bool = Field(False, description="Whether AI insights were used")
    message_i18n: Optional[I18nData] = Field(None, description="i18n data for message")
class ActionButton(BaseModel):
    """Action button configuration rendered inside an ActionItem."""
    label_i18n: I18nData = Field(..., description="i18n data for button label")
    type: str = Field(..., description="Button type: primary, secondary, tertiary")
    action: str = Field(..., description="Action identifier")
class ActionItem(BaseModel):
    """Individual action requiring user attention in the prioritized queue."""
    id: str
    type: str = Field(..., description="Action type")
    urgency: str = Field(..., description="Urgency: critical, important, normal")
    # Legacy plain-text fields kept for alert payloads; i18n variants preferred.
    title: Optional[str] = Field(None, description="Legacy field for alerts")
    title_i18n: Optional[I18nData] = Field(None, description="i18n data for title")
    subtitle: Optional[str] = Field(None, description="Legacy field for alerts")
    subtitle_i18n: Optional[I18nData] = Field(None, description="i18n data for subtitle")
    reasoning: Optional[str] = Field(None, description="Legacy field for alerts")
    reasoning_i18n: Optional[I18nData] = Field(None, description="i18n data for reasoning")
    consequence_i18n: I18nData = Field(..., description="i18n data for consequence")
    reasoning_data: Optional[Dict[str, Any]] = Field(None, description="Structured reasoning data")
    amount: Optional[float] = Field(None, description="Amount for financial actions")
    currency: Optional[str] = Field(None, description="Currency code")
    actions: List[ActionButton]
    estimatedTimeMinutes: int
class ActionQueueResponse(BaseModel):
    """Prioritized queue of actions; `actions` may be truncated (counts are not)."""
    actions: List[ActionItem]
    totalActions: int  # total before truncation
    criticalCount: int
    importantCount: int
class ProductionTimelineItem(BaseModel):
    """Individual production batch entry in today's timeline."""
    id: str
    batchNumber: str
    productName: str
    quantity: float
    unit: str
    plannedStartTime: Optional[str]
    plannedEndTime: Optional[str]
    actualStartTime: Optional[str]
    status: str
    statusIcon: str
    statusText: str
    # Clamped to [0, 100] by pydantic validation.
    progress: int = Field(..., ge=0, le=100, description="Progress percentage")
    readyBy: Optional[str]
    priority: str
    reasoning_data: Optional[Dict[str, Any]] = Field(None, description="Structured reasoning data")
    reasoning_i18n: Optional[I18nData] = Field(None, description="i18n data for reasoning")
    status_i18n: Optional[I18nData] = Field(None, description="i18n data for status")
class ProductionTimelineResponse(BaseModel):
    """Today's production timeline with per-status batch counts."""
    timeline: List[ProductionTimelineItem]
    totalBatches: int
    completedBatches: int
    inProgressBatches: int
    pendingBatches: int
class InsightCardI18n(BaseModel):
    """i18n bundle (label/value/optional detail) for one insight card."""
    label: I18nData = Field(..., description="i18n data for label")
    value: I18nData = Field(..., description="i18n data for value")
    detail: Optional[I18nData] = Field(None, description="i18n data for detail")
class InsightCard(BaseModel):
    """Individual insight card: a traffic-light color plus i18n content."""
    color: str = Field(..., description="Color: green, amber, red")
    i18n: InsightCardI18n = Field(..., description="i18n translation data")
class InsightsResponse(BaseModel):
    """Key insights grid: one card per metric (savings/inventory/waste/deliveries)."""
    savings: InsightCard
    inventory: InsightCard
    waste: InsightCard
    deliveries: InsightCard
# ============================================================
# API Endpoints
# ============================================================
@router.get("/health-status", response_model=BakeryHealthStatusResponse)
async def get_bakery_health_status(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> BakeryHealthStatusResponse:
    """
    Get overall bakery health status with tri-state checklist.

    This is the top-level indicator showing if the bakery is running smoothly
    or if there are issues requiring attention. Includes AI-prevented issues.

    Args:
        tenant_id: Tenant whose health is computed (path parameter).
        db: Async DB session injected by FastAPI.

    Returns:
        BakeryHealthStatusResponse aggregated from the alerts, procurement,
        production and inventory services (each fetch degrades gracefully).

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        # Try to get from cache
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:health:{tenant_id}"
            cached = await get_cached(cache_key)
            if cached:
                return BakeryHealthStatusResponse(**cached)
        dashboard_service = DashboardService(db)
        # Gather metrics from various services in parallel
        # Use asyncio.gather to make all HTTP calls concurrently
        async def fetch_alerts():
            # Returns (critical_count, prevented_count, alerts_list);
            # degrades to zeros/empty on any client failure.
            try:
                alerts_data = await alerts_client.get_alerts(tenant_id, limit=100) or {}
                alerts_list = alerts_data.get("alerts", [])
                # Count critical alerts
                critical_count = sum(1 for a in alerts_list if a.get('priority_level') == 'CRITICAL')
                # Count AI prevented issues
                prevented_count = sum(1 for a in alerts_list if a.get('type_class') == 'prevented_issue')
                return critical_count, prevented_count, alerts_list
            except Exception as e:
                logger.warning(f"Failed to fetch alerts: {e}")
                return 0, 0, []
        async def fetch_pending_pos():
            # Count of pending purchase orders; 0 on failure or unexpected shape.
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100) or []
                return len(po_data) if isinstance(po_data, list) else 0
            except Exception as e:
                logger.warning(f"Failed to fetch POs: {e}")
                return 0
        async def fetch_production_delays():
            # Batches currently ON_HOLD count as production delays.
            try:
                prod_data = await production_client.get_production_batches_by_status(
                    tenant_id, status="ON_HOLD", limit=100
                ) or {}
                return len(prod_data.get("batches", []))
            except Exception as e:
                logger.warning(f"Failed to fetch production batches: {e}")
                return 0
        async def fetch_inventory():
            # Out-of-stock item count from the inventory dashboard summary.
            try:
                inv_data = await inventory_client.get_inventory_dashboard(tenant_id) or {}
                return inv_data.get("out_of_stock_count", 0)
            except Exception as e:
                logger.warning(f"Failed to fetch inventory: {e}")
                return 0
        # Execute all fetches in parallel
        alerts_result, pending_approvals, production_delays, out_of_stock_count = await asyncio.gather(
            fetch_alerts(),
            fetch_pending_pos(),
            fetch_production_delays(),
            fetch_inventory()
        )
        critical_alerts, ai_prevented_count, all_alerts = alerts_result
        # System errors (would come from monitoring system)
        system_errors = 0
        # Calculate health status with tri-state checklist
        health_status = await dashboard_service.get_bakery_health_status(
            tenant_id=tenant_id,
            critical_alerts=critical_alerts,
            pending_approvals=pending_approvals,
            production_delays=production_delays,
            out_of_stock_count=out_of_stock_count,
            system_errors=system_errors,
            ai_prevented_count=ai_prevented_count,
            action_needed_alerts=all_alerts
        )
        # Cache the result
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:health:{tenant_id}"
            await set_cached(cache_key, health_status, ttl=settings.CACHE_TTL_HEALTH)
        return BakeryHealthStatusResponse(**health_status)
    except Exception as e:
        logger.error(f"Error getting health status: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/orchestration-summary", response_model=OrchestrationSummaryResponse)
async def get_orchestration_summary(
    tenant_id: str,
    run_id: Optional[str] = Query(None, description="Specific run ID, or latest if not provided"),
    db: AsyncSession = Depends(get_db)
) -> OrchestrationSummaryResponse:
    """
    Get narrative summary of what the orchestrator did.

    This provides transparency into the automation, showing what was planned
    and why, helping build user trust in the system.

    Args:
        tenant_id: Tenant whose run is summarized (path parameter).
        run_id: Specific run to summarize; latest run when omitted.
        db: Async DB session injected by FastAPI.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        # Try to get from cache (only if no specific run_id is provided)
        if settings.CACHE_ENABLED and run_id is None:
            cache_key = f"dashboard:summary:{tenant_id}"
            cached = await get_cached(cache_key)
            if cached:
                return OrchestrationSummaryResponse(**cached)
        dashboard_service = DashboardService(db)
        # Get orchestration summary
        summary = await dashboard_service.get_orchestration_summary(
            tenant_id=tenant_id,
            last_run_id=run_id
        )
        # Enhance with detailed PO and batch summaries
        if summary["purchaseOrdersCreated"] > 0:
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=10)
                if po_data and isinstance(po_data, list):
                    # Override stale orchestration count with actual real-time PO count
                    summary["purchaseOrdersCreated"] = len(po_data)
                    summary["userActionsRequired"] = len(po_data)  # Update actions required to match actual pending POs
                    summary["purchaseOrdersSummary"] = [
                        PurchaseOrderSummary(
                            supplierName=po.get("supplier_name", "Unknown"),
                            itemCategories=[item.get("ingredient_name", "Item") for item in po.get("items", [])[:3]],
                            totalAmount=float(po.get("total_amount", 0))
                        )
                        for po in po_data[:5]  # Show top 5
                    ]
            except Exception as e:
                logger.warning(f"Failed to fetch PO details: {e}")
        if summary["productionBatchesCreated"] > 0:
            try:
                batch_data = await production_client.get_todays_batches(tenant_id)
                if batch_data:
                    batches = batch_data.get("batches", [])
                    # Override stale orchestration count with actual real-time batch count
                    summary["productionBatchesCreated"] = len(batches)
                    summary["productionBatchesSummary"] = [
                        ProductionBatchSummary(
                            productName=batch.get("product_name", "Unknown"),
                            quantity=batch.get("planned_quantity", 0),
                            readyByTime=batch.get("planned_end_time", "")
                        )
                        for batch in batches[:5]  # Show top 5
                    ]
            except Exception as e:
                logger.warning(f"Failed to fetch batch details: {e}")
        # Cache the result (only if no specific run_id)
        # NOTE(review): at this point `summary` may contain Pydantic model
        # instances (the *Summary lists); assumes set_cached can serialize
        # them — confirm against the cache implementation.
        if settings.CACHE_ENABLED and run_id is None:
            cache_key = f"dashboard:summary:{tenant_id}"
            await set_cached(cache_key, summary, ttl=settings.CACHE_TTL_SUMMARY)
        return OrchestrationSummaryResponse(**summary)
    except Exception as e:
        logger.error(f"Error getting orchestration summary: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/action-queue", response_model=ActionQueueResponse)
async def get_action_queue(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> ActionQueueResponse:
    """
    Get prioritized queue of actions requiring user attention.

    This is the core of the JTBD dashboard - showing exactly what the user
    needs to do right now, prioritized by urgency and impact.

    Args:
        tenant_id: Tenant whose actions are listed (path parameter).
        db: Async DB session injected by FastAPI.

    Returns:
        ActionQueueResponse carrying at most the top 10 actions; the counts
        reflect the full (untruncated) queue.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        dashboard_service = DashboardService(db)
        # Fetch data from various services in parallel
        async def fetch_pending_pos():
            # Pending purchase orders awaiting approval; [] on failure.
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=20)
                if po_data and isinstance(po_data, list):
                    return po_data
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch pending POs: {e}")
                return []
        async def fetch_critical_alerts():
            # Critical alerts from the alert service; [] on failure.
            try:
                alerts_data = await alerts_client.get_critical_alerts(tenant_id, limit=20)
                if alerts_data:
                    return alerts_data.get("alerts", [])
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch alerts: {e}")
                return []
        async def fetch_onboarding():
            # Onboarding progress; defaults to "complete" on failure so a
            # transient error does not surface a spurious onboarding action.
            try:
                onboarding_data = await procurement_client.get(
                    "/procurement/auth/onboarding-progress",
                    tenant_id=tenant_id
                )
                if onboarding_data:
                    return {
                        "incomplete": not onboarding_data.get("completed", True),
                        "steps": onboarding_data.get("steps", [])
                    }
                return {"incomplete": False, "steps": []}
            except Exception as e:
                logger.warning(f"Failed to fetch onboarding status: {e}")
                return {"incomplete": False, "steps": []}
        # Execute all fetches in parallel
        pending_pos, critical_alerts, onboarding = await asyncio.gather(
            fetch_pending_pos(),
            fetch_critical_alerts(),
            fetch_onboarding()
        )
        onboarding_incomplete = onboarding["incomplete"]
        onboarding_steps = onboarding["steps"]
        # Build action queue
        actions = await dashboard_service.get_action_queue(
            tenant_id=tenant_id,
            pending_pos=pending_pos,
            critical_alerts=critical_alerts,
            onboarding_incomplete=onboarding_incomplete,
            onboarding_steps=onboarding_steps
        )
        # Count by urgency
        critical_count = sum(1 for a in actions if a["urgency"] == "critical")
        important_count = sum(1 for a in actions if a["urgency"] == "important")
        return ActionQueueResponse(
            actions=[ActionItem(**action) for action in actions[:10]],  # Show top 10
            totalActions=len(actions),
            criticalCount=critical_count,
            importantCount=important_count
        )
    except Exception as e:
        logger.error(f"Error getting action queue: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/production-timeline", response_model=ProductionTimelineResponse)
async def get_production_timeline(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> ProductionTimelineResponse:
    """
    Get today's production timeline.

    Shows what's being made today in chronological order with status
    and progress, plus per-status counts for the summary header.
    """
    try:
        service = DashboardService(db)

        # Pull today's batches; fall back to an empty timeline on failure.
        todays_batches = []
        try:
            raw = await production_client.get_todays_batches(tenant_id)
            if raw:
                todays_batches = raw.get("batches", [])
        except Exception as fetch_err:
            logger.warning(f"Failed to fetch production batches: {fetch_err}")

        # Transform raw batches into the dashboard timeline format.
        timeline = await service.get_production_timeline(
            tenant_id=tenant_id,
            batches=todays_batches
        )

        # Tally the three displayed statuses in a single pass.
        tally = {"COMPLETED": 0, "IN_PROGRESS": 0, "PENDING": 0}
        for entry in timeline:
            if entry["status"] in tally:
                tally[entry["status"]] += 1

        return ProductionTimelineResponse(
            timeline=[ProductionTimelineItem(**entry) for entry in timeline],
            totalBatches=len(timeline),
            completedBatches=tally["COMPLETED"],
            inProgressBatches=tally["IN_PROGRESS"],
            pendingBatches=tally["PENDING"]
        )
    except Exception as e:
        logger.error(f"Error getting production timeline: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/unified-action-queue")
async def get_unified_action_queue(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> Dict[str, Any]:
    """
    Get unified action queue with time-based grouping.

    Combines all alerts (PO approvals, delivery tracking, production, etc.)
    into URGENT (<6h), TODAY (<24h), and THIS WEEK (<7d) sections.
    """
    try:
        service = DashboardService(db)

        # All alert types come from the alert processor in a single call.
        payload = await alerts_client.get_alerts(tenant_id, limit=100) or {}
        all_alerts = payload.get("alerts", [])

        # The service performs the time-based grouping.
        return await service.get_unified_action_queue(
            tenant_id=tenant_id,
            alerts=all_alerts
        )
    except Exception as e:
        logger.error(f"Error getting unified action queue: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/execution-progress")
async def get_execution_progress(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> Dict[str, Any]:
    """
    Get execution progress for today's plan.

    Shows plan vs actual for production batches, deliveries, and approvals.

    Args:
        tenant_id: Tenant whose progress is computed (path parameter).
        db: Async DB session injected by FastAPI.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        dashboard_service = DashboardService(db)
        # Fetch today's data in parallel
        # NOTE(review): fetch_expected_deliveries and fetch_pending_approvals
        # both call get_pending_purchase_orders, so the same upstream endpoint
        # is hit twice per request — candidate for a single shared fetch.
        async def fetch_todays_batches():
            # Today's production batches; [] on failure.
            try:
                batch_data = await production_client.get_todays_batches(tenant_id)
                if batch_data:
                    return batch_data.get("batches", [])
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch today's batches: {e}")
                return []
        async def fetch_expected_deliveries():
            # Pending POs whose expected_delivery_date falls within today (UTC).
            try:
                # Get POs with expected deliveries today
                from datetime import datetime, timedelta, timezone
                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
                if pos_result and isinstance(pos_result, list):
                    today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
                    # NOTE(review): upper bound is 23:59:59.000000, so timestamps
                    # in the last second's microseconds are excluded — confirm.
                    today_end = today_start.replace(hour=23, minute=59, second=59)
                    deliveries_today = []
                    for po in pos_result:
                        expected_date = po.get("expected_delivery_date")
                        if expected_date:
                            if isinstance(expected_date, str):
                                expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
                            # A tz-naive expected_date raises TypeError here; the
                            # outer except then yields an empty result.
                            if today_start <= expected_date <= today_end:
                                deliveries_today.append(po)
                    return deliveries_today
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch expected deliveries: {e}")
                return []
        async def fetch_pending_approvals():
            # Count of pending POs, with verbose logging to diagnose the
            # known None-response (URL construction) failure mode.
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
                if po_data is None:
                    logger.error(
                        "Procurement client returned None for pending POs",
                        tenant_id=tenant_id,
                        context="likely HTTP 404 error - check URL construction"
                    )
                    return 0
                if not isinstance(po_data, list):
                    logger.error(
                        "Unexpected response format from procurement client",
                        tenant_id=tenant_id,
                        response_type=type(po_data).__name__,
                        response_value=str(po_data)[:200]
                    )
                    return 0
                logger.info(
                    "Successfully fetched pending purchase orders",
                    tenant_id=tenant_id,
                    count=len(po_data)
                )
                return len(po_data)
            except Exception as e:
                logger.error(
                    "Exception while fetching pending approvals",
                    tenant_id=tenant_id,
                    error=str(e),
                    exc_info=True
                )
                return 0
        # Execute in parallel
        todays_batches, expected_deliveries, pending_approvals = await asyncio.gather(
            fetch_todays_batches(),
            fetch_expected_deliveries(),
            fetch_pending_approvals()
        )
        # Calculate progress
        progress = await dashboard_service.get_execution_progress(
            tenant_id=tenant_id,
            todays_batches=todays_batches,
            expected_deliveries=expected_deliveries,
            pending_approvals=pending_approvals
        )
        return progress
    except Exception as e:
        logger.error(f"Error getting execution progress: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/insights", response_model=InsightsResponse)
async def get_insights(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> InsightsResponse:
    """
    Get key insights for dashboard grid.

    Provides glanceable metrics on savings, inventory, waste, and deliveries.

    Args:
        tenant_id: Tenant whose insights are computed (path parameter).
        db: Async DB session injected by FastAPI.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        # Try to get from cache
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:insights:{tenant_id}"
            cached = await get_cached(cache_key)
            if cached:
                return InsightsResponse(**cached)
        dashboard_service = DashboardService(db)
        # Fetch data from various services in parallel
        from datetime import datetime, timedelta, timezone
        async def fetch_sustainability():
            # Sustainability widget payload; {} on failure.
            try:
                return await inventory_client.get_sustainability_widget(tenant_id) or {}
            except Exception as e:
                logger.warning(f"Failed to fetch sustainability data: {e}")
                return {}
        async def fetch_inventory():
            # Stock status; normalizes a list response into aggregate counts.
            try:
                raw_inventory_data = await inventory_client.get_stock_status(tenant_id)
                # Handle case where API returns a list instead of dict
                if isinstance(raw_inventory_data, dict):
                    return raw_inventory_data
                elif isinstance(raw_inventory_data, list):
                    # If it's a list, aggregate the data
                    return {
                        "low_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "low_stock"),
                        "out_of_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "out_of_stock"),
                        "total_items": len(raw_inventory_data)
                    }
                return {}
            except Exception as e:
                logger.warning(f"Failed to fetch inventory data: {e}")
                return {}
        async def fetch_deliveries():
            # Count of pending POs with an expected delivery today (UTC).
            try:
                # Get recent POs with pending deliveries
                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
                if pos_result and isinstance(pos_result, list):
                    # Count deliveries expected today
                    today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
                    today_end = today_start.replace(hour=23, minute=59, second=59)
                    deliveries_today = 0
                    for po in pos_result:
                        expected_date = po.get("expected_delivery_date")
                        if expected_date:
                            if isinstance(expected_date, str):
                                expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
                            if today_start <= expected_date <= today_end:
                                deliveries_today += 1
                    return {"deliveries_today": deliveries_today}
                return {}
            except Exception as e:
                logger.warning(f"Failed to fetch delivery data: {e}")
                return {}
        async def fetch_savings():
            # Weekly savings + trend derived from alert analytics; zeros on failure.
            try:
                # Get prevented issue savings from alert analytics
                analytics = await alerts_client.get_dashboard_analytics(tenant_id, days=7)
                if analytics:
                    weekly_savings = analytics.get('estimated_savings_eur', 0)
                    prevented_count = analytics.get('prevented_issues_count', 0)
                    # Calculate trend from period comparison
                    period_comparison = analytics.get('period_comparison', {})
                    current_prevented = period_comparison.get('current_prevented', 0)
                    previous_prevented = period_comparison.get('previous_prevented', 0)
                    trend_percentage = 0
                    # Guard against division by zero when there is no prior data.
                    if previous_prevented > 0:
                        trend_percentage = ((current_prevented - previous_prevented) / previous_prevented) * 100
                    return {
                        "weekly_savings": round(weekly_savings, 2),
                        "trend_percentage": round(trend_percentage, 1),
                        "prevented_count": prevented_count
                    }
                return {"weekly_savings": 0, "trend_percentage": 0, "prevented_count": 0}
            except Exception as e:
                logger.warning(f"Failed to calculate savings data: {e}")
                return {"weekly_savings": 0, "trend_percentage": 0, "prevented_count": 0}
        # Execute all fetches in parallel
        sustainability_data, inventory_data, delivery_data, savings_data = await asyncio.gather(
            fetch_sustainability(),
            fetch_inventory(),
            fetch_deliveries(),
            fetch_savings()
        )
        # Merge delivery data into inventory data
        inventory_data.update(delivery_data)
        # Calculate insights
        insights = await dashboard_service.calculate_insights(
            tenant_id=tenant_id,
            sustainability_data=sustainability_data,
            inventory_data=inventory_data,
            savings_data=savings_data
        )
        # Prepare response
        response_data = {
            "savings": insights["savings"],
            "inventory": insights["inventory"],
            "waste": insights["waste"],
            "deliveries": insights["deliveries"]
        }
        # Cache the result
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:insights:{tenant_id}"
            await set_cached(cache_key, response_data, ttl=settings.CACHE_TTL_INSIGHTS)
        return InsightsResponse(
            savings=InsightCard(**insights["savings"]),
            inventory=InsightCard(**insights["inventory"]),
            waste=InsightCard(**insights["waste"]),
            deliveries=InsightCard(**insights["deliveries"])
        )
    except Exception as e:
        logger.error(f"Error getting insights: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))

View File

@@ -1,200 +0,0 @@
"""
Enterprise Dashboard API Endpoints for Orchestrator Service
"""
from fastapi import APIRouter, Depends, HTTPException
from typing import List, Optional, Dict, Any
from datetime import date
import structlog
from app.services.enterprise_dashboard_service import EnterpriseDashboardService
from shared.auth.tenant_access import verify_tenant_access_dep
from shared.clients.tenant_client import TenantServiceClient
from shared.clients.forecast_client import ForecastServiceClient
from shared.clients.production_client import ProductionServiceClient
from shared.clients.sales_client import SalesServiceClient
from shared.clients.inventory_client import InventoryServiceClient
from shared.clients.distribution_client import DistributionServiceClient
logger = structlog.get_logger()
# All enterprise routes are scoped to the (parent) tenant in the path.
router = APIRouter(prefix="/api/v1/tenants/{tenant_id}/enterprise", tags=["enterprise"])
# Add dependency injection function
from app.services.enterprise_dashboard_service import EnterpriseDashboardService
from shared.clients import (
get_tenant_client,
get_forecast_client,
get_production_client,
get_sales_client,
get_inventory_client,
get_procurement_client,
get_distribution_client
)
def get_enterprise_dashboard_service() -> EnterpriseDashboardService:
    """Build an EnterpriseDashboardService wired with all downstream service clients."""
    # Imported lazily so the module can load before settings are configured.
    from app.core.config import settings

    return EnterpriseDashboardService(
        tenant_client=get_tenant_client(settings),
        forecast_client=get_forecast_client(settings),
        production_client=get_production_client(settings),
        sales_client=get_sales_client(settings),
        inventory_client=get_inventory_client(settings),
        distribution_client=get_distribution_client(settings),
        procurement_client=get_procurement_client(settings),
    )
@router.get("/network-summary")
async def get_network_summary(
    tenant_id: str,
    enterprise_service: EnterpriseDashboardService = Depends(get_enterprise_dashboard_service),
    verified_tenant: str = Depends(verify_tenant_access_dep)
):
    """
    Get network summary metrics for enterprise dashboard.

    Returns 404 for unknown tenants and 403 for non-parent tenants;
    any other failure maps to a generic 500.
    """
    try:
        # Verify user has network access
        tenant_info = await enterprise_service.tenant_client.get_tenant(tenant_id)
        if not tenant_info:
            raise HTTPException(status_code=404, detail="Tenant not found")
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(status_code=403, detail="Only parent tenants can access enterprise dashboard")
        result = await enterprise_service.get_network_summary(parent_tenant_id=tenant_id)
        return result
    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so the broad handler
        # below was converting the intended 404/403 responses into 500s.
        raise
    except Exception as e:
        logger.error(f"Error getting network summary: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Failed to get network summary")
@router.get("/children-performance")
async def get_children_performance(
    tenant_id: str,
    metric: str = "sales",
    period_days: int = 30,
    enterprise_service: EnterpriseDashboardService = Depends(get_enterprise_dashboard_service),
    verified_tenant: str = Depends(verify_tenant_access_dep)
):
    """
    Get anonymized performance ranking of child tenants.

    Returns 404 for unknown tenants and 403 for non-parent tenants;
    any other failure maps to a generic 500.
    """
    try:
        # Verify user has network access
        tenant_info = await enterprise_service.tenant_client.get_tenant(tenant_id)
        if not tenant_info:
            raise HTTPException(status_code=404, detail="Tenant not found")
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(status_code=403, detail="Only parent tenants can access enterprise dashboard")
        result = await enterprise_service.get_children_performance(
            parent_tenant_id=tenant_id,
            metric=metric,
            period_days=period_days
        )
        return result
    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so the broad handler
        # below was converting the intended 404/403 responses into 500s.
        raise
    except Exception as e:
        logger.error(f"Error getting children performance: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Failed to get children performance")
@router.get("/distribution-overview")
async def get_distribution_overview(
    tenant_id: str,
    target_date: Optional[date] = None,
    enterprise_service: EnterpriseDashboardService = Depends(get_enterprise_dashboard_service),
    verified_tenant: str = Depends(verify_tenant_access_dep)
):
    """
    Get distribution overview for enterprise dashboard.

    Defaults target_date to today. Returns 404 for unknown tenants and 403
    for non-parent tenants; any other failure maps to a generic 500.
    """
    try:
        # Verify user has network access
        tenant_info = await enterprise_service.tenant_client.get_tenant(tenant_id)
        if not tenant_info:
            raise HTTPException(status_code=404, detail="Tenant not found")
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(status_code=403, detail="Only parent tenants can access enterprise dashboard")
        if target_date is None:
            target_date = date.today()
        result = await enterprise_service.get_distribution_overview(
            parent_tenant_id=tenant_id,
            target_date=target_date
        )
        return result
    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so the broad handler
        # below was converting the intended 404/403 responses into 500s.
        raise
    except Exception as e:
        logger.error(f"Error getting distribution overview: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Failed to get distribution overview")
@router.get("/forecast-summary")
async def get_enterprise_forecast_summary(
    tenant_id: str,
    days_ahead: int = 7,
    enterprise_service: EnterpriseDashboardService = Depends(get_enterprise_dashboard_service),
    verified_tenant: str = Depends(verify_tenant_access_dep)
):
    """
    Get aggregated forecast summary for the enterprise network.

    Returns 404 for unknown tenants and 403 for non-parent tenants;
    any other failure maps to a generic 500.
    """
    try:
        # Verify user has network access
        tenant_info = await enterprise_service.tenant_client.get_tenant(tenant_id)
        if not tenant_info:
            raise HTTPException(status_code=404, detail="Tenant not found")
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(status_code=403, detail="Only parent tenants can access enterprise dashboard")
        result = await enterprise_service.get_enterprise_forecast_summary(
            parent_tenant_id=tenant_id,
            days_ahead=days_ahead
        )
        return result
    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so the broad handler
        # below was converting the intended 404/403 responses into 500s.
        raise
    except Exception as e:
        logger.error(f"Error getting enterprise forecast summary: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Failed to get enterprise forecast summary")
@router.get("/network-performance")
async def get_network_performance_metrics(
    tenant_id: str,
    start_date: Optional[date] = None,
    end_date: Optional[date] = None,
    enterprise_service: EnterpriseDashboardService = Depends(get_enterprise_dashboard_service),
    verified_tenant: str = Depends(verify_tenant_access_dep)
):
    """
    Get aggregated performance metrics across the tenant network.

    Both dates default to today. Returns 404 for unknown tenants and 403
    for non-parent tenants; any other failure maps to a generic 500.
    """
    try:
        # Verify user has network access
        tenant_info = await enterprise_service.tenant_client.get_tenant(tenant_id)
        if not tenant_info:
            raise HTTPException(status_code=404, detail="Tenant not found")
        if tenant_info.get('tenant_type') != 'parent':
            raise HTTPException(status_code=403, detail="Only parent tenants can access enterprise dashboard")
        if not start_date:
            start_date = date.today()
        if not end_date:
            end_date = date.today()
        result = await enterprise_service.get_network_performance_metrics(
            parent_tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date
        )
        return result
    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so the broad handler
        # below was converting the intended 404/403 responses into 500s.
        raise
    except Exception as e:
        logger.error(f"Error getting network performance metrics: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Failed to get network performance metrics")

View File

@@ -303,3 +303,44 @@ async def list_orchestration_runs(
tenant_id=tenant_id,
error=str(e))
raise HTTPException(status_code=500, detail=str(e))
@router.get("/last-run")
async def get_last_orchestration_run(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
):
    """
    Get timestamp of last orchestration run.

    Lightweight endpoint for health status frontend migration (Phase 4).
    Returns only timestamp and run number for the most recent completed run.

    Args:
        tenant_id: Tenant ID

    Returns:
        Dict with timestamp and runNumber (or None if no runs)
    """
    try:
        # Reject malformed tenant ids up front (ValueError -> 400 below).
        tenant_uuid = uuid.UUID(tenant_id)
        repo = OrchestrationRunRepository(db)

        # Most recent completed run, if any.
        latest_run = await repo.get_latest_run_for_tenant(tenant_uuid)
        if not latest_run:
            return {"timestamp": None, "runNumber": None}

        started = latest_run.started_at
        return {
            "timestamp": started.isoformat() if started else None,
            "runNumber": latest_run.run_number,
        }
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid tenant ID: {str(e)}")
    except Exception as e:
        logger.error("Error getting last orchestration run",
                     tenant_id=tenant_id,
                     error=str(e))
        raise HTTPException(status_code=500, detail=str(e))