Files
bakery-ia/services/orchestrator/app/api/dashboard.py
2025-11-27 15:52:40 +01:00

772 lines
31 KiB
Python

# ================================================================
# services/orchestrator/app/api/dashboard.py
# ================================================================
"""
Dashboard API endpoints for JTBD-aligned bakery dashboard
"""
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
from datetime import datetime
import logging
import asyncio
from app.core.database import get_db
from app.core.config import settings
from ..services.dashboard_service import DashboardService
from ..utils.cache import get_cached, set_cached, delete_pattern
from shared.clients import (
get_inventory_client,
get_production_client,
get_alerts_client,
ProductionServiceClient,
InventoryServiceClient,
AlertsServiceClient
)
from shared.clients.procurement_client import ProcurementServiceClient
logger = logging.getLogger(__name__)
# Initialize service clients
# Module-level singleton HTTP clients for the downstream services; each
# identifies itself to the target service as "orchestrator".
inventory_client = get_inventory_client(settings, "orchestrator")
production_client = get_production_client(settings, "orchestrator")
procurement_client = ProcurementServiceClient(settings)
alerts_client = get_alerts_client(settings, "orchestrator")
# All dashboard routes are tenant-scoped via the {tenant_id} path parameter.
router = APIRouter(prefix="/api/v1/tenants/{tenant_id}/dashboard", tags=["dashboard"])
# ============================================================
# Response Models
# ============================================================
class I18nData(BaseModel):
    """A translation reference: an i18n key plus interpolation parameters.

    The frontend resolves ``key`` against its translation catalog and
    substitutes ``params`` into the localized template.
    """
    key: str = Field(..., description="i18n translation key")
    # NOTE(review): annotated Optional yet defaulted to {} — callers can still
    # pass None explicitly; confirm consumers tolerate both.
    params: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Parameters for translation")
class HeadlineData(BaseModel):
    """i18n-ready headline data (key + interpolation params, params never None)."""
    key: str = Field(..., description="i18n translation key")
    params: Dict[str, Any] = Field(default_factory=dict, description="Parameters for translation")
class HealthChecklistItem(BaseModel):
    """Individual item in the tri-state health checklist.

    Field names are camelCase — presumably to match the frontend JSON
    contract directly (no alias layer is declared here).
    """
    icon: str = Field(..., description="Icon name: check, warning, alert, ai_handled")
    # Legacy plain-text field kept for backward compatibility with older clients.
    text: Optional[str] = Field(None, description="Deprecated: Use textKey instead")
    textKey: Optional[str] = Field(None, description="i18n translation key")
    textParams: Optional[Dict[str, Any]] = Field(None, description="Parameters for i18n translation")
    actionRequired: bool = Field(..., description="Whether action is required")
    status: str = Field(..., description="Tri-state status: good, ai_handled, needs_you")
    actionPath: Optional[str] = Field(None, description="Path to navigate for action")
class BakeryHealthStatusResponse(BaseModel):
    """Overall bakery health status with tri-state checklist.

    Returned by GET /health-status; the top-level "is everything OK" widget.
    """
    status: str = Field(..., description="Health status: green, yellow, red")
    headline: HeadlineData = Field(..., description="i18n-ready status headline")
    lastOrchestrationRun: Optional[str] = Field(None, description="ISO timestamp of last orchestration")
    nextScheduledRun: str = Field(..., description="ISO timestamp of next scheduled run")
    checklistItems: List[HealthChecklistItem] = Field(..., description="Tri-state status checklist")
    criticalIssues: int = Field(..., description="Count of critical issues")
    pendingActions: int = Field(..., description="Count of pending actions")
    # Defaults to 0 so older service payloads without this key still validate.
    aiPreventedIssues: int = Field(0, description="Count of issues AI prevented")
class ReasoningInputs(BaseModel):
    """Inputs the orchestrator used for decision making (transparency UI)."""
    customerOrders: int = Field(..., description="Number of customer orders analyzed")
    historicalDemand: bool = Field(..., description="Whether historical data was used")
    inventoryLevels: bool = Field(..., description="Whether inventory levels were considered")
    aiInsights: bool = Field(..., description="Whether AI insights were used")
class PurchaseOrderSummary(BaseModel):
    """Compact purchase-order summary rendered on the dashboard card."""
    supplierName: str  # display name of the supplier
    itemCategories: List[str]  # ingredient/category labels shown on the card
    totalAmount: float  # order total; currency is not carried on this model
class ProductionBatchSummary(BaseModel):
    """Compact production-batch summary rendered on the dashboard card."""
    productName: str  # display name of the product
    quantity: float  # planned quantity (unit not carried on this model)
    readyByTime: str  # planned end time as provided by the production service
class OrchestrationSummaryResponse(BaseModel):
    """Narrative summary of what the orchestrator did for the user.

    Returned by GET /orchestration-summary.
    """
    runTimestamp: Optional[str] = Field(None, description="When the orchestration ran")
    runNumber: Optional[str] = Field(None, description="Run number identifier")
    status: str = Field(..., description="Run status")
    purchaseOrdersCreated: int = Field(..., description="Number of POs created")
    purchaseOrdersSummary: List[PurchaseOrderSummary] = Field(default_factory=list)
    productionBatchesCreated: int = Field(..., description="Number of batches created")
    productionBatchesSummary: List[ProductionBatchSummary] = Field(default_factory=list)
    reasoningInputs: ReasoningInputs
    userActionsRequired: int = Field(..., description="Number of actions needing approval")
    durationSeconds: Optional[int] = Field(None, description="How long orchestration took")
    aiAssisted: bool = Field(False, description="Whether AI insights were used")
    message_i18n: Optional[I18nData] = Field(None, description="i18n data for message")
class ActionButton(BaseModel):
    """Configuration for a single button on an action card."""
    label_i18n: I18nData = Field(..., description="i18n data for button label")
    type: str = Field(..., description="Button type: primary, secondary, tertiary")
    # Opaque identifier the frontend maps to a handler.
    action: str = Field(..., description="Action identifier")
class ActionItem(BaseModel):
    """Individual action requiring user attention.

    Carries both legacy plain-text fields (title/subtitle/reasoning) and
    their i18n counterparts; newer clients should prefer the *_i18n fields.
    """
    id: str
    type: str = Field(..., description="Action type")
    urgency: str = Field(..., description="Urgency: critical, important, normal")
    title: Optional[str] = Field(None, description="Legacy field for alerts")
    title_i18n: Optional[I18nData] = Field(None, description="i18n data for title")
    subtitle: Optional[str] = Field(None, description="Legacy field for alerts")
    subtitle_i18n: Optional[I18nData] = Field(None, description="i18n data for subtitle")
    reasoning: Optional[str] = Field(None, description="Legacy field for alerts")
    reasoning_i18n: Optional[I18nData] = Field(None, description="i18n data for reasoning")
    # Only the consequence text is mandatory — the "what happens if you ignore this".
    consequence_i18n: I18nData = Field(..., description="i18n data for consequence")
    reasoning_data: Optional[Dict[str, Any]] = Field(None, description="Structured reasoning data")
    amount: Optional[float] = Field(None, description="Amount for financial actions")
    currency: Optional[str] = Field(None, description="Currency code")
    actions: List[ActionButton]
    estimatedTimeMinutes: int
class ActionQueueResponse(BaseModel):
    """Prioritized queue of actions returned by GET /action-queue."""
    actions: List[ActionItem]  # the (possibly truncated) queue itself
    totalActions: int  # full count before truncation
    criticalCount: int  # actions with urgency == "critical"
    importantCount: int  # actions with urgency == "important"
class ProductionTimelineItem(BaseModel):
    """Individual production batch in the daily timeline."""
    id: str
    batchNumber: str
    productName: str
    quantity: float
    unit: str
    # Planned/actual times are ISO strings or None when not yet scheduled/started.
    plannedStartTime: Optional[str]
    plannedEndTime: Optional[str]
    actualStartTime: Optional[str]
    status: str
    statusIcon: str
    statusText: str
    progress: int = Field(..., ge=0, le=100, description="Progress percentage")
    readyBy: Optional[str]
    priority: str
    reasoning_data: Optional[Dict[str, Any]] = Field(None, description="Structured reasoning data")
    reasoning_i18n: Optional[I18nData] = Field(None, description="i18n data for reasoning")
    status_i18n: Optional[I18nData] = Field(None, description="i18n data for status")
class ProductionTimelineResponse(BaseModel):
    """Today's production timeline returned by GET /production-timeline."""
    timeline: List[ProductionTimelineItem]
    totalBatches: int  # len(timeline)
    completedBatches: int  # items with status COMPLETED
    inProgressBatches: int  # items with status IN_PROGRESS
    pendingBatches: int  # items with status PENDING
class InsightCardI18n(BaseModel):
    """Translatable text bundle (label/value/detail) for one insight card."""
    label: I18nData = Field(..., description="i18n data for label")
    value: I18nData = Field(..., description="i18n data for value")
    detail: Optional[I18nData] = Field(None, description="i18n data for detail")
class InsightCard(BaseModel):
    """Single card in the insights grid: a traffic-light color plus its texts."""
    color: str = Field(..., description="Color: green, amber, red")
    i18n: InsightCardI18n = Field(..., description="i18n translation data")
class InsightsResponse(BaseModel):
    """Key insights grid returned by GET /insights — one card per metric."""
    savings: InsightCard
    inventory: InsightCard
    waste: InsightCard
    deliveries: InsightCard
# ============================================================
# API Endpoints
# ============================================================
@router.get("/health-status", response_model=BakeryHealthStatusResponse)
async def get_bakery_health_status(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> BakeryHealthStatusResponse:
    """
    Get overall bakery health status with tri-state checklist

    This is the top-level indicator showing if the bakery is running smoothly
    or if there are issues requiring attention. Includes AI-prevented issues.

    Flow:
      1. Serve from cache when enabled and populated.
      2. Otherwise fan out four service calls concurrently (alerts, pending
         POs, on-hold production batches, inventory); each helper swallows
         its own errors and falls back to zeros/empty lists so one failing
         service never breaks the endpoint.
      3. Delegate status computation to DashboardService and cache the result.

    Raises:
        HTTPException: 500 on any unexpected error outside the fetch helpers.
    """
    try:
        # Try to get from cache
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:health:{tenant_id}"
            cached = await get_cached(cache_key)
            if cached:
                return BakeryHealthStatusResponse(**cached)
        dashboard_service = DashboardService(db)
        # Gather metrics from various services in parallel
        # Use asyncio.gather to make all HTTP calls concurrently
        async def fetch_alerts():
            # Returns (critical_count, ai_prevented_count, raw_alert_list);
            # (0, 0, []) when the alerts service is unavailable.
            try:
                alerts_data = await alerts_client.get_alerts(tenant_id, limit=100) or {}
                alerts_list = alerts_data.get("alerts", [])
                # Count critical alerts
                critical_count = sum(1 for a in alerts_list if a.get('priority_level') == 'CRITICAL')
                # Count AI prevented issues
                prevented_count = sum(1 for a in alerts_list if a.get('type_class') == 'prevented_issue')
                return critical_count, prevented_count, alerts_list
            except Exception as e:
                logger.warning(f"Failed to fetch alerts: {e}")
                return 0, 0, []
        async def fetch_pending_pos():
            # Count of purchase orders awaiting approval; 0 on failure or
            # unexpected payload shape.
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100) or []
                return len(po_data) if isinstance(po_data, list) else 0
            except Exception as e:
                logger.warning(f"Failed to fetch POs: {e}")
                return 0
        async def fetch_production_delays():
            # Batches currently ON_HOLD are treated as production delays.
            try:
                prod_data = await production_client.get_production_batches_by_status(
                    tenant_id, status="ON_HOLD", limit=100
                ) or {}
                return len(prod_data.get("batches", []))
            except Exception as e:
                logger.warning(f"Failed to fetch production batches: {e}")
                return 0
        async def fetch_inventory():
            # Out-of-stock item count from the inventory dashboard payload.
            try:
                inv_data = await inventory_client.get_inventory_dashboard(tenant_id) or {}
                return inv_data.get("out_of_stock_count", 0)
            except Exception as e:
                logger.warning(f"Failed to fetch inventory: {e}")
                return 0
        # Execute all fetches in parallel
        alerts_result, pending_approvals, production_delays, out_of_stock_count = await asyncio.gather(
            fetch_alerts(),
            fetch_pending_pos(),
            fetch_production_delays(),
            fetch_inventory()
        )
        critical_alerts, ai_prevented_count, all_alerts = alerts_result
        # System errors (would come from monitoring system)
        system_errors = 0
        # Calculate health status with tri-state checklist
        health_status = await dashboard_service.get_bakery_health_status(
            tenant_id=tenant_id,
            critical_alerts=critical_alerts,
            pending_approvals=pending_approvals,
            production_delays=production_delays,
            out_of_stock_count=out_of_stock_count,
            system_errors=system_errors,
            ai_prevented_count=ai_prevented_count,
            action_needed_alerts=all_alerts
        )
        # Cache the result
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:health:{tenant_id}"
            await set_cached(cache_key, health_status, ttl=settings.CACHE_TTL_HEALTH)
        return BakeryHealthStatusResponse(**health_status)
    except Exception as e:
        logger.error(f"Error getting health status: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/orchestration-summary", response_model=OrchestrationSummaryResponse)
async def get_orchestration_summary(
    tenant_id: str,
    run_id: Optional[str] = Query(None, description="Specific run ID, or latest if not provided"),
    db: AsyncSession = Depends(get_db)
) -> OrchestrationSummaryResponse:
    """
    Get narrative summary of what the orchestrator did

    This provides transparency into the automation, showing what was planned
    and why, helping build user trust in the system.

    Caching only applies to the "latest run" view (run_id is None); requests
    for a specific run always hit the services directly.

    Raises:
        HTTPException: 500 on unexpected errors (enrichment failures are
            logged and the base summary is returned instead).
    """
    try:
        # Try to get from cache (only if no specific run_id is provided)
        if settings.CACHE_ENABLED and run_id is None:
            cache_key = f"dashboard:summary:{tenant_id}"
            cached = await get_cached(cache_key)
            if cached:
                return OrchestrationSummaryResponse(**cached)
        dashboard_service = DashboardService(db)
        # Get orchestration summary
        summary = await dashboard_service.get_orchestration_summary(
            tenant_id=tenant_id,
            last_run_id=run_id
        )
        # Enhance with detailed PO and batch summaries.
        # NOTE(review): enrichment only runs when the stored count is > 0,
        # so a stale zero count is never refreshed here — confirm intended.
        if summary["purchaseOrdersCreated"] > 0:
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=10)
                if po_data and isinstance(po_data, list):
                    # Override stale orchestration count with actual real-time PO count
                    summary["purchaseOrdersCreated"] = len(po_data)
                    summary["userActionsRequired"] = len(po_data)  # Update actions required to match actual pending POs
                    summary["purchaseOrdersSummary"] = [
                        PurchaseOrderSummary(
                            supplierName=po.get("supplier_name", "Unknown"),
                            # The first three line items stand in for "categories".
                            itemCategories=[item.get("ingredient_name", "Item") for item in po.get("items", [])[:3]],
                            totalAmount=float(po.get("total_amount", 0))
                        )
                        for po in po_data[:5]  # Show top 5
                    ]
            except Exception as e:
                logger.warning(f"Failed to fetch PO details: {e}")
        if summary["productionBatchesCreated"] > 0:
            try:
                batch_data = await production_client.get_todays_batches(tenant_id)
                if batch_data:
                    batches = batch_data.get("batches", [])
                    # Override stale orchestration count with actual real-time batch count
                    summary["productionBatchesCreated"] = len(batches)
                    summary["productionBatchesSummary"] = [
                        ProductionBatchSummary(
                            productName=batch.get("product_name", "Unknown"),
                            quantity=batch.get("planned_quantity", 0),
                            readyByTime=batch.get("planned_end_time", "")
                        )
                        for batch in batches[:5]  # Show top 5
                    ]
            except Exception as e:
                logger.warning(f"Failed to fetch batch details: {e}")
        # Cache the result (only if no specific run_id)
        if settings.CACHE_ENABLED and run_id is None:
            cache_key = f"dashboard:summary:{tenant_id}"
            await set_cached(cache_key, summary, ttl=settings.CACHE_TTL_SUMMARY)
        return OrchestrationSummaryResponse(**summary)
    except Exception as e:
        logger.error(f"Error getting orchestration summary: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/action-queue", response_model=ActionQueueResponse)
async def get_action_queue(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> ActionQueueResponse:
    """
    Get prioritized queue of actions requiring user attention

    This is the core of the JTBD dashboard - showing exactly what the user
    needs to do right now, prioritized by urgency and impact.

    Sources (fetched concurrently, each with its own empty fallback):
      - pending purchase orders (procurement service)
      - critical alerts (alerts service)
      - onboarding progress (procurement auth endpoint)

    Raises:
        HTTPException: 500 on unexpected errors outside the fetch helpers.
    """
    try:
        dashboard_service = DashboardService(db)
        # Fetch data from various services in parallel
        async def fetch_pending_pos():
            # Pending POs awaiting approval; [] on failure or unexpected shape.
            try:
                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=20)
                if po_data and isinstance(po_data, list):
                    return po_data
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch pending POs: {e}")
                return []
        async def fetch_critical_alerts():
            # Critical alerts only; [] on failure.
            try:
                alerts_data = await alerts_client.get_critical_alerts(tenant_id, limit=20)
                if alerts_data:
                    return alerts_data.get("alerts", [])
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch alerts: {e}")
                return []
        async def fetch_onboarding():
            # Onboarding progress; a missing "completed" flag is treated as
            # complete, so absence of data never creates a spurious action.
            try:
                onboarding_data = await procurement_client.get(
                    "/procurement/auth/onboarding-progress",
                    tenant_id=tenant_id
                )
                if onboarding_data:
                    return {
                        "incomplete": not onboarding_data.get("completed", True),
                        "steps": onboarding_data.get("steps", [])
                    }
                return {"incomplete": False, "steps": []}
            except Exception as e:
                logger.warning(f"Failed to fetch onboarding status: {e}")
                return {"incomplete": False, "steps": []}
        # Execute all fetches in parallel
        pending_pos, critical_alerts, onboarding = await asyncio.gather(
            fetch_pending_pos(),
            fetch_critical_alerts(),
            fetch_onboarding()
        )
        onboarding_incomplete = onboarding["incomplete"]
        onboarding_steps = onboarding["steps"]
        # Build action queue
        actions = await dashboard_service.get_action_queue(
            tenant_id=tenant_id,
            pending_pos=pending_pos,
            critical_alerts=critical_alerts,
            onboarding_incomplete=onboarding_incomplete,
            onboarding_steps=onboarding_steps
        )
        # Count by urgency
        critical_count = sum(1 for a in actions if a["urgency"] == "critical")
        important_count = sum(1 for a in actions if a["urgency"] == "important")
        return ActionQueueResponse(
            actions=[ActionItem(**action) for action in actions[:10]],  # Show top 10
            totalActions=len(actions),
            criticalCount=critical_count,
            importantCount=important_count
        )
    except Exception as e:
        logger.error(f"Error getting action queue: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/production-timeline", response_model=ProductionTimelineResponse)
async def get_production_timeline(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> ProductionTimelineResponse:
    """
    Get today's production timeline.

    Shows what's being made today in chronological order with status and
    progress. A failing production service degrades to an empty timeline
    rather than an error.
    """
    try:
        dashboard_service = DashboardService(db)

        # Pull today's raw batches; swallow fetch errors and fall back to [].
        raw_batches: List[Dict[str, Any]] = []
        try:
            todays = await production_client.get_todays_batches(tenant_id)
            if todays:
                raw_batches = todays.get("batches", [])
        except Exception as exc:
            logger.warning(f"Failed to fetch production batches: {exc}")

        # Let the service shape the raw batches into timeline entries.
        timeline = await dashboard_service.get_production_timeline(
            tenant_id=tenant_id,
            batches=raw_batches
        )

        # Tally entries for the three statuses surfaced in the response.
        tallies = {"COMPLETED": 0, "IN_PROGRESS": 0, "PENDING": 0}
        for entry in timeline:
            state = entry["status"]
            if state in tallies:
                tallies[state] += 1

        return ProductionTimelineResponse(
            timeline=[ProductionTimelineItem(**entry) for entry in timeline],
            totalBatches=len(timeline),
            completedBatches=tallies["COMPLETED"],
            inProgressBatches=tallies["IN_PROGRESS"],
            pendingBatches=tallies["PENDING"]
        )
    except Exception as e:
        logger.error(f"Error getting production timeline: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/unified-action-queue")
async def get_unified_action_queue(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> Dict[str, Any]:
    """
    Get unified action queue with time-based grouping.

    Combines all alerts (PO approvals, delivery tracking, production, etc.)
    into URGENT (<6h), TODAY (<24h), and THIS WEEK (<7d) sections. The
    time-bucketing itself lives in DashboardService.
    """
    try:
        svc = DashboardService(db)

        # Ask the alert processor for everything; normalize a falsy reply to
        # an empty payload before extracting the alert list.
        payload = await alerts_client.get_alerts(tenant_id, limit=100) or {}
        alert_items = payload.get("alerts", [])

        return await svc.get_unified_action_queue(
            tenant_id=tenant_id,
            alerts=alert_items
        )
    except Exception as e:
        logger.error(f"Error getting unified action queue: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/execution-progress")
async def get_execution_progress(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> Dict[str, Any]:
    """
    Get execution progress for today's plan.

    Shows plan vs actual for production batches, deliveries, and approvals.

    Implementation notes:
      - Today's batches and pending POs are fetched concurrently.
      - Pending POs are fetched ONCE and reused both to count pending
        approvals and to filter deliveries expected today; the previous
        version issued the identical procurement request twice in parallel.
      - Each fetch degrades to an empty/zero default on failure so one
        unavailable service does not fail the endpoint.

    Raises:
        HTTPException: 500 on unexpected errors outside the fetch helpers.
    """
    try:
        # timezone is not exported by the module-level datetime import.
        from datetime import timezone
        dashboard_service = DashboardService(db)

        async def fetch_todays_batches():
            # Today's production batches; [] when the service is unavailable.
            try:
                batch_data = await production_client.get_todays_batches(tenant_id)
                if batch_data:
                    return batch_data.get("batches", [])
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch today's batches: {e}")
                return []

        async def fetch_pending_pos():
            # All pending purchase orders; [] on failure or unexpected shape.
            try:
                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
                if pos_result and isinstance(pos_result, list):
                    return pos_result
                return []
            except Exception as e:
                logger.warning(f"Failed to fetch pending purchase orders: {e}")
                return []

        # Execute in parallel (single procurement call serves both metrics).
        todays_batches, pending_pos = await asyncio.gather(
            fetch_todays_batches(),
            fetch_pending_pos()
        )

        # Derive the POs whose expected delivery falls within today (UTC).
        expected_deliveries = []
        try:
            today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
            today_end = today_start.replace(hour=23, minute=59, second=59)
            for po in pending_pos:
                expected_date = po.get("expected_delivery_date")
                if not expected_date:
                    continue
                if isinstance(expected_date, str):
                    # Normalize a trailing 'Z' so fromisoformat accepts it.
                    expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
                if today_start <= expected_date <= today_end:
                    expected_deliveries.append(po)
        except Exception as e:
            # Malformed dates degrade to "no deliveries today", as before.
            logger.warning(f"Failed to compute expected deliveries: {e}")
            expected_deliveries = []

        # Calculate progress
        progress = await dashboard_service.get_execution_progress(
            tenant_id=tenant_id,
            todays_batches=todays_batches,
            expected_deliveries=expected_deliveries,
            pending_approvals=len(pending_pos)
        )
        return progress
    except Exception as e:
        logger.error(f"Error getting execution progress: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/insights", response_model=InsightsResponse)
async def get_insights(
    tenant_id: str,
    db: AsyncSession = Depends(get_db)
) -> InsightsResponse:
    """
    Get key insights for dashboard grid.

    Provides glanceable metrics on savings, inventory, waste, and deliveries.

    Flow:
      1. Serve from cache when enabled and populated.
      2. Fan out four independent service calls concurrently; each degrades
         to an empty/zero payload on failure.
      3. Delegate metric computation to DashboardService, cache the
         assembled card data, and return it.

    Raises:
        HTTPException: 500 on unexpected errors outside the fetch helpers.
    """
    try:
        # Try to get from cache
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:insights:{tenant_id}"
            cached = await get_cached(cache_key)
            if cached:
                return InsightsResponse(**cached)
        dashboard_service = DashboardService(db)
        # timezone is not exported by the module-level datetime import.
        from datetime import timezone

        async def fetch_sustainability():
            # Waste/sustainability widget data; {} on failure.
            try:
                return await inventory_client.get_sustainability_widget(tenant_id) or {}
            except Exception as e:
                logger.warning(f"Failed to fetch sustainability data: {e}")
                return {}

        async def fetch_inventory():
            # Stock status; tolerates both dict and list payload shapes.
            try:
                raw_inventory_data = await inventory_client.get_stock_status(tenant_id)
                # Handle case where API returns a list instead of dict
                if isinstance(raw_inventory_data, dict):
                    return raw_inventory_data
                elif isinstance(raw_inventory_data, list):
                    # If it's a list, aggregate the data
                    return {
                        "low_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "low_stock"),
                        "out_of_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "out_of_stock"),
                        "total_items": len(raw_inventory_data)
                    }
                return {}
            except Exception as e:
                logger.warning(f"Failed to fetch inventory data: {e}")
                return {}

        async def fetch_deliveries():
            # Count pending POs whose expected delivery date is today (UTC).
            try:
                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
                if pos_result and isinstance(pos_result, list):
                    today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
                    today_end = today_start.replace(hour=23, minute=59, second=59)
                    deliveries_today = 0
                    for po in pos_result:
                        expected_date = po.get("expected_delivery_date")
                        if expected_date:
                            if isinstance(expected_date, str):
                                # Normalize a trailing 'Z' so fromisoformat accepts it.
                                expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
                            if today_start <= expected_date <= today_end:
                                deliveries_today += 1
                    return {"deliveries_today": deliveries_today}
                return {}
            except Exception as e:
                logger.warning(f"Failed to fetch delivery data: {e}")
                return {}

        async def fetch_savings():
            # Prevented-issue savings from alert analytics; zeros on failure.
            try:
                analytics = await alerts_client.get_dashboard_analytics(tenant_id, days=7)
                if analytics:
                    weekly_savings = analytics.get('estimated_savings_eur', 0)
                    prevented_count = analytics.get('prevented_issues_count', 0)
                    # Calculate trend from period comparison
                    period_comparison = analytics.get('period_comparison', {})
                    current_prevented = period_comparison.get('current_prevented', 0)
                    previous_prevented = period_comparison.get('previous_prevented', 0)
                    trend_percentage = 0
                    if previous_prevented > 0:
                        trend_percentage = ((current_prevented - previous_prevented) / previous_prevented) * 100
                    return {
                        "weekly_savings": round(weekly_savings, 2),
                        "trend_percentage": round(trend_percentage, 1),
                        "prevented_count": prevented_count
                    }
                return {"weekly_savings": 0, "trend_percentage": 0, "prevented_count": 0}
            except Exception as e:
                logger.warning(f"Failed to calculate savings data: {e}")
                return {"weekly_savings": 0, "trend_percentage": 0, "prevented_count": 0}

        # Execute all fetches in parallel
        sustainability_data, inventory_data, delivery_data, savings_data = await asyncio.gather(
            fetch_sustainability(),
            fetch_inventory(),
            fetch_deliveries(),
            fetch_savings()
        )
        # Merge delivery data into inventory data so the delivery count rides
        # along in the inventory payload handed to the service.
        inventory_data.update(delivery_data)
        # Calculate insights
        insights = await dashboard_service.calculate_insights(
            tenant_id=tenant_id,
            sustainability_data=sustainability_data,
            inventory_data=inventory_data,
            savings_data=savings_data
        )
        # Prepare response
        response_data = {
            "savings": insights["savings"],
            "inventory": insights["inventory"],
            "waste": insights["waste"],
            "deliveries": insights["deliveries"]
        }
        # Cache the result
        if settings.CACHE_ENABLED:
            cache_key = f"dashboard:insights:{tenant_id}"
            await set_cached(cache_key, response_data, ttl=settings.CACHE_TTL_INSIGHTS)
        # Pydantic coerces the nested card dicts into InsightCard models,
        # exactly as it already does for the cached-response path above.
        return InsightsResponse(**response_data)
    except Exception as e:
        logger.error(f"Error getting insights: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))