Improve the UI and tests
@@ -11,10 +11,12 @@ from typing import Dict, Any, List, Optional
 from pydantic import BaseModel, Field
 from datetime import datetime
 import logging
+import asyncio

 from app.core.database import get_db
 from app.core.config import settings
 from ..services.dashboard_service import DashboardService
+from ..utils.cache import get_cached, set_cached, delete_pattern
 from shared.clients import (
     get_inventory_client,
     get_production_client,
@@ -194,45 +196,59 @@ async def get_bakery_health_status(
     or if there are issues requiring attention.
     """
     try:
+        # Try to get from cache
+        if settings.CACHE_ENABLED:
+            cache_key = f"dashboard:health:{tenant_id}"
+            cached = await get_cached(cache_key)
+            if cached:
+                return BakeryHealthStatusResponse(**cached)
+
         dashboard_service = DashboardService(db)

-        # Gather metrics from various services
-        # In a real implementation, these would be fetched from respective services
-        # For now, we'll make HTTP calls to the services
+        # Gather metrics from various services in parallel
+        # Use asyncio.gather to make all HTTP calls concurrently

         # Get alerts summary
-        try:
-            alerts_data = await alerts_client.get_alerts_summary(tenant_id) or {}
-            critical_alerts = alerts_data.get("critical_count", 0)
-        except Exception as e:
-            logger.warning(f"Failed to fetch alerts: {e}")
-            critical_alerts = 0
+        async def fetch_alerts():
+            try:
+                alerts_data = await alerts_client.get_alerts_summary(tenant_id) or {}
+                return alerts_data.get("critical_count", 0)
+            except Exception as e:
+                logger.warning(f"Failed to fetch alerts: {e}")
+                return 0

         # Get pending PO count
-        try:
-            po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100) or []
-            pending_approvals = len(po_data) if isinstance(po_data, list) else 0
-        except Exception as e:
-            logger.warning(f"Failed to fetch POs: {e}")
-            pending_approvals = 0
+        async def fetch_pending_pos():
+            try:
+                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100) or []
+                return len(po_data) if isinstance(po_data, list) else 0
+            except Exception as e:
+                logger.warning(f"Failed to fetch POs: {e}")
+                return 0

         # Get production delays
-        try:
-            prod_data = await production_client.get_production_batches_by_status(
-                tenant_id, status="ON_HOLD", limit=100
-            ) or {}
-            production_delays = len(prod_data.get("batches", []))
-        except Exception as e:
-            logger.warning(f"Failed to fetch production batches: {e}")
-            production_delays = 0
+        async def fetch_production_delays():
+            try:
+                prod_data = await production_client.get_production_batches_by_status(
+                    tenant_id, status="ON_HOLD", limit=100
+                ) or {}
+                return len(prod_data.get("batches", []))
+            except Exception as e:
+                logger.warning(f"Failed to fetch production batches: {e}")
+                return 0

         # Get inventory status
-        try:
-            inv_data = await inventory_client.get_inventory_dashboard(tenant_id) or {}
-            out_of_stock_count = inv_data.get("out_of_stock_count", 0)
-        except Exception as e:
-            logger.warning(f"Failed to fetch inventory: {e}")
-            out_of_stock_count = 0
+        async def fetch_inventory():
+            try:
+                inv_data = await inventory_client.get_inventory_dashboard(tenant_id) or {}
+                return inv_data.get("out_of_stock_count", 0)
+            except Exception as e:
+                logger.warning(f"Failed to fetch inventory: {e}")
+                return 0

+        # Execute all fetches in parallel
+        critical_alerts, pending_approvals, production_delays, out_of_stock_count = await asyncio.gather(
+            fetch_alerts(),
+            fetch_pending_pos(),
+            fetch_production_delays(),
+            fetch_inventory()
+        )
+
         # System errors (would come from monitoring system)
         system_errors = 0
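A note on the refactor in this hunk: the old version awaited the four client calls one after another, so the endpoint's latency was the sum of four round-trips; the new version wraps each call in a small coroutine and awaits them together, so latency is roughly that of the slowest single call. Because every `fetch_*` helper catches its own exceptions and returns a fallback, the `asyncio.gather` call itself should not raise; if the helpers did not swallow errors, `asyncio.gather(..., return_exceptions=True)` would be the usual way to keep one failing call from taking down the rest. A minimal, self-contained sketch of the same pattern, with the HTTP clients stubbed by `asyncio.sleep`:

```python
import asyncio
import time

# Stand-ins for the service clients; each sleep simulates one network round-trip.
async def fetch_alerts_stub():
    await asyncio.sleep(0.2)
    return 3  # e.g. critical_count

async def fetch_inventory_stub():
    await asyncio.sleep(0.2)
    return 1  # e.g. out_of_stock_count

async def main():
    # Sequential: total time is the sum of the awaits (~0.4s here).
    start = time.perf_counter()
    alerts = await fetch_alerts_stub()
    stock = await fetch_inventory_stub()
    print(f"sequential: {time.perf_counter() - start:.2f}s", alerts, stock)

    # Concurrent: both coroutines are in flight at once (~0.2s here).
    start = time.perf_counter()
    alerts, stock = await asyncio.gather(fetch_alerts_stub(), fetch_inventory_stub())
    print(f"concurrent: {time.perf_counter() - start:.2f}s", alerts, stock)

asyncio.run(main())
```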
@@ -247,6 +263,11 @@ async def get_bakery_health_status(
             system_errors=system_errors
         )

+        # Cache the result
+        if settings.CACHE_ENABLED:
+            cache_key = f"dashboard:health:{tenant_id}"
+            await set_cached(cache_key, health_status, ttl=settings.CACHE_TTL_HEALTH)
+
         return BakeryHealthStatusResponse(**health_status)

     except Exception as e:
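The caching added in this hunk and in the endpoints below follows the same read-through shape: probe the cache, compute on a miss, write back with a TTL. A sketch of that shape factored into one helper, assuming `get_cached`/`set_cached` behave as they are used in this diff (async, returning a dict or `None`, TTL in seconds), with `compute` standing in for the endpoint body:

```python
from typing import Awaitable, Callable, Optional

# from ..utils.cache import get_cached, set_cached  # the helpers this file already imports

async def read_through(
    cache_key: str,
    compute: Callable[[], Awaitable[dict]],
    ttl: int,
    enabled: bool = True,
) -> dict:
    """Return the cached payload if present; otherwise compute, write back, and return it."""
    if enabled:
        cached: Optional[dict] = await get_cached(cache_key)  # assumed: returns dict or None
        if cached is not None:
            return cached
    result = await compute()
    if enabled:
        await set_cached(cache_key, result, ttl=ttl)  # assumed: TTL in seconds
    return result
```

One subtlety the diff's own `if cached:` check inherits: an empty dict from the cache is falsy, so a cached-but-empty payload is treated as a miss and recomputed; `if cached is not None:` avoids that.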
@@ -267,6 +288,13 @@ async def get_orchestration_summary(
     and why, helping build user trust in the system.
     """
     try:
+        # Try to get from cache (only if no specific run_id is provided)
+        if settings.CACHE_ENABLED and run_id is None:
+            cache_key = f"dashboard:summary:{tenant_id}"
+            cached = await get_cached(cache_key)
+            if cached:
+                return OrchestrationSummaryResponse(**cached)
+
         dashboard_service = DashboardService(db)

         # Get orchestration summary
@@ -307,6 +335,11 @@ async def get_orchestration_summary(
         except Exception as e:
             logger.warning(f"Failed to fetch batch details: {e}")

+        # Cache the result (only if no specific run_id)
+        if settings.CACHE_ENABLED and run_id is None:
+            cache_key = f"dashboard:summary:{tenant_id}"
+            await set_cached(cache_key, summary, ttl=settings.CACHE_TTL_SUMMARY)
+
         return OrchestrationSummaryResponse(**summary)

     except Exception as e:
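Worth calling out in the summary endpoint: the cache is consulted and written only when `run_id is None`, so a request for one specific run is never served the cached tenant-wide default (and vice versa). A hypothetical alternative, not what this commit does, would be to fold the run into the key so both shapes are cacheable:

```python
from typing import Optional

# Hypothetical variant: scope the key by run so per-run summaries could be
# cached too, instead of always being recomputed.
def summary_cache_key(tenant_id: str, run_id: Optional[str]) -> str:
    suffix = run_id if run_id is not None else "latest"
    return f"dashboard:summary:{tenant_id}:{suffix}"
```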
@@ -328,38 +361,52 @@ async def get_action_queue(
     try:
         dashboard_service = DashboardService(db)

-        # Fetch data from various services
-        # Get pending POs
-        pending_pos = []
-        try:
-            po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=20)
-            if po_data and isinstance(po_data, list):
-                pending_pos = po_data
-        except Exception as e:
-            logger.warning(f"Failed to fetch pending POs: {e}")
+        # Fetch data from various services in parallel
+        async def fetch_pending_pos():
+            try:
+                po_data = await procurement_client.get_pending_purchase_orders(tenant_id, limit=20)
+                if po_data and isinstance(po_data, list):
+                    return po_data
+                return []
+            except Exception as e:
+                logger.warning(f"Failed to fetch pending POs: {e}")
+                return []

-        # Get critical alerts
-        critical_alerts = []
-        try:
-            alerts_data = await alerts_client.get_critical_alerts(tenant_id, limit=20)
-            if alerts_data:
-                critical_alerts = alerts_data.get("alerts", [])
-        except Exception as e:
-            logger.warning(f"Failed to fetch alerts: {e}")
+        async def fetch_critical_alerts():
+            try:
+                alerts_data = await alerts_client.get_critical_alerts(tenant_id, limit=20)
+                if alerts_data:
+                    return alerts_data.get("alerts", [])
+                return []
+            except Exception as e:
+                logger.warning(f"Failed to fetch alerts: {e}")
+                return []

-        # Get onboarding status
-        onboarding_incomplete = False
-        onboarding_steps = []
-        try:
-            onboarding_data = await procurement_client.get(
-                "/procurement/auth/onboarding-progress",
-                tenant_id=tenant_id
-            )
-            if onboarding_data:
-                onboarding_incomplete = not onboarding_data.get("completed", True)
-                onboarding_steps = onboarding_data.get("steps", [])
-        except Exception as e:
-            logger.warning(f"Failed to fetch onboarding status: {e}")
+        async def fetch_onboarding():
+            try:
+                onboarding_data = await procurement_client.get(
+                    "/procurement/auth/onboarding-progress",
+                    tenant_id=tenant_id
+                )
+                if onboarding_data:
+                    return {
+                        "incomplete": not onboarding_data.get("completed", True),
+                        "steps": onboarding_data.get("steps", [])
+                    }
+                return {"incomplete": False, "steps": []}
+            except Exception as e:
+                logger.warning(f"Failed to fetch onboarding status: {e}")
+                return {"incomplete": False, "steps": []}
+
+        # Execute all fetches in parallel
+        pending_pos, critical_alerts, onboarding = await asyncio.gather(
+            fetch_pending_pos(),
+            fetch_critical_alerts(),
+            fetch_onboarding()
+        )
+
+        onboarding_incomplete = onboarding["incomplete"]
+        onboarding_steps = onboarding["steps"]

         # Build action queue
         actions = await dashboard_service.get_action_queue(
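Since the commit message also mentions tests: the swallow-and-fallback behaviour of these fetch helpers is easy to pin down in a unit test with `unittest.mock.AsyncMock`, without any running service. A hypothetical pytest sketch (requires pytest-asyncio; the names mirror this diff, but the helpers are redefined locally against a stub client):

```python
import asyncio
from unittest.mock import AsyncMock

import pytest

@pytest.mark.asyncio
async def test_failed_fetch_falls_back_without_breaking_gather():
    # Stand-in client whose call raises, simulating a downstream outage.
    client = AsyncMock()
    client.get_critical_alerts.side_effect = RuntimeError("alerts service down")

    async def fetch_critical_alerts():
        # Mirrors the helper in this diff: swallow the error, return a default.
        try:
            data = await client.get_critical_alerts("tenant-1", limit=20)
            return data.get("alerts", []) if data else []
        except Exception:
            return []

    async def fetch_pending_pos():
        return [{"id": "po-1"}]  # healthy sibling task

    alerts, pos = await asyncio.gather(fetch_critical_alerts(), fetch_pending_pos())

    assert alerts == []              # failure swallowed, default returned
    assert pos == [{"id": "po-1"}]   # concurrent task unaffected
```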
@@ -443,93 +490,106 @@ async def get_insights(
     Provides glanceable metrics on savings, inventory, waste, and deliveries.
     """
    try:
+        # Try to get from cache
+        if settings.CACHE_ENABLED:
+            cache_key = f"dashboard:insights:{tenant_id}"
+            cached = await get_cached(cache_key)
+            if cached:
+                return InsightsResponse(**cached)
+
         dashboard_service = DashboardService(db)

-        # Fetch data from various services
-        # Sustainability data
-        sustainability_data = {}
-        try:
-            sustainability_data = await inventory_client.get_sustainability_widget(tenant_id) or {}
-        except Exception as e:
-            logger.warning(f"Failed to fetch sustainability data: {e}")
+        # Fetch data from various services in parallel
+        from datetime import datetime, timedelta, timezone
+
+        async def fetch_sustainability():
+            try:
+                return await inventory_client.get_sustainability_widget(tenant_id) or {}
+            except Exception as e:
+                logger.warning(f"Failed to fetch sustainability data: {e}")
+                return {}

-        # Inventory data
-        inventory_data = {}
-        try:
-            raw_inventory_data = await inventory_client.get_stock_status(tenant_id)
-            # Handle case where API returns a list instead of dict
-            if isinstance(raw_inventory_data, dict):
-                inventory_data = raw_inventory_data
-            elif isinstance(raw_inventory_data, list):
-                # If it's a list, aggregate the data
-                inventory_data = {
-                    "low_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "low_stock"),
-                    "out_of_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "out_of_stock"),
-                    "total_items": len(raw_inventory_data)
-                }
-            else:
-                inventory_data = {}
-        except Exception as e:
-            logger.warning(f"Failed to fetch inventory data: {e}")
+        async def fetch_inventory():
+            try:
+                raw_inventory_data = await inventory_client.get_stock_status(tenant_id)
+                # Handle case where API returns a list instead of dict
+                if isinstance(raw_inventory_data, dict):
+                    return raw_inventory_data
+                elif isinstance(raw_inventory_data, list):
+                    # If it's a list, aggregate the data
+                    return {
+                        "low_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "low_stock"),
+                        "out_of_stock_count": sum(1 for item in raw_inventory_data if item.get("status") == "out_of_stock"),
+                        "total_items": len(raw_inventory_data)
+                    }
+                return {}
+            except Exception as e:
+                logger.warning(f"Failed to fetch inventory data: {e}")
+                return {}

-        # Deliveries data from procurement
-        delivery_data = {}
-        try:
-            # Get recent POs with pending deliveries
-            pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
-            if pos_result and isinstance(pos_result, list):
-                # Count deliveries expected today
-                from datetime import datetime, timezone
-                today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
-                today_end = today_start.replace(hour=23, minute=59, second=59)
-
-                deliveries_today = 0
-                for po in pos_result:
-                    expected_date = po.get("expected_delivery_date")
-                    if expected_date:
-                        if isinstance(expected_date, str):
-                            expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
-                        if today_start <= expected_date <= today_end:
-                            deliveries_today += 1
-
-                delivery_data = {"deliveries_today": deliveries_today}
-        except Exception as e:
-            logger.warning(f"Failed to fetch delivery data: {e}")
+        async def fetch_deliveries():
+            try:
+                # Get recent POs with pending deliveries
+                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=100)
+                if pos_result and isinstance(pos_result, list):
+                    # Count deliveries expected today
+                    today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
+                    today_end = today_start.replace(hour=23, minute=59, second=59)
+
+                    deliveries_today = 0
+                    for po in pos_result:
+                        expected_date = po.get("expected_delivery_date")
+                        if expected_date:
+                            if isinstance(expected_date, str):
+                                expected_date = datetime.fromisoformat(expected_date.replace('Z', '+00:00'))
+                            if today_start <= expected_date <= today_end:
+                                deliveries_today += 1
+
+                    return {"deliveries_today": deliveries_today}
+                return {}
+            except Exception as e:
+                logger.warning(f"Failed to fetch delivery data: {e}")
+                return {}

-        # Savings data - Calculate from recent PO price optimizations
-        savings_data = {}
-        try:
-            # Get recent POs (last 7 days) and sum up optimization savings
-            from datetime import datetime, timedelta, timezone
-            seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
-
-            pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=200)
-            if pos_result and isinstance(pos_result, list):
-                weekly_savings = 0
-                # Calculate savings from price optimization
-                for po in pos_result:
-                    # Check if PO was created in last 7 days
-                    created_at = po.get("created_at")
-                    if created_at:
-                        if isinstance(created_at, str):
-                            created_at = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
-                        if created_at >= seven_days_ago:
-                            # Sum up savings from optimization
-                            optimization_data = po.get("optimization_data", {})
-                            if isinstance(optimization_data, dict):
-                                savings = optimization_data.get("savings", 0) or 0
-                                weekly_savings += float(savings)
-
-                # Default trend percentage (would need historical data for real trend)
-                savings_data = {
-                    "weekly_savings": round(weekly_savings, 2),
-                    "trend_percentage": 12 if weekly_savings > 0 else 0
-                }
-            else:
-                savings_data = {"weekly_savings": 0, "trend_percentage": 0}
-        except Exception as e:
-            logger.warning(f"Failed to calculate savings data: {e}")
-            savings_data = {"weekly_savings": 0, "trend_percentage": 0}
+        async def fetch_savings():
+            try:
+                # Get recent POs (last 7 days) and sum up optimization savings
+                seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
+
+                pos_result = await procurement_client.get_pending_purchase_orders(tenant_id, limit=200)
+                if pos_result and isinstance(pos_result, list):
+                    weekly_savings = 0
+                    # Calculate savings from price optimization
+                    for po in pos_result:
+                        # Check if PO was created in last 7 days
+                        created_at = po.get("created_at")
+                        if created_at:
+                            if isinstance(created_at, str):
+                                created_at = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
+                            if created_at >= seven_days_ago:
+                                # Sum up savings from optimization
+                                optimization_data = po.get("optimization_data", {})
+                                if isinstance(optimization_data, dict):
+                                    savings = optimization_data.get("savings", 0) or 0
+                                    weekly_savings += float(savings)
+
+                    # Default trend percentage (would need historical data for real trend)
+                    return {
+                        "weekly_savings": round(weekly_savings, 2),
+                        "trend_percentage": 12 if weekly_savings > 0 else 0
+                    }
+                return {"weekly_savings": 0, "trend_percentage": 0}
+            except Exception as e:
+                logger.warning(f"Failed to calculate savings data: {e}")
+                return {"weekly_savings": 0, "trend_percentage": 0}
+
+        # Execute all fetches in parallel
+        sustainability_data, inventory_data, delivery_data, savings_data = await asyncio.gather(
+            fetch_sustainability(),
+            fetch_inventory(),
+            fetch_deliveries(),
+            fetch_savings()
+        )

         # Merge delivery data into inventory data
         inventory_data.update(delivery_data)
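One detail worth keeping from `fetch_deliveries` and `fetch_savings`: `datetime.fromisoformat` on Python 3.10 and earlier rejects the trailing `Z` that many APIs emit, hence the `replace('Z', '+00:00')` shim; Python 3.11+ parses the suffix natively. A quick illustration:

```python
from datetime import datetime

stamp = "2025-01-15T06:30:00Z"

# Portable (Python 3.7+): normalize the Zulu suffix to an explicit UTC offset.
parsed = datetime.fromisoformat(stamp.replace('Z', '+00:00'))
print(parsed.tzinfo)  # UTC

# On Python 3.11+ the suffix is accepted directly:
# datetime.fromisoformat("2025-01-15T06:30:00Z")
```

The comparison against `today_start` works only because both sides are timezone-aware; a naive `expected_date` (a string without an offset) would raise `TypeError`, land in the `except`, and silently zero the metric.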
@@ -542,6 +602,19 @@ async def get_insights(
             savings_data=savings_data
         )

+        # Prepare response
+        response_data = {
+            "savings": insights["savings"],
+            "inventory": insights["inventory"],
+            "waste": insights["waste"],
+            "deliveries": insights["deliveries"]
+        }
+
+        # Cache the result
+        if settings.CACHE_ENABLED:
+            cache_key = f"dashboard:insights:{tenant_id}"
+            await set_cached(cache_key, response_data, ttl=settings.CACHE_TTL_INSIGHTS)
+
         return InsightsResponse(
             savings=InsightCard(**insights["savings"]),
             inventory=InsightCard(**insights["inventory"]),