New alert service
services/production/app/api/batch.py (new file, 165 lines)
@@ -0,0 +1,165 @@
# services/production/app/api/batch.py
"""
Production Batch API - Batch operations for enterprise dashboards

Phase 2 optimization: Eliminate N+1 query patterns by fetching production data
for multiple tenants in a single request.
"""

from fastapi import APIRouter, Depends, HTTPException, Body
from typing import List, Dict, Any
from uuid import UUID
from pydantic import BaseModel, Field
import structlog
import asyncio

from app.services.production_service import ProductionService
from app.core.config import settings
from shared.auth.decorators import get_current_user_dep

router = APIRouter(tags=["production-batch"])
logger = structlog.get_logger()


def get_production_service() -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    return ProductionService(database_manager, settings)


class ProductionSummaryBatchRequest(BaseModel):
    """Request model for batch production summary"""
    tenant_ids: List[str] = Field(..., description="List of tenant IDs", max_length=100)


class ProductionSummary(BaseModel):
    """Production summary for a single tenant"""
    tenant_id: str
    total_batches: int
    pending_batches: int
    in_progress_batches: int
    completed_batches: int
    on_hold_batches: int
    cancelled_batches: int
    total_planned_quantity: float
    total_actual_quantity: float
    efficiency_rate: float


@router.post("/batch/production-summary", response_model=Dict[str, ProductionSummary])
async def get_production_summary_batch(
    request: ProductionSummaryBatchRequest = Body(...),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get production summary for multiple tenants in a single request.

    Optimized for enterprise dashboards to eliminate N+1 query patterns.
    Fetches production data for all tenants in parallel.

    Args:
        request: Batch request with tenant IDs

    Returns:
        Dictionary mapping tenant_id -> production summary

    Example:
        POST /api/v1/production/batch/production-summary
        {
            "tenant_ids": ["tenant-1", "tenant-2", "tenant-3"]
        }

        Response:
        {
            "tenant-1": {"tenant_id": "tenant-1", "total_batches": 25, ...},
            "tenant-2": {"tenant_id": "tenant-2", "total_batches": 18, ...},
            "tenant-3": {"tenant_id": "tenant-3", "total_batches": 32, ...}
        }
    """
    try:
        if len(request.tenant_ids) > 100:
            raise HTTPException(
                status_code=400,
                detail="Maximum 100 tenant IDs allowed per batch request"
            )

        if not request.tenant_ids:
            return {}

        logger.info(
            "Batch fetching production summaries",
            tenant_count=len(request.tenant_ids)
        )

        async def fetch_tenant_production(tenant_id: str) -> tuple[str, ProductionSummary]:
            """Fetch production summary for a single tenant"""
            try:
                tenant_uuid = UUID(tenant_id)
                summary = await production_service.get_dashboard_summary(tenant_uuid)

                # Calculate efficiency rate
                efficiency_rate = 0.0
                if summary.total_planned_quantity > 0 and summary.total_actual_quantity is not None:
                    efficiency_rate = (summary.total_actual_quantity / summary.total_planned_quantity) * 100

                return tenant_id, ProductionSummary(
                    tenant_id=tenant_id,
                    total_batches=int(summary.total_batches or 0),
                    pending_batches=int(summary.pending_batches or 0),
                    in_progress_batches=int(summary.in_progress_batches or 0),
                    completed_batches=int(summary.completed_batches or 0),
                    on_hold_batches=int(summary.on_hold_batches or 0),
                    cancelled_batches=int(summary.cancelled_batches or 0),
                    total_planned_quantity=float(summary.total_planned_quantity or 0),
                    total_actual_quantity=float(summary.total_actual_quantity or 0),
                    efficiency_rate=efficiency_rate
                )
            except Exception as e:
                logger.warning(
                    "Failed to fetch production for tenant in batch",
                    tenant_id=tenant_id,
                    error=str(e)
                )
                return tenant_id, ProductionSummary(
                    tenant_id=tenant_id,
                    total_batches=0,
                    pending_batches=0,
                    in_progress_batches=0,
                    completed_batches=0,
                    on_hold_batches=0,
                    cancelled_batches=0,
                    total_planned_quantity=0.0,
                    total_actual_quantity=0.0,
                    efficiency_rate=0.0
                )

        # Fetch all tenant production data in parallel
        tasks = [fetch_tenant_production(tid) for tid in request.tenant_ids]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Build result dictionary
        result_dict = {}
        for result in results:
            if isinstance(result, Exception):
                logger.error("Exception in batch production fetch", error=str(result))
                continue
            tenant_id, summary = result
            result_dict[tenant_id] = summary

        logger.info(
            "Batch production summaries retrieved",
            requested_count=len(request.tenant_ids),
            successful_count=len(result_dict)
        )

        return result_dict

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error in batch production summary", error=str(e), exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Failed to fetch batch production summaries: {str(e)}"
        )
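For reference, this is roughly how a dashboard client would call the new batch endpoint. A minimal sketch only: the service base URL, the /api/v1/production mount prefix, and the bearer-token header are assumptions not shown in this commit.

import asyncio
import httpx

async def fetch_production_summaries(tenant_ids: list[str]) -> dict:
    # Hypothetical caller; host and route prefix are assumed, auth is enforced by get_current_user_dep.
    async with httpx.AsyncClient(base_url="https://api.example.com") as client:
        resp = await client.post(
            "/api/v1/production/batch/production-summary",
            json={"tenant_ids": tenant_ids},
            headers={"Authorization": "Bearer <token>"},
            timeout=30.0,
        )
        resp.raise_for_status()
        return resp.json()  # {tenant_id: {"total_batches": ..., "efficiency_rate": ...}, ...}

asyncio.run(fetch_production_summaries(["tenant-1", "tenant-2", "tenant-3"]))

One round trip replaces a per-tenant dashboard call per tenant, which is the N+1 elimination the module docstring describes.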
services/production/app/api/internal_alert_trigger.py (new file, 85 lines)
@@ -0,0 +1,85 @@
# services/production/app/api/internal_alert_trigger.py
"""
Internal API for triggering production alerts.
Used by demo session cloning to generate realistic production delay alerts.
"""

from fastapi import APIRouter, HTTPException, Request, Path
from uuid import UUID
import structlog

logger = structlog.get_logger()

router = APIRouter()


@router.post("/api/internal/production-alerts/trigger/{tenant_id}")
async def trigger_production_alerts(
    tenant_id: UUID = Path(..., description="Tenant ID to check production for"),
    request: Request = None
) -> dict:
    """
    Trigger production alert checks for a specific tenant (internal use only).

    This endpoint is called by the demo session cloning process after production
    batches are seeded to generate realistic production delay alerts.

    Security: Protected by X-Internal-Service header check.
    """
    try:
        # Verify internal service header
        if not request or request.headers.get("X-Internal-Service") not in ["demo-session", "internal"]:
            logger.warning("Unauthorized internal API call", tenant_id=str(tenant_id))
            raise HTTPException(
                status_code=403,
                detail="This endpoint is for internal service use only"
            )

        # Get production alert service from app state
        production_alert_service = getattr(request.app.state, 'production_alert_service', None)

        if not production_alert_service:
            logger.error("Production alert service not initialized")
            raise HTTPException(
                status_code=500,
                detail="Production alert service not available"
            )

        # Trigger production alert checks (checks all tenants, including this one)
        logger.info("Triggering production alert checks", tenant_id=str(tenant_id))
        await production_alert_service.check_production_delays()

        # Return success (service checks all tenants, we can't get a specific count)
        result = {"total_alerts": 0, "message": "Production alert checks triggered"}

        logger.info(
            "Production alert checks completed",
            tenant_id=str(tenant_id),
            alerts_generated=result.get("total_alerts", 0)
        )

        return {
            "success": True,
            "tenant_id": str(tenant_id),
            "alerts_generated": result.get("total_alerts", 0),
            "breakdown": {
                "critical": result.get("critical", 0),
                "high": result.get("high", 0),
                "medium": result.get("medium", 0),
                "low": result.get("low", 0)
            }
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            "Error triggering production alerts",
            tenant_id=str(tenant_id),
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to trigger production alerts: {str(e)}"
        )
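For context, a hedged sketch of how the demo-session cloning flow might invoke this endpoint after seeding batches. The internal hostname and port are assumptions; the X-Internal-Service value matches the allow-list checked above.

import httpx

async def trigger_demo_production_alerts(tenant_id: str) -> dict:
    # Hypothetical internal caller; "http://production:8000" is an assumed service address.
    async with httpx.AsyncClient(base_url="http://production:8000") as client:
        resp = await client.post(
            f"/api/internal/production-alerts/trigger/{tenant_id}",
            headers={"X-Internal-Service": "demo-session"},
            timeout=60.0,
        )
        resp.raise_for_status()
        return resp.json()  # {"success": true, "alerts_generated": 0, "breakdown": {...}}

The header check is a lightweight guard rather than real authentication, so it presumably relies on the route only being reachable from the internal network.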
@@ -25,6 +25,7 @@ from app.schemas.production import (
     ProductionStatusEnum
 )
 from app.core.config import settings
+from app.utils.cache import get_cached, set_cached, make_cache_key
 
 logger = structlog.get_logger()
 route_builder = RouteBuilder('production')
@@ -56,8 +57,23 @@ async def list_production_batches(
     current_user: dict = Depends(get_current_user_dep),
     production_service: ProductionService = Depends(get_production_service)
 ):
-    """List batches with filters: date, status, product, order_id"""
+    """List batches with filters: date, status, product, order_id (with Redis caching - 20s TTL)"""
     try:
+        # PERFORMANCE OPTIMIZATION: Cache frequently accessed queries (status filter, first page)
+        cache_key = None
+        if page == 1 and product_id is None and order_id is None and start_date is None and end_date is None:
+            # Cache simple status-filtered queries (common for dashboards)
+            cache_key = make_cache_key(
+                "production_batches",
+                str(tenant_id),
+                status=status.value if status else None,
+                page_size=page_size
+            )
+            cached_result = await get_cached(cache_key)
+            if cached_result is not None:
+                logger.debug("Cache hit for production batches", cache_key=cache_key, tenant_id=str(tenant_id), status=status)
+                return ProductionBatchListResponse(**cached_result)
+
         filters = {
             "status": status,
             "product_id": str(product_id) if product_id else None,
@@ -68,6 +84,11 @@ async def list_production_batches(
 
         batch_list = await production_service.get_production_batches_list(tenant_id, filters, page, page_size)
 
+        # Cache the result if applicable (20s TTL for production batches)
+        if cache_key:
+            await set_cached(cache_key, batch_list.model_dump(), ttl=20)
+            logger.debug("Cached production batches", cache_key=cache_key, ttl=20, tenant_id=str(tenant_id), status=status)
+
         logger.info("Retrieved production batches list",
                     tenant_id=str(tenant_id), filters=filters)
 
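The get_cached, set_cached, and make_cache_key helpers imported from app.utils.cache are not part of this diff. A plausible minimal shape, assuming an async Redis backend via redis.asyncio (module contents, key format, and connection URL are all assumptions):

import json
from typing import Any, Optional

import redis.asyncio as redis

redis_client = redis.from_url("redis://localhost:6379/0")  # assumed connection settings

def make_cache_key(prefix: str, *parts: str, **kwargs: Any) -> str:
    # e.g. production_batches:<tenant_id>:page_size=20:status=completed
    kv = ":".join(f"{k}={v}" for k, v in sorted(kwargs.items()))
    return ":".join([prefix, *parts, kv]) if kv else ":".join([prefix, *parts])

async def get_cached(key: str) -> Optional[dict]:
    raw = await redis_client.get(key)
    return json.loads(raw) if raw is not None else None

async def set_cached(key: str, value: dict, ttl: int = 60) -> None:
    # default=str keeps UUIDs, Decimals and datetimes from model_dump() serializable
    await redis_client.set(key, json.dumps(value, default=str), ex=ttl)

Only the first page of simple status-filtered queries is cached, which the code comment flags as the dashboard-common case; every other combination of filters falls through to the service.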
@@ -14,6 +14,7 @@ from shared.routing import RouteBuilder
 from app.services.production_service import ProductionService
 from app.schemas.production import ProductionDashboardSummary
 from app.core.config import settings
+from app.utils.cache import get_cached, set_cached, make_cache_key
 
 logger = structlog.get_logger()
 route_builder = RouteBuilder('production')
@@ -35,10 +36,22 @@ async def get_dashboard_summary(
     current_user: dict = Depends(get_current_user_dep),
     production_service: ProductionService = Depends(get_production_service)
 ):
-    """Get production dashboard summary"""
+    """Get production dashboard summary with caching (60s TTL)"""
     try:
+        # PHASE 2: Check cache first
+        cache_key = make_cache_key("production_dashboard", str(tenant_id))
+        cached_result = await get_cached(cache_key)
+        if cached_result is not None:
+            logger.debug("Cache hit for production dashboard", cache_key=cache_key, tenant_id=str(tenant_id))
+            return ProductionDashboardSummary(**cached_result)
+
+        # Cache miss - fetch from database
         summary = await production_service.get_dashboard_summary(tenant_id)
 
+        # PHASE 2: Cache the result (60s TTL for production batches)
+        await set_cached(cache_key, summary.model_dump(), ttl=60)
+        logger.debug("Cached production dashboard", cache_key=cache_key, ttl=60, tenant_id=str(tenant_id))
+
         logger.info("Retrieved production dashboard summary",
                     tenant_id=str(tenant_id))
 
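Both caches rely purely on TTL expiry (20s for the batch list, 60s for the dashboard), so reads can trail writes by up to that long. If fresher reads ever matter, the write paths could invalidate explicitly; a hedged sketch assuming a hypothetical delete_cached helper that does not appear in this commit:

from app.utils.cache import make_cache_key, delete_cached  # delete_cached is assumed, not in this diff

async def invalidate_production_caches(tenant_id: str) -> None:
    # Call after creating or updating a production batch so the dashboard reflects it immediately.
    await delete_cached(make_cache_key("production_dashboard", tenant_id))
    # The list keys vary by status and page_size, so clearing them all would need a pattern
    # delete (SCAN + DEL); relying on the 20s TTL may be the simpler trade-off there.

Whether the extra Redis round trip on every write is worth it depends on how stale the dashboards are allowed to be.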