Delete legacy alerts
This commit is contained in:
@@ -14,12 +14,10 @@ import structlog
|
||||
from shared.auth.decorators import get_current_user_dep, get_current_tenant_id_dep
|
||||
from app.core.database import get_db
|
||||
from app.services.production_service import ProductionService
|
||||
from app.services.production_alert_service import ProductionAlertService
|
||||
from app.schemas.production import (
|
||||
ProductionBatchCreate, ProductionBatchUpdate, ProductionBatchStatusUpdate,
|
||||
ProductionBatchResponse, ProductionBatchListResponse,
|
||||
DailyProductionRequirements, ProductionDashboardSummary, ProductionMetrics,
|
||||
ProductionAlertResponse, ProductionAlertListResponse
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
@@ -34,10 +32,6 @@ def get_production_service() -> ProductionService:
|
||||
return ProductionService(database_manager, settings)
|
||||
|
||||
|
||||
def get_production_alert_service() -> ProductionAlertService:
|
||||
"""Dependency injection for production alert service"""
|
||||
from app.core.database import database_manager
|
||||
return ProductionAlertService(database_manager, settings)
|
||||
|
||||
|
||||
# ================================================================
|
||||
@@ -319,74 +313,6 @@ async def get_production_schedule(
|
||||
raise HTTPException(status_code=500, detail="Failed to get production schedule")
|
||||
|
||||
|
||||
# ================================================================
|
||||
# ALERTS ENDPOINTS
|
||||
# ================================================================
|
||||
|
||||
@router.get("/tenants/{tenant_id}/production/alerts", response_model=ProductionAlertListResponse)
|
||||
async def get_production_alerts(
|
||||
tenant_id: UUID = Path(...),
|
||||
active_only: bool = Query(True, description="Return only active alerts"),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
alert_service: ProductionAlertService = Depends(get_production_alert_service)
|
||||
):
|
||||
"""Get production-related alerts"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(status_code=403, detail="Access denied to this tenant")
|
||||
|
||||
if active_only:
|
||||
alerts = await alert_service.get_active_alerts(tenant_id)
|
||||
else:
|
||||
# Get all alerts (would need additional repo method)
|
||||
alerts = await alert_service.get_active_alerts(tenant_id)
|
||||
|
||||
alert_responses = [ProductionAlertResponse.model_validate(alert) for alert in alerts]
|
||||
|
||||
logger.info("Retrieved production alerts",
|
||||
count=len(alerts), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionAlertListResponse(
|
||||
alerts=alert_responses,
|
||||
total_count=len(alerts),
|
||||
page=1,
|
||||
page_size=len(alerts)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting production alerts",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get production alerts")
|
||||
|
||||
|
||||
@router.post("/tenants/{tenant_id}/production/alerts/{alert_id}/acknowledge", response_model=ProductionAlertResponse)
|
||||
async def acknowledge_alert(
|
||||
tenant_id: UUID = Path(...),
|
||||
alert_id: UUID = Path(...),
|
||||
current_tenant: str = Depends(get_current_tenant_id_dep),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
alert_service: ProductionAlertService = Depends(get_production_alert_service)
|
||||
):
|
||||
"""Acknowledge a production-related alert"""
|
||||
try:
|
||||
if str(tenant_id) != current_tenant:
|
||||
raise HTTPException(status_code=403, detail="Access denied to this tenant")
|
||||
|
||||
acknowledged_by = current_user.get("email", "unknown_user")
|
||||
alert = await alert_service.acknowledge_alert(tenant_id, alert_id, acknowledged_by)
|
||||
|
||||
logger.info("Acknowledged production alert",
|
||||
alert_id=str(alert_id),
|
||||
acknowledged_by=acknowledged_by,
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionAlertResponse.model_validate(alert)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error acknowledging production alert",
|
||||
error=str(e), alert_id=str(alert_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to acknowledge alert")
|
||||
|
||||
|
||||
# ================================================================
|
||||
|
||||
@@ -73,11 +73,6 @@ class ProductionSettings(BaseServiceSettings):
|
||||
HOLIDAY_PRODUCTION_FACTOR: float = float(os.getenv("HOLIDAY_PRODUCTION_FACTOR", "0.3"))
|
||||
SPECIAL_EVENT_PRODUCTION_FACTOR: float = float(os.getenv("SPECIAL_EVENT_PRODUCTION_FACTOR", "1.5"))
|
||||
|
||||
# Alert Thresholds
|
||||
CAPACITY_EXCEEDED_THRESHOLD: float = float(os.getenv("CAPACITY_EXCEEDED_THRESHOLD", "1.0"))
|
||||
PRODUCTION_DELAY_THRESHOLD_MINUTES: int = int(os.getenv("PRODUCTION_DELAY_THRESHOLD_MINUTES", "60"))
|
||||
LOW_YIELD_ALERT_THRESHOLD: float = float(os.getenv("LOW_YIELD_ALERT_THRESHOLD", "0.80"))
|
||||
URGENT_ORDER_THRESHOLD_HOURS: int = int(os.getenv("URGENT_ORDER_THRESHOLD_HOURS", "4"))
|
||||
|
||||
# Cost Management
|
||||
COST_TRACKING_ENABLED: bool = os.getenv("COST_TRACKING_ENABLED", "true").lower() == "true"
|
||||
|
||||
@@ -9,14 +9,12 @@ from .production import (
|
||||
ProductionBatch,
|
||||
ProductionSchedule,
|
||||
ProductionCapacity,
|
||||
QualityCheck,
|
||||
ProductionAlert
|
||||
QualityCheck
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"ProductionBatch",
|
||||
"ProductionSchedule",
|
||||
"ProductionCapacity",
|
||||
"QualityCheck",
|
||||
"ProductionAlert"
|
||||
"QualityCheck"
|
||||
]
|
||||
@@ -35,12 +35,6 @@ class ProductionPriority(str, enum.Enum):
|
||||
URGENT = "urgent"
|
||||
|
||||
|
||||
class AlertSeverity(str, enum.Enum):
|
||||
"""Alert severity levels"""
|
||||
LOW = "low"
|
||||
MEDIUM = "medium"
|
||||
HIGH = "high"
|
||||
CRITICAL = "critical"
|
||||
|
||||
|
||||
class ProductionBatch(Base):
|
||||
@@ -391,81 +385,3 @@ class QualityCheck(Base):
|
||||
}
|
||||
|
||||
|
||||
class ProductionAlert(Base):
|
||||
"""Production alert model for tracking production issues and notifications"""
|
||||
__tablename__ = "production_alerts"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Alert classification
|
||||
alert_type = Column(String(50), nullable=False, index=True) # capacity_exceeded, delay, quality_issue, etc.
|
||||
severity = Column(SQLEnum(AlertSeverity), nullable=False, default=AlertSeverity.MEDIUM)
|
||||
title = Column(String(255), nullable=False)
|
||||
message = Column(Text, nullable=False)
|
||||
|
||||
# Context
|
||||
batch_id = Column(UUID(as_uuid=True), nullable=True, index=True) # Associated batch if applicable
|
||||
schedule_id = Column(UUID(as_uuid=True), nullable=True, index=True) # Associated schedule if applicable
|
||||
source_system = Column(String(50), nullable=False, default="production")
|
||||
|
||||
# Status
|
||||
is_active = Column(Boolean, default=True)
|
||||
is_acknowledged = Column(Boolean, default=False)
|
||||
is_resolved = Column(Boolean, default=False)
|
||||
|
||||
# Actions and recommendations
|
||||
recommended_actions = Column(JSON, nullable=True) # List of suggested actions
|
||||
actions_taken = Column(JSON, nullable=True) # List of actions actually taken
|
||||
|
||||
# Business impact
|
||||
impact_level = Column(String(20), nullable=True) # low, medium, high, critical
|
||||
estimated_cost_impact = Column(Float, nullable=True)
|
||||
estimated_time_impact_minutes = Column(Integer, nullable=True)
|
||||
|
||||
# Resolution tracking
|
||||
acknowledged_by = Column(String(100), nullable=True)
|
||||
acknowledged_at = Column(DateTime(timezone=True), nullable=True)
|
||||
resolved_by = Column(String(100), nullable=True)
|
||||
resolved_at = Column(DateTime(timezone=True), nullable=True)
|
||||
resolution_notes = Column(Text, nullable=True)
|
||||
|
||||
# Alert data
|
||||
alert_data = Column(JSON, nullable=True) # Additional context data
|
||||
alert_metadata = Column(JSON, nullable=True) # Metadata for the alert
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"alert_type": self.alert_type,
|
||||
"severity": self.severity.value if self.severity else None,
|
||||
"title": self.title,
|
||||
"message": self.message,
|
||||
"batch_id": str(self.batch_id) if self.batch_id else None,
|
||||
"schedule_id": str(self.schedule_id) if self.schedule_id else None,
|
||||
"source_system": self.source_system,
|
||||
"is_active": self.is_active,
|
||||
"is_acknowledged": self.is_acknowledged,
|
||||
"is_resolved": self.is_resolved,
|
||||
"recommended_actions": self.recommended_actions,
|
||||
"actions_taken": self.actions_taken,
|
||||
"impact_level": self.impact_level,
|
||||
"estimated_cost_impact": self.estimated_cost_impact,
|
||||
"estimated_time_impact_minutes": self.estimated_time_impact_minutes,
|
||||
"acknowledged_by": self.acknowledged_by,
|
||||
"acknowledged_at": self.acknowledged_at.isoformat() if self.acknowledged_at else None,
|
||||
"resolved_by": self.resolved_by,
|
||||
"resolved_at": self.resolved_at.isoformat() if self.resolved_at else None,
|
||||
"resolution_notes": self.resolution_notes,
|
||||
"alert_data": self.alert_data,
|
||||
"alert_metadata": self.alert_metadata,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
|
||||
@@ -9,12 +9,10 @@ from .production_batch_repository import ProductionBatchRepository
|
||||
from .production_schedule_repository import ProductionScheduleRepository
|
||||
from .production_capacity_repository import ProductionCapacityRepository
|
||||
from .quality_check_repository import QualityCheckRepository
|
||||
from .production_alert_repository import ProductionAlertRepository
|
||||
|
||||
__all__ = [
|
||||
"ProductionBatchRepository",
|
||||
"ProductionScheduleRepository",
|
||||
"ProductionCapacityRepository",
|
||||
"QualityCheckRepository",
|
||||
"ProductionAlertRepository"
|
||||
]
|
||||
@@ -1,379 +0,0 @@
|
||||
"""
|
||||
Production Alert Repository
|
||||
Repository for production alert operations
|
||||
"""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, and_, text, desc, func
|
||||
from datetime import datetime, timedelta, date
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from .base import ProductionBaseRepository
|
||||
from app.models.production import ProductionAlert, AlertSeverity
|
||||
from shared.database.exceptions import DatabaseError, ValidationError
|
||||
from shared.database.transactions import transactional
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionAlertRepository(ProductionBaseRepository):
|
||||
"""Repository for production alert operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 60):
|
||||
# Alerts are very dynamic, very short cache time (1 minute)
|
||||
super().__init__(ProductionAlert, session, cache_ttl)
|
||||
|
||||
@transactional
|
||||
async def create_alert(self, alert_data: Dict[str, Any]) -> ProductionAlert:
|
||||
"""Create a new production alert with validation"""
|
||||
try:
|
||||
# Validate alert data
|
||||
validation_result = self._validate_production_data(
|
||||
alert_data,
|
||||
["tenant_id", "alert_type", "title", "message"]
|
||||
)
|
||||
|
||||
if not validation_result["is_valid"]:
|
||||
raise ValidationError(f"Invalid alert data: {validation_result['errors']}")
|
||||
|
||||
# Set default values
|
||||
if "severity" not in alert_data:
|
||||
alert_data["severity"] = AlertSeverity.MEDIUM
|
||||
if "source_system" not in alert_data:
|
||||
alert_data["source_system"] = "production"
|
||||
if "is_active" not in alert_data:
|
||||
alert_data["is_active"] = True
|
||||
if "is_acknowledged" not in alert_data:
|
||||
alert_data["is_acknowledged"] = False
|
||||
if "is_resolved" not in alert_data:
|
||||
alert_data["is_resolved"] = False
|
||||
|
||||
# Create alert
|
||||
alert = await self.create(alert_data)
|
||||
|
||||
logger.info("Production alert created successfully",
|
||||
alert_id=str(alert.id),
|
||||
alert_type=alert.alert_type,
|
||||
severity=alert.severity.value if alert.severity else None,
|
||||
tenant_id=str(alert.tenant_id))
|
||||
|
||||
return alert
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error creating production alert", error=str(e))
|
||||
raise DatabaseError(f"Failed to create production alert: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def get_active_alerts(
|
||||
self,
|
||||
tenant_id: str,
|
||||
severity: Optional[AlertSeverity] = None
|
||||
) -> List[ProductionAlert]:
|
||||
"""Get active production alerts for a tenant"""
|
||||
try:
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
"is_active": True,
|
||||
"is_resolved": False
|
||||
}
|
||||
|
||||
if severity:
|
||||
filters["severity"] = severity
|
||||
|
||||
alerts = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="created_at",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved active production alerts",
|
||||
count=len(alerts),
|
||||
severity=severity.value if severity else "all",
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return alerts
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching active alerts", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch active alerts: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def get_alerts_by_type(
|
||||
self,
|
||||
tenant_id: str,
|
||||
alert_type: str,
|
||||
include_resolved: bool = False
|
||||
) -> List[ProductionAlert]:
|
||||
"""Get production alerts by type"""
|
||||
try:
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
"alert_type": alert_type
|
||||
}
|
||||
|
||||
if not include_resolved:
|
||||
filters["is_resolved"] = False
|
||||
|
||||
alerts = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="created_at",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved alerts by type",
|
||||
count=len(alerts),
|
||||
alert_type=alert_type,
|
||||
include_resolved=include_resolved,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return alerts
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching alerts by type", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch alerts by type: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def get_alerts_by_batch(
|
||||
self,
|
||||
tenant_id: str,
|
||||
batch_id: str
|
||||
) -> List[ProductionAlert]:
|
||||
"""Get production alerts for a specific batch"""
|
||||
try:
|
||||
alerts = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"batch_id": batch_id
|
||||
},
|
||||
order_by="created_at",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved alerts by batch",
|
||||
count=len(alerts),
|
||||
batch_id=batch_id,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return alerts
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching alerts by batch", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch alerts by batch: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def acknowledge_alert(
|
||||
self,
|
||||
alert_id: UUID,
|
||||
acknowledged_by: str,
|
||||
acknowledgment_notes: Optional[str] = None
|
||||
) -> ProductionAlert:
|
||||
"""Acknowledge a production alert"""
|
||||
try:
|
||||
alert = await self.get(alert_id)
|
||||
if not alert:
|
||||
raise ValidationError(f"Alert {alert_id} not found")
|
||||
|
||||
if alert.is_acknowledged:
|
||||
raise ValidationError("Alert is already acknowledged")
|
||||
|
||||
update_data = {
|
||||
"is_acknowledged": True,
|
||||
"acknowledged_by": acknowledged_by,
|
||||
"acknowledged_at": datetime.utcnow(),
|
||||
"updated_at": datetime.utcnow()
|
||||
}
|
||||
|
||||
if acknowledgment_notes:
|
||||
current_actions = alert.actions_taken or []
|
||||
current_actions.append({
|
||||
"action": "acknowledged",
|
||||
"by": acknowledged_by,
|
||||
"at": datetime.utcnow().isoformat(),
|
||||
"notes": acknowledgment_notes
|
||||
})
|
||||
update_data["actions_taken"] = current_actions
|
||||
|
||||
alert = await self.update(alert_id, update_data)
|
||||
|
||||
logger.info("Acknowledged production alert",
|
||||
alert_id=str(alert_id),
|
||||
acknowledged_by=acknowledged_by)
|
||||
|
||||
return alert
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error acknowledging alert", error=str(e))
|
||||
raise DatabaseError(f"Failed to acknowledge alert: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def resolve_alert(
|
||||
self,
|
||||
alert_id: UUID,
|
||||
resolved_by: str,
|
||||
resolution_notes: str
|
||||
) -> ProductionAlert:
|
||||
"""Resolve a production alert"""
|
||||
try:
|
||||
alert = await self.get(alert_id)
|
||||
if not alert:
|
||||
raise ValidationError(f"Alert {alert_id} not found")
|
||||
|
||||
if alert.is_resolved:
|
||||
raise ValidationError("Alert is already resolved")
|
||||
|
||||
update_data = {
|
||||
"is_resolved": True,
|
||||
"is_active": False,
|
||||
"resolved_by": resolved_by,
|
||||
"resolved_at": datetime.utcnow(),
|
||||
"resolution_notes": resolution_notes,
|
||||
"updated_at": datetime.utcnow()
|
||||
}
|
||||
|
||||
# Add to actions taken
|
||||
current_actions = alert.actions_taken or []
|
||||
current_actions.append({
|
||||
"action": "resolved",
|
||||
"by": resolved_by,
|
||||
"at": datetime.utcnow().isoformat(),
|
||||
"notes": resolution_notes
|
||||
})
|
||||
update_data["actions_taken"] = current_actions
|
||||
|
||||
alert = await self.update(alert_id, update_data)
|
||||
|
||||
logger.info("Resolved production alert",
|
||||
alert_id=str(alert_id),
|
||||
resolved_by=resolved_by)
|
||||
|
||||
return alert
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error resolving alert", error=str(e))
|
||||
raise DatabaseError(f"Failed to resolve alert: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def get_alert_statistics(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date
|
||||
) -> Dict[str, Any]:
|
||||
"""Get alert statistics for a tenant and date range"""
|
||||
try:
|
||||
start_datetime = datetime.combine(start_date, datetime.min.time())
|
||||
end_datetime = datetime.combine(end_date, datetime.max.time())
|
||||
|
||||
alerts = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"created_at__gte": start_datetime,
|
||||
"created_at__lte": end_datetime
|
||||
}
|
||||
)
|
||||
|
||||
total_alerts = len(alerts)
|
||||
active_alerts = len([a for a in alerts if a.is_active])
|
||||
acknowledged_alerts = len([a for a in alerts if a.is_acknowledged])
|
||||
resolved_alerts = len([a for a in alerts if a.is_resolved])
|
||||
|
||||
# Group by severity
|
||||
by_severity = {}
|
||||
for severity in AlertSeverity:
|
||||
severity_alerts = [a for a in alerts if a.severity == severity]
|
||||
by_severity[severity.value] = {
|
||||
"total": len(severity_alerts),
|
||||
"active": len([a for a in severity_alerts if a.is_active]),
|
||||
"resolved": len([a for a in severity_alerts if a.is_resolved])
|
||||
}
|
||||
|
||||
# Group by alert type
|
||||
by_type = {}
|
||||
for alert in alerts:
|
||||
alert_type = alert.alert_type
|
||||
if alert_type not in by_type:
|
||||
by_type[alert_type] = {
|
||||
"total": 0,
|
||||
"active": 0,
|
||||
"resolved": 0
|
||||
}
|
||||
|
||||
by_type[alert_type]["total"] += 1
|
||||
if alert.is_active:
|
||||
by_type[alert_type]["active"] += 1
|
||||
if alert.is_resolved:
|
||||
by_type[alert_type]["resolved"] += 1
|
||||
|
||||
# Calculate resolution time statistics
|
||||
resolved_with_times = [
|
||||
a for a in alerts
|
||||
if a.is_resolved and a.resolved_at and a.created_at
|
||||
]
|
||||
|
||||
resolution_times = []
|
||||
for alert in resolved_with_times:
|
||||
resolution_time = (alert.resolved_at - alert.created_at).total_seconds() / 3600 # hours
|
||||
resolution_times.append(resolution_time)
|
||||
|
||||
avg_resolution_time = sum(resolution_times) / len(resolution_times) if resolution_times else 0
|
||||
|
||||
return {
|
||||
"period_start": start_date.isoformat(),
|
||||
"period_end": end_date.isoformat(),
|
||||
"total_alerts": total_alerts,
|
||||
"active_alerts": active_alerts,
|
||||
"acknowledged_alerts": acknowledged_alerts,
|
||||
"resolved_alerts": resolved_alerts,
|
||||
"acknowledgment_rate": round((acknowledged_alerts / total_alerts * 100) if total_alerts > 0 else 0, 2),
|
||||
"resolution_rate": round((resolved_alerts / total_alerts * 100) if total_alerts > 0 else 0, 2),
|
||||
"average_resolution_time_hours": round(avg_resolution_time, 2),
|
||||
"by_severity": by_severity,
|
||||
"by_alert_type": by_type,
|
||||
"tenant_id": tenant_id
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error calculating alert statistics", error=str(e))
|
||||
raise DatabaseError(f"Failed to calculate alert statistics: {str(e)}")
|
||||
|
||||
@transactional
|
||||
async def cleanup_old_resolved_alerts(
|
||||
self,
|
||||
tenant_id: str,
|
||||
days_to_keep: int = 30
|
||||
) -> int:
|
||||
"""Clean up old resolved alerts"""
|
||||
try:
|
||||
cutoff_date = datetime.utcnow() - timedelta(days=days_to_keep)
|
||||
|
||||
old_alerts = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"is_resolved": True,
|
||||
"resolved_at__lt": cutoff_date
|
||||
}
|
||||
)
|
||||
|
||||
deleted_count = 0
|
||||
for alert in old_alerts:
|
||||
await self.delete(alert.id)
|
||||
deleted_count += 1
|
||||
|
||||
logger.info("Cleaned up old resolved alerts",
|
||||
deleted_count=deleted_count,
|
||||
days_to_keep=days_to_keep,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return deleted_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error cleaning up old alerts", error=str(e))
|
||||
raise DatabaseError(f"Failed to clean up old alerts: {str(e)}")
|
||||
@@ -31,12 +31,6 @@ class ProductionPriorityEnum(str, Enum):
|
||||
URGENT = "urgent"
|
||||
|
||||
|
||||
class AlertSeverityEnum(str, Enum):
|
||||
"""Alert severity levels for API"""
|
||||
LOW = "low"
|
||||
MEDIUM = "medium"
|
||||
HIGH = "high"
|
||||
CRITICAL = "critical"
|
||||
|
||||
|
||||
# ================================================================
|
||||
@@ -280,61 +274,6 @@ class QualityCheckResponse(BaseModel):
|
||||
from_attributes = True
|
||||
|
||||
|
||||
# ================================================================
|
||||
# PRODUCTION ALERT SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class ProductionAlertBase(BaseModel):
|
||||
"""Base schema for production alert"""
|
||||
alert_type: str = Field(..., min_length=1, max_length=50)
|
||||
severity: AlertSeverityEnum = AlertSeverityEnum.MEDIUM
|
||||
title: str = Field(..., min_length=1, max_length=255)
|
||||
message: str = Field(..., min_length=1)
|
||||
batch_id: Optional[UUID] = None
|
||||
schedule_id: Optional[UUID] = None
|
||||
|
||||
|
||||
class ProductionAlertCreate(ProductionAlertBase):
|
||||
"""Schema for creating a production alert"""
|
||||
recommended_actions: Optional[List[str]] = None
|
||||
impact_level: Optional[str] = Field(None, pattern="^(low|medium|high|critical)$")
|
||||
estimated_cost_impact: Optional[float] = Field(None, ge=0)
|
||||
estimated_time_impact_minutes: Optional[int] = Field(None, ge=0)
|
||||
alert_data: Optional[Dict[str, Any]] = None
|
||||
alert_metadata: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class ProductionAlertResponse(BaseModel):
|
||||
"""Schema for production alert response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
alert_type: str
|
||||
severity: AlertSeverityEnum
|
||||
title: str
|
||||
message: str
|
||||
batch_id: Optional[UUID]
|
||||
schedule_id: Optional[UUID]
|
||||
source_system: str
|
||||
is_active: bool
|
||||
is_acknowledged: bool
|
||||
is_resolved: bool
|
||||
recommended_actions: Optional[List[str]]
|
||||
actions_taken: Optional[List[Dict[str, Any]]]
|
||||
impact_level: Optional[str]
|
||||
estimated_cost_impact: Optional[float]
|
||||
estimated_time_impact_minutes: Optional[int]
|
||||
acknowledged_by: Optional[str]
|
||||
acknowledged_at: Optional[datetime]
|
||||
resolved_by: Optional[str]
|
||||
resolved_at: Optional[datetime]
|
||||
resolution_notes: Optional[str]
|
||||
alert_data: Optional[Dict[str, Any]]
|
||||
alert_metadata: Optional[Dict[str, Any]]
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
# ================================================================
|
||||
@@ -346,7 +285,6 @@ class ProductionDashboardSummary(BaseModel):
|
||||
active_batches: int
|
||||
todays_production_plan: List[Dict[str, Any]]
|
||||
capacity_utilization: float
|
||||
current_alerts: int
|
||||
on_time_completion_rate: float
|
||||
average_quality_score: float
|
||||
total_output_today: float
|
||||
@@ -406,9 +344,3 @@ class QualityCheckListResponse(BaseModel):
|
||||
page_size: int
|
||||
|
||||
|
||||
class ProductionAlertListResponse(BaseModel):
|
||||
"""Schema for production alert list response"""
|
||||
alerts: List[ProductionAlertResponse]
|
||||
total_count: int
|
||||
page: int
|
||||
page_size: int
|
||||
@@ -6,9 +6,7 @@ Business logic services
|
||||
"""
|
||||
|
||||
from .production_service import ProductionService
|
||||
from .production_alert_service import ProductionAlertService
|
||||
|
||||
__all__ = [
|
||||
"ProductionService",
|
||||
"ProductionAlertService"
|
||||
"ProductionService"
|
||||
]
|
||||
@@ -1,435 +0,0 @@
|
||||
"""
|
||||
Production Alert Service
|
||||
Business logic for production alerts and notifications
|
||||
"""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime, date, timedelta
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.database.transactions import transactional
|
||||
from shared.notifications.alert_integration import AlertIntegration
|
||||
from shared.config.base import BaseServiceSettings
|
||||
|
||||
from app.repositories.production_alert_repository import ProductionAlertRepository
|
||||
from app.repositories.production_batch_repository import ProductionBatchRepository
|
||||
from app.repositories.production_capacity_repository import ProductionCapacityRepository
|
||||
from app.models.production import ProductionAlert, AlertSeverity, ProductionStatus
|
||||
from app.schemas.production import ProductionAlertCreate
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionAlertService:
|
||||
"""Production alert service with comprehensive monitoring"""
|
||||
|
||||
def __init__(self, database_manager, config: BaseServiceSettings):
|
||||
self.database_manager = database_manager
|
||||
self.config = config
|
||||
self.alert_integration = AlertIntegration()
|
||||
|
||||
@transactional
|
||||
async def check_production_capacity_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
|
||||
"""Monitor production capacity and generate alerts"""
|
||||
alerts = []
|
||||
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
batch_repo = ProductionBatchRepository(session)
|
||||
capacity_repo = ProductionCapacityRepository(session)
|
||||
alert_repo = ProductionAlertRepository(session)
|
||||
|
||||
today = date.today()
|
||||
|
||||
# Check capacity exceeded alert
|
||||
todays_batches = await batch_repo.get_batches_by_date_range(
|
||||
str(tenant_id), today, today
|
||||
)
|
||||
|
||||
# Calculate total planned hours for today
|
||||
total_planned_hours = sum(
|
||||
batch.planned_duration_minutes / 60
|
||||
for batch in todays_batches
|
||||
if batch.status != ProductionStatus.CANCELLED
|
||||
)
|
||||
|
||||
# Get available capacity
|
||||
available_capacity = await capacity_repo.get_capacity_utilization_summary(
|
||||
str(tenant_id), today, today
|
||||
)
|
||||
|
||||
total_capacity = available_capacity.get("total_capacity_units", 8.0)
|
||||
|
||||
if total_planned_hours > total_capacity:
|
||||
excess_hours = total_planned_hours - total_capacity
|
||||
alert_data = ProductionAlertCreate(
|
||||
alert_type="production_capacity_exceeded",
|
||||
severity=AlertSeverity.HIGH,
|
||||
title="Capacidad de Producción Excedida",
|
||||
message=f"🔥 Capacidad excedida: {excess_hours:.1f}h extra necesarias para completar la producción de hoy",
|
||||
recommended_actions=[
|
||||
"reschedule_batches",
|
||||
"outsource_production",
|
||||
"adjust_menu",
|
||||
"extend_working_hours"
|
||||
],
|
||||
impact_level="high",
|
||||
estimated_time_impact_minutes=int(excess_hours * 60),
|
||||
alert_data={
|
||||
"excess_hours": excess_hours,
|
||||
"total_planned_hours": total_planned_hours,
|
||||
"available_capacity_hours": total_capacity,
|
||||
"affected_batches": len(todays_batches)
|
||||
}
|
||||
)
|
||||
|
||||
alert = await alert_repo.create_alert({
|
||||
**alert_data.model_dump(),
|
||||
"tenant_id": tenant_id
|
||||
})
|
||||
alerts.append(alert)
|
||||
|
||||
# Check production delay alert
|
||||
current_time = datetime.utcnow()
|
||||
cutoff_time = current_time + timedelta(hours=4) # 4 hours ahead
|
||||
|
||||
urgent_batches = await batch_repo.get_urgent_batches(str(tenant_id), 4)
|
||||
delayed_batches = [
|
||||
batch for batch in urgent_batches
|
||||
if batch.planned_start_time <= current_time and batch.status == ProductionStatus.PENDING
|
||||
]
|
||||
|
||||
for batch in delayed_batches:
|
||||
delay_minutes = int((current_time - batch.planned_start_time).total_seconds() / 60)
|
||||
|
||||
if delay_minutes > self.config.PRODUCTION_DELAY_THRESHOLD_MINUTES:
|
||||
alert_data = ProductionAlertCreate(
|
||||
alert_type="production_delay",
|
||||
severity=AlertSeverity.HIGH,
|
||||
title="Retraso en Producción",
|
||||
message=f"⏰ Retraso: {batch.product_name} debía haber comenzado hace {delay_minutes} minutos",
|
||||
batch_id=batch.id,
|
||||
recommended_actions=[
|
||||
"start_production_immediately",
|
||||
"notify_staff",
|
||||
"prepare_alternatives",
|
||||
"update_customers"
|
||||
],
|
||||
impact_level="high",
|
||||
estimated_time_impact_minutes=delay_minutes,
|
||||
alert_data={
|
||||
"batch_number": batch.batch_number,
|
||||
"product_name": batch.product_name,
|
||||
"planned_start_time": batch.planned_start_time.isoformat(),
|
||||
"delay_minutes": delay_minutes,
|
||||
"affects_opening": delay_minutes > 120 # 2 hours
|
||||
}
|
||||
)
|
||||
|
||||
alert = await alert_repo.create_alert({
|
||||
**alert_data.model_dump(),
|
||||
"tenant_id": tenant_id
|
||||
})
|
||||
alerts.append(alert)
|
||||
|
||||
# Check cost spike alert
|
||||
high_cost_batches = [
|
||||
batch for batch in todays_batches
|
||||
if batch.estimated_cost and batch.estimated_cost > 100 # Threshold
|
||||
]
|
||||
|
||||
if high_cost_batches:
|
||||
total_high_cost = sum(batch.estimated_cost for batch in high_cost_batches)
|
||||
|
||||
alert_data = ProductionAlertCreate(
|
||||
alert_type="production_cost_spike",
|
||||
severity=AlertSeverity.MEDIUM,
|
||||
title="Costos de Producción Elevados",
|
||||
message=f"💰 Costos altos detectados: {len(high_cost_batches)} lotes con costo total de {total_high_cost:.2f}€",
|
||||
recommended_actions=[
|
||||
"review_ingredient_costs",
|
||||
"optimize_recipe",
|
||||
"negotiate_supplier_prices",
|
||||
"adjust_menu_pricing"
|
||||
],
|
||||
impact_level="medium",
|
||||
estimated_cost_impact=total_high_cost,
|
||||
alert_data={
|
||||
"high_cost_batches": len(high_cost_batches),
|
||||
"total_cost": total_high_cost,
|
||||
"average_cost": total_high_cost / len(high_cost_batches),
|
||||
"affected_products": [batch.product_name for batch in high_cost_batches]
|
||||
}
|
||||
)
|
||||
|
||||
alert = await alert_repo.create_alert({
|
||||
**alert_data.model_dump(),
|
||||
"tenant_id": tenant_id
|
||||
})
|
||||
alerts.append(alert)
|
||||
|
||||
# Send alerts using notification service
|
||||
await self._send_alerts(tenant_id, alerts)
|
||||
|
||||
return alerts
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking production capacity alerts",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
return []
|
||||
|
||||
@transactional
async def check_quality_control_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
    """Scan the last week's completed batches for yield and quality problems.

    Two alert types may be created and dispatched:
      * ``low_yield_detected``       -- completed batches below the configured
                                        yield threshold (``LOW_YIELD_ALERT_THRESHOLD``).
      * ``recurring_quality_issues`` -- three or more batches in the week under
                                        ``QUALITY_SCORE_THRESHOLD``.

    Returns the list of alerts created; on any error, logs and returns []
    (best-effort monitoring, consistent with the other check_* methods).
    """
    created: List[ProductionAlert] = []

    try:
        async with self.database_manager.get_session() as session:
            alert_repo = ProductionAlertRepository(session)
            batch_repo = ProductionBatchRepository(session)

            # Look back over the previous 7 days of COMPLETED batches.
            window_start = date.today() - timedelta(days=7)
            completed = await batch_repo.get_batches_by_date_range(
                str(tenant_id), window_start, date.today(), ProductionStatus.COMPLETED
            )

            # ---- Low-yield check ------------------------------------------
            # Threshold is configured as a fraction; batch yield is a percent.
            yield_floor = self.config.LOW_YIELD_ALERT_THRESHOLD * 100
            underperforming = [
                b for b in completed
                if b.yield_percentage and b.yield_percentage < yield_floor
            ]

            if underperforming:
                mean_yield = sum(b.yield_percentage for b in underperforming) / len(underperforming)

                payload = ProductionAlertCreate(
                    alert_type="low_yield_detected",
                    severity=AlertSeverity.MEDIUM,
                    title="Rendimiento Bajo Detectado",
                    message=f"📉 Rendimiento bajo: {len(underperforming)} lotes con rendimiento promedio {mean_yield:.1f}%",
                    recommended_actions=[
                        "review_recipes",
                        "check_ingredient_quality",
                        "training_staff",
                        "equipment_calibration"
                    ],
                    impact_level="medium",
                    alert_data={
                        "low_yield_batches": len(underperforming),
                        "average_yield": mean_yield,
                        "threshold": yield_floor,
                        "affected_products": list(set(b.product_name for b in underperforming))
                    }
                )

                created.append(await alert_repo.create_alert({
                    **payload.model_dump(),
                    "tenant_id": tenant_id
                }))

            # ---- Recurring quality-score check ----------------------------
            flagged = [
                b for b in completed
                if b.quality_score and b.quality_score < self.config.QUALITY_SCORE_THRESHOLD
            ]

            if len(flagged) >= 3:  # 3+ incidents within the week counts as "recurring"
                mean_quality = sum(b.quality_score for b in flagged) / len(flagged)

                payload = ProductionAlertCreate(
                    alert_type="recurring_quality_issues",
                    severity=AlertSeverity.HIGH,
                    title="Problemas de Calidad Recurrentes",
                    message=f"⚠️ Problemas de calidad: {len(flagged)} lotes con calidad promedio {mean_quality:.1f}/10",
                    recommended_actions=[
                        "quality_audit",
                        "staff_retraining",
                        "equipment_maintenance",
                        "supplier_review"
                    ],
                    impact_level="high",
                    alert_data={
                        "quality_issues_count": len(flagged),
                        "average_quality_score": mean_quality,
                        "threshold": self.config.QUALITY_SCORE_THRESHOLD,
                        "trend": "declining"
                    }
                )

                created.append(await alert_repo.create_alert({
                    **payload.model_dump(),
                    "tenant_id": tenant_id
                }))

            # Push everything we created through the notification pipeline.
            await self._send_alerts(tenant_id, created)

            return created

    except Exception as e:
        logger.error("Error checking quality control alerts",
                     error=str(e), tenant_id=str(tenant_id))
        return []
|
||||
|
||||
@transactional
async def check_equipment_maintenance_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
    """Check today's equipment capacity records and raise maintenance alerts.

    For each piece of equipment, two independent conditions are evaluated:
      * ``equipment_maintenance_overdue`` -- last maintenance more than 30 days ago.
      * ``equipment_efficiency_low``      -- efficiency rating below 80%.

    NOTE(review): both thresholds (30 days, 0.8) are hard-coded here, unlike
    the config-driven thresholds used by the quality checks — consider moving
    them to settings.

    Returns the list of alerts created; on any error, logs and returns [].
    """
    created: List[ProductionAlert] = []

    try:
        async with self.database_manager.get_session() as session:
            capacity_repo = ProductionCapacityRepository(session)
            alert_repo = ProductionAlertRepository(session)

            # Only today's equipment-type capacity rows for this tenant.
            today = date.today()
            machines = await capacity_repo.get_multi(
                filters={
                    "tenant_id": str(tenant_id),
                    "resource_type": "equipment",
                    "date": today
                }
            )

            for machine in machines:
                # ---- Overdue maintenance ---------------------------------
                if machine.last_maintenance_date:
                    overdue_days = (today - machine.last_maintenance_date.date()).days

                    if overdue_days > 30:  # 30-day maintenance interval
                        payload = ProductionAlertCreate(
                            alert_type="equipment_maintenance_overdue",
                            severity=AlertSeverity.MEDIUM,
                            title="Mantenimiento de Equipo Vencido",
                            message=f"🔧 Mantenimiento vencido: {machine.resource_name} - {overdue_days} días sin mantenimiento",
                            recommended_actions=[
                                "schedule_maintenance",
                                "equipment_inspection",
                                "backup_equipment_ready"
                            ],
                            impact_level="medium",
                            alert_data={
                                "equipment_id": machine.resource_id,
                                "equipment_name": machine.resource_name,
                                "days_since_maintenance": overdue_days,
                                "last_maintenance": machine.last_maintenance_date.isoformat() if machine.last_maintenance_date else None
                            }
                        )

                        created.append(await alert_repo.create_alert({
                            **payload.model_dump(),
                            "tenant_id": tenant_id
                        }))

                # ---- Low efficiency --------------------------------------
                if machine.efficiency_rating and machine.efficiency_rating < 0.8:  # 80% floor
                    payload = ProductionAlertCreate(
                        alert_type="equipment_efficiency_low",
                        severity=AlertSeverity.MEDIUM,
                        title="Eficiencia de Equipo Baja",
                        message=f"📊 Eficiencia baja: {machine.resource_name} operando al {machine.efficiency_rating*100:.1f}%",
                        recommended_actions=[
                            "equipment_calibration",
                            "maintenance_check",
                            "replace_parts"
                        ],
                        impact_level="medium",
                        alert_data={
                            "equipment_id": machine.resource_id,
                            "equipment_name": machine.resource_name,
                            "efficiency_rating": machine.efficiency_rating,
                            "threshold": 0.8
                        }
                    )

                    created.append(await alert_repo.create_alert({
                        **payload.model_dump(),
                        "tenant_id": tenant_id
                    }))

            # Deliver whatever was created.
            await self._send_alerts(tenant_id, created)

            return created

    except Exception as e:
        logger.error("Error checking equipment maintenance alerts",
                     error=str(e), tenant_id=str(tenant_id))
        return []
|
||||
|
||||
async def _send_alerts(self, tenant_id: UUID, alerts: List[ProductionAlert]):
    """Deliver each alert through the notification integration.

    Delivery channels are derived from the alert's severity via
    ``_get_channels_by_severity``. Errors are now handled **per alert**:
    previously a single ``try`` wrapped the whole loop, so one failed
    notification silently aborted delivery of every remaining alert.

    Args:
        tenant_id: Tenant the alerts belong to (sent as a string).
        alerts: Alerts to dispatch; an empty list is a no-op.
    """
    for alert in alerts:
        try:
            # More severe alerts fan out to more intrusive channels.
            channels = self._get_channels_by_severity(alert.severity)

            await self.alert_integration.send_alert(
                tenant_id=str(tenant_id),
                message=alert.message,
                alert_type=alert.alert_type,
                severity=alert.severity.value,
                channels=channels,
                data={
                    "actions": alert.recommended_actions or [],
                    "alert_id": str(alert.id)
                }
            )

            logger.info("Sent production alert notification",
                        alert_id=str(alert.id),
                        alert_type=alert.alert_type,
                        severity=alert.severity.value,
                        channels=channels)

        except Exception as e:
            # Isolate the failure so the remaining alerts still go out.
            logger.error("Error sending alert notifications",
                         error=str(e), tenant_id=str(tenant_id))
|
||||
|
||||
def _get_channels_by_severity(self, severity: AlertSeverity) -> List[str]:
    """Map an alert severity to its delivery channels.

    Higher severities fan out to progressively more intrusive channels;
    anything below MEDIUM reaches only the dashboard.
    """
    channels_for = {
        AlertSeverity.CRITICAL: ["whatsapp", "email", "dashboard", "sms"],
        AlertSeverity.HIGH: ["whatsapp", "email", "dashboard"],
        AlertSeverity.MEDIUM: ["email", "dashboard"],
    }
    return channels_for.get(severity, ["dashboard"])
|
||||
|
||||
@transactional
async def get_active_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
    """Return every currently active production alert for the tenant.

    Best-effort read: any error is logged and an empty list is returned,
    matching the behavior of the check_* monitoring methods.
    """
    try:
        async with self.database_manager.get_session() as session:
            repo = ProductionAlertRepository(session)
            return await repo.get_active_alerts(str(tenant_id))

    except Exception as e:
        logger.error("Error getting active alerts",
                     error=str(e), tenant_id=str(tenant_id))
        return []
|
||||
|
||||
@transactional
async def acknowledge_alert(
    self,
    tenant_id: UUID,
    alert_id: UUID,
    acknowledged_by: str
) -> ProductionAlert:
    """Mark a production alert as acknowledged by the given user.

    Unlike the read-only queries, failures here are logged and re-raised
    so the caller can surface the error to the client.

    Args:
        tenant_id: Tenant owning the alert (used for error context only here).
        alert_id: Alert to acknowledge.
        acknowledged_by: Identifier of the acknowledging user.
    """
    try:
        async with self.database_manager.get_session() as session:
            repo = ProductionAlertRepository(session)
            return await repo.acknowledge_alert(alert_id, acknowledged_by)

    except Exception as e:
        logger.error("Error acknowledging alert",
                     error=str(e), alert_id=str(alert_id), tenant_id=str(tenant_id))
        raise
|
||||
@@ -205,7 +205,6 @@ class ProductionService:
|
||||
active_batches=len(active_batches),
|
||||
todays_production_plan=todays_plan,
|
||||
capacity_utilization=85.0, # TODO: Calculate from actual capacity data
|
||||
current_alerts=0, # TODO: Get from alerts
|
||||
on_time_completion_rate=weekly_metrics.get("on_time_completion_rate", 0),
|
||||
average_quality_score=8.5, # TODO: Get from quality checks
|
||||
total_output_today=sum(b.actual_quantity or 0 for b in todays_batches),
|
||||
|
||||
Reference in New Issue
Block a user