Delete legacy alerts

Urtzi Alfaro
2025-08-22 15:31:52 +02:00
parent c6dd6fd1de
commit 90100a66c6
40 changed files with 25 additions and 3308 deletions

app/services/__init__.py

@@ -6,9 +6,7 @@ Business logic services
"""
from .production_service import ProductionService
from .production_alert_service import ProductionAlertService
__all__ = [
"ProductionService",
"ProductionAlertService"
"ProductionService"
]

app/services/production_alert_service.py

@@ -1,435 +0,0 @@
"""
Production Alert Service
Business logic for production alerts and notifications
"""
from typing import Optional, List, Dict, Any
from datetime import datetime, date, timedelta
from uuid import UUID
import structlog
from shared.database.transactions import transactional
from shared.notifications.alert_integration import AlertIntegration
from shared.config.base import BaseServiceSettings
from app.repositories.production_alert_repository import ProductionAlertRepository
from app.repositories.production_batch_repository import ProductionBatchRepository
from app.repositories.production_capacity_repository import ProductionCapacityRepository
from app.models.production import ProductionAlert, AlertSeverity, ProductionStatus
from app.schemas.production import ProductionAlertCreate
logger = structlog.get_logger()
class ProductionAlertService:
"""Production alert service with comprehensive monitoring"""
def __init__(self, database_manager, config: BaseServiceSettings):
self.database_manager = database_manager
self.config = config
self.alert_integration = AlertIntegration()

    @transactional
    async def check_production_capacity_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Monitor production capacity and generate alerts"""
        alerts = []
        try:
            async with self.database_manager.get_session() as session:
                batch_repo = ProductionBatchRepository(session)
                capacity_repo = ProductionCapacityRepository(session)
                alert_repo = ProductionAlertRepository(session)

                today = date.today()

                # Check capacity exceeded alert
                todays_batches = await batch_repo.get_batches_by_date_range(
                    str(tenant_id), today, today
                )

                # Calculate total planned hours for today
                total_planned_hours = sum(
                    batch.planned_duration_minutes / 60
                    for batch in todays_batches
                    if batch.status != ProductionStatus.CANCELLED
                )

                # Get available capacity
                available_capacity = await capacity_repo.get_capacity_utilization_summary(
                    str(tenant_id), today, today
                )
                total_capacity = available_capacity.get("total_capacity_units", 8.0)

                if total_planned_hours > total_capacity:
                    excess_hours = total_planned_hours - total_capacity
                    alert_data = ProductionAlertCreate(
                        alert_type="production_capacity_exceeded",
                        severity=AlertSeverity.HIGH,
                        title="Capacidad de Producción Excedida",
                        message=f"🔥 Capacidad excedida: {excess_hours:.1f}h extra necesarias para completar la producción de hoy",
                        recommended_actions=[
                            "reschedule_batches",
                            "outsource_production",
                            "adjust_menu",
                            "extend_working_hours"
                        ],
                        impact_level="high",
                        estimated_time_impact_minutes=int(excess_hours * 60),
                        alert_data={
                            "excess_hours": excess_hours,
                            "total_planned_hours": total_planned_hours,
                            "available_capacity_hours": total_capacity,
                            "affected_batches": len(todays_batches)
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Check production delay alert
                current_time = datetime.utcnow()
                cutoff_time = current_time + timedelta(hours=4)  # 4 hours ahead
                urgent_batches = await batch_repo.get_urgent_batches(str(tenant_id), 4)

                delayed_batches = [
                    batch for batch in urgent_batches
                    if batch.planned_start_time <= current_time and batch.status == ProductionStatus.PENDING
                ]

                for batch in delayed_batches:
                    delay_minutes = int((current_time - batch.planned_start_time).total_seconds() / 60)
                    if delay_minutes > self.config.PRODUCTION_DELAY_THRESHOLD_MINUTES:
                        alert_data = ProductionAlertCreate(
                            alert_type="production_delay",
                            severity=AlertSeverity.HIGH,
                            title="Retraso en Producción",
                            message=f"⏰ Retraso: {batch.product_name} debía haber comenzado hace {delay_minutes} minutos",
                            batch_id=batch.id,
                            recommended_actions=[
                                "start_production_immediately",
                                "notify_staff",
                                "prepare_alternatives",
                                "update_customers"
                            ],
                            impact_level="high",
                            estimated_time_impact_minutes=delay_minutes,
                            alert_data={
                                "batch_number": batch.batch_number,
                                "product_name": batch.product_name,
                                "planned_start_time": batch.planned_start_time.isoformat(),
                                "delay_minutes": delay_minutes,
                                "affects_opening": delay_minutes > 120  # 2 hours
                            }
                        )
                        alert = await alert_repo.create_alert({
                            **alert_data.model_dump(),
                            "tenant_id": tenant_id
                        })
                        alerts.append(alert)

                # Check cost spike alert
                high_cost_batches = [
                    batch for batch in todays_batches
                    if batch.estimated_cost and batch.estimated_cost > 100  # Threshold
                ]
                if high_cost_batches:
                    total_high_cost = sum(batch.estimated_cost for batch in high_cost_batches)
                    alert_data = ProductionAlertCreate(
                        alert_type="production_cost_spike",
                        severity=AlertSeverity.MEDIUM,
                        title="Costos de Producción Elevados",
                        message=f"💰 Costos altos detectados: {len(high_cost_batches)} lotes con costo total de {total_high_cost:.2f}",
                        recommended_actions=[
                            "review_ingredient_costs",
                            "optimize_recipe",
                            "negotiate_supplier_prices",
                            "adjust_menu_pricing"
                        ],
                        impact_level="medium",
                        estimated_cost_impact=total_high_cost,
                        alert_data={
                            "high_cost_batches": len(high_cost_batches),
                            "total_cost": total_high_cost,
                            "average_cost": total_high_cost / len(high_cost_batches),
                            "affected_products": [batch.product_name for batch in high_cost_batches]
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Send alerts using notification service
                await self._send_alerts(tenant_id, alerts)

                return alerts
        except Exception as e:
            logger.error("Error checking production capacity alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []

    @transactional
    async def check_quality_control_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Monitor quality control issues and generate alerts"""
        alerts = []
        try:
            async with self.database_manager.get_session() as session:
                alert_repo = ProductionAlertRepository(session)
                batch_repo = ProductionBatchRepository(session)

                # Check for batches with low yield
                last_week = date.today() - timedelta(days=7)
                recent_batches = await batch_repo.get_batches_by_date_range(
                    str(tenant_id), last_week, date.today(), ProductionStatus.COMPLETED
                )

                low_yield_batches = [
                    batch for batch in recent_batches
                    if batch.yield_percentage and batch.yield_percentage < self.config.LOW_YIELD_ALERT_THRESHOLD * 100
                ]
                if low_yield_batches:
                    avg_yield = sum(batch.yield_percentage for batch in low_yield_batches) / len(low_yield_batches)
                    alert_data = ProductionAlertCreate(
                        alert_type="low_yield_detected",
                        severity=AlertSeverity.MEDIUM,
                        title="Rendimiento Bajo Detectado",
                        message=f"📉 Rendimiento bajo: {len(low_yield_batches)} lotes con rendimiento promedio {avg_yield:.1f}%",
                        recommended_actions=[
                            "review_recipes",
                            "check_ingredient_quality",
                            "training_staff",
                            "equipment_calibration"
                        ],
                        impact_level="medium",
                        alert_data={
                            "low_yield_batches": len(low_yield_batches),
                            "average_yield": avg_yield,
                            "threshold": self.config.LOW_YIELD_ALERT_THRESHOLD * 100,
                            "affected_products": list(set(batch.product_name for batch in low_yield_batches))
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Check for recurring quality issues
                quality_issues = [
                    batch for batch in recent_batches
                    if batch.quality_score and batch.quality_score < self.config.QUALITY_SCORE_THRESHOLD
                ]
                if len(quality_issues) >= 3:  # 3 or more quality issues in a week
                    avg_quality = sum(batch.quality_score for batch in quality_issues) / len(quality_issues)
                    alert_data = ProductionAlertCreate(
                        alert_type="recurring_quality_issues",
                        severity=AlertSeverity.HIGH,
                        title="Problemas de Calidad Recurrentes",
                        message=f"⚠️ Problemas de calidad: {len(quality_issues)} lotes con calidad promedio {avg_quality:.1f}/10",
                        recommended_actions=[
                            "quality_audit",
                            "staff_retraining",
                            "equipment_maintenance",
                            "supplier_review"
                        ],
                        impact_level="high",
                        alert_data={
                            "quality_issues_count": len(quality_issues),
                            "average_quality_score": avg_quality,
                            "threshold": self.config.QUALITY_SCORE_THRESHOLD,
                            "trend": "declining"
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Send alerts
                await self._send_alerts(tenant_id, alerts)

                return alerts
        except Exception as e:
            logger.error("Error checking quality control alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []

    @transactional
    async def check_equipment_maintenance_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Monitor equipment status and generate maintenance alerts"""
        alerts = []
        try:
            async with self.database_manager.get_session() as session:
                capacity_repo = ProductionCapacityRepository(session)
                alert_repo = ProductionAlertRepository(session)

                # Get equipment that needs maintenance
                today = date.today()
                equipment_capacity = await capacity_repo.get_multi(
                    filters={
                        "tenant_id": str(tenant_id),
                        "resource_type": "equipment",
                        "date": today
                    }
                )

                for equipment in equipment_capacity:
                    # Check if maintenance is overdue
                    if equipment.last_maintenance_date:
                        days_since_maintenance = (today - equipment.last_maintenance_date.date()).days
                        if days_since_maintenance > 30:  # 30 days threshold
                            alert_data = ProductionAlertCreate(
                                alert_type="equipment_maintenance_overdue",
                                severity=AlertSeverity.MEDIUM,
                                title="Mantenimiento de Equipo Vencido",
                                message=f"🔧 Mantenimiento vencido: {equipment.resource_name} - {days_since_maintenance} días sin mantenimiento",
                                recommended_actions=[
                                    "schedule_maintenance",
                                    "equipment_inspection",
                                    "backup_equipment_ready"
                                ],
                                impact_level="medium",
                                alert_data={
                                    "equipment_id": equipment.resource_id,
                                    "equipment_name": equipment.resource_name,
                                    "days_since_maintenance": days_since_maintenance,
                                    "last_maintenance": equipment.last_maintenance_date.isoformat() if equipment.last_maintenance_date else None
                                }
                            )
                            alert = await alert_repo.create_alert({
                                **alert_data.model_dump(),
                                "tenant_id": tenant_id
                            })
                            alerts.append(alert)

                    # Check equipment efficiency
                    if equipment.efficiency_rating and equipment.efficiency_rating < 0.8:  # 80% threshold
                        alert_data = ProductionAlertCreate(
                            alert_type="equipment_efficiency_low",
                            severity=AlertSeverity.MEDIUM,
                            title="Eficiencia de Equipo Baja",
                            message=f"📊 Eficiencia baja: {equipment.resource_name} operando al {equipment.efficiency_rating*100:.1f}%",
                            recommended_actions=[
                                "equipment_calibration",
                                "maintenance_check",
                                "replace_parts"
                            ],
                            impact_level="medium",
                            alert_data={
                                "equipment_id": equipment.resource_id,
                                "equipment_name": equipment.resource_name,
                                "efficiency_rating": equipment.efficiency_rating,
                                "threshold": 0.8
                            }
                        )
                        alert = await alert_repo.create_alert({
                            **alert_data.model_dump(),
                            "tenant_id": tenant_id
                        })
                        alerts.append(alert)

                # Send alerts
                await self._send_alerts(tenant_id, alerts)

                return alerts
        except Exception as e:
            logger.error("Error checking equipment maintenance alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []

    async def _send_alerts(self, tenant_id: UUID, alerts: List[ProductionAlert]):
        """Send alerts using notification service with proper urgency handling"""
        try:
            for alert in alerts:
                # Determine delivery channels based on severity
                channels = self._get_channels_by_severity(alert.severity)

                # Send notification using alert integration
                await self.alert_integration.send_alert(
                    tenant_id=str(tenant_id),
                    message=alert.message,
                    alert_type=alert.alert_type,
                    severity=alert.severity.value,
                    channels=channels,
                    data={
                        "actions": alert.recommended_actions or [],
                        "alert_id": str(alert.id)
                    }
                )

                logger.info("Sent production alert notification",
                            alert_id=str(alert.id),
                            alert_type=alert.alert_type,
                            severity=alert.severity.value,
                            channels=channels)
        except Exception as e:
            logger.error("Error sending alert notifications",
                         error=str(e), tenant_id=str(tenant_id))

    def _get_channels_by_severity(self, severity: AlertSeverity) -> List[str]:
        """Map severity to delivery channels following user-centric analysis"""
        if severity == AlertSeverity.CRITICAL:
            return ["whatsapp", "email", "dashboard", "sms"]
        elif severity == AlertSeverity.HIGH:
            return ["whatsapp", "email", "dashboard"]
        elif severity == AlertSeverity.MEDIUM:
            return ["email", "dashboard"]
        else:
            return ["dashboard"]

    @transactional
    async def get_active_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Get all active production alerts for a tenant"""
        try:
            async with self.database_manager.get_session() as session:
                alert_repo = ProductionAlertRepository(session)
                return await alert_repo.get_active_alerts(str(tenant_id))
        except Exception as e:
            logger.error("Error getting active alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []

    @transactional
    async def acknowledge_alert(
        self,
        tenant_id: UUID,
        alert_id: UUID,
        acknowledged_by: str
    ) -> ProductionAlert:
        """Acknowledge a production alert"""
        try:
            async with self.database_manager.get_session() as session:
                alert_repo = ProductionAlertRepository(session)
                return await alert_repo.acknowledge_alert(alert_id, acknowledged_by)
        except Exception as e:
            logger.error("Error acknowledging alert",
                         error=str(e), alert_id=str(alert_id), tenant_id=str(tenant_id))
            raise

app/services/production_service.py

@@ -205,7 +205,6 @@ class ProductionService:
             active_batches=len(active_batches),
             todays_production_plan=todays_plan,
             capacity_utilization=85.0,  # TODO: Calculate from actual capacity data
-            current_alerts=0,  # TODO: Get from alerts
             on_time_completion_rate=weekly_metrics.get("on_time_completion_rate", 0),
             average_quality_score=8.5,  # TODO: Get from quality checks
             total_output_today=sum(b.actual_quantity or 0 for b in todays_batches),