demo seed change
@@ -181,6 +181,41 @@ class ProductionAlertService:
            issue_type=issue_type
        )

    async def emit_start_production_alert(
        self,
        tenant_id: UUID,
        batch_id: UUID,
        product_name: str,
        batch_number: str,
        reasoning_data: Optional[Dict[str, Any]] = None,
        planned_start_time: Optional[str] = None
    ):
        """Emit start production alert when a new batch is created"""
        metadata = {
            "batch_id": str(batch_id),
            "product_name": product_name,
            "batch_number": batch_number,
            "reasoning_data": reasoning_data
        }

        if planned_start_time:
            metadata["planned_start_time"] = planned_start_time

        await self.publisher.publish_alert(
            event_type="production.start_production",
            tenant_id=tenant_id,
            severity="medium",
            data=metadata
        )

        logger.info(
            "start_production_alert_emitted",
            tenant_id=str(tenant_id),
            batch_number=batch_number,
            reasoning_type=reasoning_data.get("type") if reasoning_data else None
        )

    async def emit_batch_start_delayed(
        self,
        tenant_id: UUID,
@@ -376,73 +411,3 @@ class ProductionAlertService:
            tenant_id=str(tenant_id),
            time_savings=estimated_time_savings_minutes
        )

    async def check_production_delays(self) -> int:
        """
        Check for production delays and emit alerts for delayed batches.

        This method queries the database for production batches that are IN_PROGRESS
        but past their planned end time, and emits production delay alerts.

        Returns:
            int: Number of delay alerts emitted
        """
        if not self.database_manager:
            logger.warning("Database manager not available for delay checking")
            return 0

        logger.info("Checking for production delays")
        alerts_emitted = 0

        try:
            async with self.database_manager.get_session() as session:
                # Import the repository here to avoid circular imports
                from app.repositories.production_alert_repository import ProductionAlertRepository
                alert_repo = ProductionAlertRepository(session)

                # Get production delays from the database
                delayed_batches = await alert_repo.get_production_delays()

                logger.info("Found delayed batches", count=len(delayed_batches))

                # For each delayed batch, emit a production delay alert
                for batch in delayed_batches:
                    try:
                        batch_id = UUID(batch["id"])
                        tenant_id = UUID(batch["tenant_id"])
                        delay_minutes = int(batch["delay_minutes"])
                        affected_orders = int(batch.get("affected_orders", 0))

                        # Emit production delay alert using existing method
                        await self.emit_production_delay(
                            tenant_id=tenant_id,
                            batch_id=batch_id,
                            product_name=batch.get("product_name", "Unknown Product"),
                            batch_number=batch.get("batch_number", "Unknown Batch"),
                            delay_minutes=delay_minutes,
                            affected_orders=affected_orders
                        )

                        alerts_emitted += 1
                        logger.info(
                            "Production delay alert emitted",
                            batch_id=str(batch_id),
                            delay_minutes=delay_minutes,
                            tenant_id=str(tenant_id)
                        )

                    except Exception as e:
                        logger.error(
                            "Error emitting alert for delayed batch",
                            batch_id=batch.get("id", "unknown"),
                            error=str(e)
                        )
                        continue

        except Exception as e:
            logger.error("Error checking for production delays", error=str(e))
            # Don't raise the exception - this method is called internally
            # and we don't want to break the calling flow
            return 0

        logger.info("Production delay check completed", alerts_emitted=alerts_emitted)
        return alerts_emitted
services/production/app/services/production_scheduler.py (new file, 609 lines)
@@ -0,0 +1,609 @@
"""
|
||||
Production Scheduler Service
|
||||
Background task that periodically checks for production alert conditions
|
||||
and triggers appropriate alerts.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timedelta
|
||||
import structlog
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import text
|
||||
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.interval import IntervalTrigger
|
||||
|
||||
from app.repositories.production_batch_repository import ProductionBatchRepository
|
||||
from app.repositories.equipment_repository import EquipmentRepository
|
||||
from app.services.production_alert_service import ProductionAlertService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
class ProductionScheduler:
|
||||
"""Production scheduler service that checks for alert conditions"""
|
||||
|
||||
def __init__(self, alert_service: ProductionAlertService, database_manager: Any):
|
||||
self.alert_service = alert_service
|
||||
self.database_manager = database_manager
|
||||
self.scheduler = AsyncIOScheduler()
|
||||
self.check_interval = 300 # 5 minutes
|
||||
self.job_id = 'production_scheduler'
|
||||
|
||||
# Cache de alertas emitidas para evitar duplicados
|
||||
self._emitted_alerts: set = set()
|
||||
self._alert_cache_ttl = 3600 # 1 hora
|
||||
self._last_cache_clear = datetime.utcnow()
|
||||
|
||||
    async def start(self):
        """Start the production scheduler with APScheduler"""
        if self.scheduler.running:
            logger.warning("Production scheduler is already running")
            return

        # Add the periodic job
        trigger = IntervalTrigger(seconds=self.check_interval)
        self.scheduler.add_job(
            self._run_scheduler_task,
            trigger=trigger,
            id=self.job_id,
            name="Production Alert Checks",
            max_instances=1  # Prevent overlapping executions
        )

        # Start the scheduler
        self.scheduler.start()
        logger.info("Production scheduler started", interval_seconds=self.check_interval)

    async def stop(self):
        """Stop the production scheduler"""
        if self.scheduler.running:
            self.scheduler.shutdown(wait=True)
            logger.info("Production scheduler stopped")
        else:
            logger.info("Production scheduler already stopped")

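Aside, not part of the commit: a minimal runnable sketch of the APScheduler pattern used above (interval trigger plus max_instances=1); names here are illustrative.

# Standalone illustration of the scheduling pattern; not from the codebase.
import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.interval import IntervalTrigger

async def periodic_check():
    print("running periodic check")

async def main():
    scheduler = AsyncIOScheduler()
    # max_instances=1: if a run is still in progress when the next trigger
    # fires, APScheduler skips the new run instead of overlapping them.
    scheduler.add_job(periodic_check, IntervalTrigger(seconds=2),
                      id="demo_job", max_instances=1)
    scheduler.start()
    await asyncio.sleep(7)  # let a few runs fire
    scheduler.shutdown(wait=False)

asyncio.run(main())
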
    async def _run_scheduler_task(self):
        """Run scheduled production alert checks with leader election"""
        # Derive a stable advisory-lock ID for this scheduler. hashlib is used
        # instead of hash(), which is salted per process and would give each
        # instance a different lock ID, defeating leader election.
        lock_name = f"production_scheduler:{getattr(self.database_manager, 'database_url', 'default')}"
        lock_id = int.from_bytes(hashlib.sha256(lock_name.encode()).digest()[:4], "big") % (2**31)

        try:
            # Advisory locks are scoped to the database session, so acquire,
            # do the work, and release all on the same session.
            async with self.database_manager.get_session() as session:
                result = await session.execute(
                    text("SELECT pg_try_advisory_lock(:lock_id)"), {"lock_id": lock_id}
                )
                # pg_try_advisory_lock returns false (rather than raising)
                # when another instance already holds the lock.
                if not result.scalar():
                    logger.debug(
                        "Skipping production scheduler execution (not leader)",
                        lock_name=lock_name
                    )
                    return

                try:
                    start_time = datetime.now()
                    logger.info("Running scheduled production alert checks (as leader)")

                    # Run all alert checks
                    alerts_generated = await self.check_all_conditions()

                    duration = (datetime.now() - start_time).total_seconds()
                    logger.info(
                        "Completed scheduled production alert checks",
                        alerts_generated=alerts_generated,
                        duration_seconds=round(duration, 2)
                    )
                finally:
                    # Release the lock on the same session that acquired it.
                    try:
                        await session.execute(
                            text("SELECT pg_advisory_unlock(:lock_id)"), {"lock_id": lock_id}
                        )
                        await session.commit()
                    except Exception as unlock_error:
                        logger.warning(
                            "Error releasing leader lock (it is dropped automatically when the session ends)",
                            error=str(unlock_error)
                        )

        except Exception as e:
            logger.error(
                "Error in production scheduler task",
                error=str(e),
                exc_info=True
            )

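For reference, the advisory-lock contract this relies on, as a standalone sketch: pg_try_advisory_lock returns true/false immediately instead of blocking, and the lock belongs to the connection that took it. The DSN below is a placeholder; running it requires a reachable PostgreSQL plus SQLAlchemy and asyncpg.

# Standalone sketch of PostgreSQL advisory-lock leader election; not from the codebase.
import asyncio
from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine

async def run_leader_only(dsn: str, lock_id: int) -> bool:
    engine = create_async_engine(dsn)
    try:
        async with engine.connect() as conn:
            # True/False immediately; never blocks, never raises on contention.
            acquired = (await conn.execute(
                text("SELECT pg_try_advisory_lock(:id)"), {"id": lock_id}
            )).scalar()
            if not acquired:
                return False  # another connection is the leader
            try:
                print("leader-only work runs here")
            finally:
                # Release on the same connection that acquired the lock; it is
                # also released automatically when the connection closes.
                await conn.execute(text("SELECT pg_advisory_unlock(:id)"), {"id": lock_id})
        return True
    finally:
        await engine.dispose()

# Example (placeholder DSN):
# asyncio.run(run_leader_only("postgresql+asyncpg://user:pass@localhost/db", 42))
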
    async def check_all_conditions(self) -> int:
        """
        Check all production alert conditions and trigger alerts.

        Returns:
            int: Total number of alerts generated
        """
        if not self.database_manager:
            logger.warning("Database manager not available for production checks")
            return 0

        total_alerts = 0

        try:
            async with self.database_manager.get_session() as session:
                # Get repositories
                batch_repo = ProductionBatchRepository(session)
                equipment_repo = EquipmentRepository(session)

                # Check production delays
                delay_alerts = await self._check_production_delays(batch_repo)
                total_alerts += delay_alerts

                # Check equipment maintenance
                maintenance_alerts = await self._check_equipment_maintenance(equipment_repo)
                total_alerts += maintenance_alerts

                # Check batch start delays (batches that should have started but haven't)
                start_delay_alerts = await self._check_batch_start_delays(batch_repo)
                total_alerts += start_delay_alerts

                logger.info(
                    "Production alert checks completed",
                    total_alerts=total_alerts,
                    production_delays=delay_alerts,
                    equipment_maintenance=maintenance_alerts,
                    batch_start_delays=start_delay_alerts
                )

        except Exception as e:
            logger.error(
                "Error during production alert checks",
                error=str(e),
                exc_info=True
            )

        return total_alerts

    async def _check_production_delays(self, batch_repo: ProductionBatchRepository) -> int:
        """
        Check for production delays and trigger alerts.

        Args:
            batch_repo: Production batch repository

        Returns:
            int: Number of delay alerts generated
        """
        try:
            # Get delayed batches from repository
            delayed_batches = await batch_repo.get_production_delays()

            logger.info("Found delayed production batches", count=len(delayed_batches))

            # Clear the cache if its TTL has expired
            if (datetime.utcnow() - self._last_cache_clear).total_seconds() > self._alert_cache_ttl:
                self._emitted_alerts.clear()
                self._last_cache_clear = datetime.utcnow()
                logger.info("Cleared alert cache due to TTL expiration")

            alerts_generated = 0

            for batch in delayed_batches:
                try:
                    batch_id = UUID(str(batch["id"]))

                    # Skip if we already emitted an alert for this batch
                    alert_key = f"delay:{batch_id}"
                    if alert_key in self._emitted_alerts:
                        logger.debug("Skipping duplicate delay alert", batch_id=str(batch_id))
                        continue

                    tenant_id = UUID(str(batch["tenant_id"]))
                    delay_minutes = int(batch["delay_minutes"]) if batch.get("delay_minutes") else 0
                    affected_orders = int(batch.get("affected_orders", 0))

                    # Emit production delay alert
                    await self.alert_service.emit_production_delay(
                        tenant_id=tenant_id,
                        batch_id=batch_id,
                        product_name=batch.get("product_name", "Unknown Product"),
                        batch_number=batch.get("batch_number", "Unknown Batch"),
                        delay_minutes=delay_minutes,
                        affected_orders=affected_orders
                    )

                    # Record the key so the next run skips this batch
                    self._emitted_alerts.add(alert_key)
                    alerts_generated += 1

                except Exception as e:
                    logger.error(
                        "Error emitting production delay alert",
                        batch_id=batch.get("id", "unknown"),
                        error=str(e)
                    )
                    continue

            return alerts_generated

        except Exception as e:
            logger.error("Error checking production delays", error=str(e))
            return 0

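The duplicate suppression above uses a single TTL for the whole cache: every remembered key is forgotten at once when the window expires, so a still-delayed batch re-alerts at most once per TTL. Extracted as a standalone, runnable sketch (class and names are illustrative, not from the codebase):

from datetime import datetime, timedelta

class TTLAlertCache:
    """Remember emitted alert keys; forget them all once the TTL window expires."""

    def __init__(self, ttl_seconds: int = 3600):
        self._keys: set = set()
        self._ttl = timedelta(seconds=ttl_seconds)
        self._last_clear = datetime.utcnow()

    def seen(self, key: str) -> bool:
        # Whole-cache expiry, mirroring the scheduler's _last_cache_clear check
        if datetime.utcnow() - self._last_clear > self._ttl:
            self._keys.clear()
            self._last_clear = datetime.utcnow()
        if key in self._keys:
            return True
        self._keys.add(key)
        return False

cache = TTLAlertCache(ttl_seconds=3600)
assert cache.seen("delay:1234") is False  # first emission goes through
assert cache.seen("delay:1234") is True   # duplicate within the window is suppressed
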
    async def _check_equipment_maintenance(self, equipment_repo: EquipmentRepository) -> int:
        """
        Check for equipment needing maintenance and trigger alerts.

        Args:
            equipment_repo: Equipment repository

        Returns:
            int: Number of maintenance alerts generated
        """
        try:
            # Get equipment that needs maintenance using repository method
            equipment_needing_maintenance = await equipment_repo.get_equipment_needing_maintenance()

            logger.info(
                "Found equipment needing maintenance",
                count=len(equipment_needing_maintenance)
            )

            alerts_generated = 0

            for equipment in equipment_needing_maintenance:
                try:
                    equipment_id = UUID(equipment["id"])
                    tenant_id = UUID(equipment["tenant_id"])
                    days_overdue = int(equipment.get("days_overdue", 0))

                    # Emit equipment maintenance alert
                    await self.alert_service.emit_equipment_maintenance_due(
                        tenant_id=tenant_id,
                        equipment_id=equipment_id,
                        equipment_name=equipment.get("name", "Unknown Equipment"),
                        equipment_type=equipment.get("type", "unknown"),
                        last_maintenance_date=equipment.get("last_maintenance_date"),
                        days_overdue=days_overdue
                    )

                    alerts_generated += 1

                except Exception as e:
                    logger.error(
                        "Error emitting equipment maintenance alert",
                        equipment_id=equipment.get("id", "unknown"),
                        error=str(e)
                    )
                    continue

            return alerts_generated

        except Exception as e:
            logger.error("Error checking equipment maintenance", error=str(e))
            return 0

    async def _check_batch_start_delays(self, batch_repo: ProductionBatchRepository) -> int:
        """
        Check for batches that should have started but haven't.

        Args:
            batch_repo: Production batch repository

        Returns:
            int: Number of start delay alerts generated
        """
        try:
            # Get batches that should have started using repository method
            delayed_start_batches = await batch_repo.get_batches_with_delayed_start()

            logger.info(
                "Found batches with delayed start",
                count=len(delayed_start_batches)
            )

            alerts_generated = 0

            for batch in delayed_start_batches:
                try:
                    batch_id = UUID(batch["id"])

                    # Skip if we already emitted an alert for this batch
                    alert_key = f"start_delay:{batch_id}"
                    if alert_key in self._emitted_alerts:
                        logger.debug("Skipping duplicate start delay alert", batch_id=str(batch_id))
                        continue

                    tenant_id = UUID(batch["tenant_id"])
                    scheduled_start = batch.get("scheduled_start_time")

                    # Emit batch start delayed alert
                    await self.alert_service.emit_batch_start_delayed(
                        tenant_id=tenant_id,
                        batch_id=batch_id,
                        product_name=batch.get("product_name", "Unknown Product"),
                        batch_number=batch.get("batch_number", "Unknown Batch"),
                        scheduled_start=scheduled_start,
                        delay_reason="Batch has not started on time"
                    )

                    # Record the key so the next run skips this batch
                    self._emitted_alerts.add(alert_key)
                    alerts_generated += 1

                except Exception as e:
                    logger.error(
                        "Error emitting batch start delay alert",
                        batch_id=batch.get("id", "unknown"),
                        error=str(e)
                    )
                    continue

            return alerts_generated

        except Exception as e:
            logger.error("Error checking batch start delays", error=str(e))
            return 0

    async def trigger_manual_check(self, tenant_id: Optional[UUID] = None) -> Dict[str, Any]:
        """
        Manually trigger production alert checks for a specific tenant or all tenants.

        Args:
            tenant_id: Optional tenant ID to check. If None, checks all tenants.

        Returns:
            Dict with alert generation results
        """
        logger.info(
            "Manually triggering production alert checks",
            tenant_id=str(tenant_id) if tenant_id else "all_tenants"
        )

        try:
            if tenant_id:
                # Run tenant-specific alert checks
                alerts_generated = await self.check_all_conditions_for_tenant(tenant_id)
            else:
                # Run all alert checks across all tenants
                alerts_generated = await self.check_all_conditions()

            return {
                "success": True,
                "tenant_id": str(tenant_id) if tenant_id else None,
                "alerts_generated": alerts_generated,
                "timestamp": datetime.now().isoformat(),
                "message": "Production alert checks completed successfully"
            }

        except Exception as e:
            logger.error(
                "Error during manual production alert check",
                error=str(e),
                exc_info=True
            )
            return {
                "success": False,
                "tenant_id": str(tenant_id) if tenant_id else None,
                "alerts_generated": 0,
                "timestamp": datetime.now().isoformat(),
                "error": str(e)
            }

    async def check_all_conditions_for_tenant(self, tenant_id: UUID) -> int:
        """
        Check all production alert conditions for a specific tenant and trigger alerts.

        Args:
            tenant_id: Tenant ID to check conditions for

        Returns:
            int: Total number of alerts generated
        """
        if not self.database_manager:
            logger.warning("Database manager not available for production checks")
            return 0

        total_alerts = 0

        try:
            async with self.database_manager.get_session() as session:
                # Get repositories
                batch_repo = ProductionBatchRepository(session)
                equipment_repo = EquipmentRepository(session)

                # Check production delays for specific tenant
                delay_alerts = await self._check_production_delays_for_tenant(batch_repo, tenant_id)
                total_alerts += delay_alerts

                # Check equipment maintenance for specific tenant
                maintenance_alerts = await self._check_equipment_maintenance_for_tenant(equipment_repo, tenant_id)
                total_alerts += maintenance_alerts

                # Check batch start delays for specific tenant
                start_delay_alerts = await self._check_batch_start_delays_for_tenant(batch_repo, tenant_id)
                total_alerts += start_delay_alerts

                logger.info(
                    "Tenant-specific production alert checks completed",
                    tenant_id=str(tenant_id),
                    total_alerts=total_alerts,
                    production_delays=delay_alerts,
                    equipment_maintenance=maintenance_alerts,
                    batch_start_delays=start_delay_alerts
                )

        except Exception as e:
            logger.error(
                "Error during tenant-specific production alert checks",
                tenant_id=str(tenant_id),
                error=str(e),
                exc_info=True
            )

        return total_alerts

    async def _check_production_delays_for_tenant(self, batch_repo: ProductionBatchRepository, tenant_id: UUID) -> int:
        """
        Check for production delays for a specific tenant and trigger alerts.

        Args:
            batch_repo: Production batch repository
            tenant_id: Tenant ID to check for

        Returns:
            int: Number of delay alerts generated
        """
        try:
            # Get delayed batches for the specific tenant using repository method
            delayed_batches = await batch_repo.get_production_delays(tenant_id)

            logger.info("Found delayed production batches for tenant", count=len(delayed_batches), tenant_id=str(tenant_id))

            alerts_generated = 0

            for batch in delayed_batches:
                try:
                    batch_id = UUID(str(batch["id"]))
                    delay_minutes = int(batch["delay_minutes"]) if batch.get("delay_minutes") else 0
                    affected_orders = int(batch.get("affected_orders", 0))

                    # Emit production delay alert
                    await self.alert_service.emit_production_delay(
                        tenant_id=tenant_id,
                        batch_id=batch_id,
                        product_name=batch.get("product_name", "Unknown Product"),
                        batch_number=batch.get("batch_number", "Unknown Batch"),
                        delay_minutes=delay_minutes,
                        affected_orders=affected_orders
                    )

                    alerts_generated += 1

                except Exception as e:
                    logger.error(
                        "Error emitting production delay alert",
                        tenant_id=str(tenant_id),
                        batch_id=batch.get("id", "unknown"),
                        error=str(e)
                    )
                    continue

            return alerts_generated

        except Exception as e:
            logger.error("Error checking production delays for tenant", tenant_id=str(tenant_id), error=str(e))
            return 0

    async def _check_equipment_maintenance_for_tenant(self, equipment_repo: EquipmentRepository, tenant_id: UUID) -> int:
        """
        Check for equipment needing maintenance for a specific tenant and trigger alerts.

        Args:
            equipment_repo: Equipment repository
            tenant_id: Tenant ID to check for

        Returns:
            int: Number of maintenance alerts generated
        """
        try:
            # Get equipment that needs maintenance for specific tenant using repository method
            equipment_needing_maintenance = await equipment_repo.get_equipment_needing_maintenance(tenant_id)

            logger.info(
                "Found equipment needing maintenance for tenant",
                count=len(equipment_needing_maintenance),
                tenant_id=str(tenant_id)
            )

            alerts_generated = 0

            for equipment in equipment_needing_maintenance:
                try:
                    equipment_id = UUID(equipment["id"])
                    days_overdue = int(equipment.get("days_overdue", 0))

                    # Emit equipment maintenance alert
                    await self.alert_service.emit_equipment_maintenance_due(
                        tenant_id=tenant_id,
                        equipment_id=equipment_id,
                        equipment_name=equipment.get("name", "Unknown Equipment"),
                        equipment_type=equipment.get("type", "unknown"),
                        last_maintenance_date=equipment.get("last_maintenance_date"),
                        days_overdue=days_overdue
                    )

                    alerts_generated += 1

                except Exception as e:
                    logger.error(
                        "Error emitting equipment maintenance alert",
                        tenant_id=str(tenant_id),
                        equipment_id=equipment.get("id", "unknown"),
                        error=str(e)
                    )
                    continue

            return alerts_generated

        except Exception as e:
            logger.error("Error checking equipment maintenance for tenant", tenant_id=str(tenant_id), error=str(e))
            return 0

    async def _check_batch_start_delays_for_tenant(self, batch_repo: ProductionBatchRepository, tenant_id: UUID) -> int:
        """
        Check for batches that should have started but haven't for a specific tenant.

        Args:
            batch_repo: Production batch repository
            tenant_id: Tenant ID to check for

        Returns:
            int: Number of start delay alerts generated
        """
        try:
            # Get batches that should have started for specific tenant using repository method
            delayed_start_batches = await batch_repo.get_batches_with_delayed_start(tenant_id)

            logger.info(
                "Found batches with delayed start for tenant",
                count=len(delayed_start_batches),
                tenant_id=str(tenant_id)
            )

            alerts_generated = 0

            for batch in delayed_start_batches:
                try:
                    batch_id = UUID(batch["id"])
                    scheduled_start = batch.get("scheduled_start_time")

                    # Emit batch start delayed alert
                    await self.alert_service.emit_batch_start_delayed(
                        tenant_id=tenant_id,
                        batch_id=batch_id,
                        product_name=batch.get("product_name", "Unknown Product"),
                        batch_number=batch.get("batch_number", "Unknown Batch"),
                        scheduled_start=scheduled_start,
                        delay_reason="Batch has not started on time"
                    )

                    alerts_generated += 1

                except Exception as e:
                    logger.error(
                        "Error emitting batch start delay alert",
                        tenant_id=str(tenant_id),
                        batch_id=batch.get("id", "unknown"),
                        error=str(e)
                    )
                    continue

            return alerts_generated

        except Exception as e:
            logger.error("Error checking batch start delays for tenant", tenant_id=str(tenant_id), error=str(e))
            return 0
@@ -25,17 +25,24 @@ from app.schemas.production import (
    DailyProductionRequirements, ProductionDashboardSummary, ProductionMetrics
)
from app.utils.cache import delete_cached, make_cache_key
from app.services.production_notification_service import ProductionNotificationService

logger = structlog.get_logger()


class ProductionService:
    """Main production service with business logic"""

    def __init__(
        self,
        database_manager,
        config: BaseServiceSettings,
        notification_service: Optional[ProductionNotificationService] = None
    ):
        self.database_manager = database_manager
        self.config = config
        self.notification_service = notification_service

        # Initialize shared clients
        self.inventory_client = get_inventory_client(config, "production")
        self.orders_client = OrdersServiceClient(config)
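The new constructor keeps notification_service optional with a None default, so existing call sites that pass only database_manager and config keep working. A minimal standalone sketch of that pattern, paired with the swallow-notification-errors convention used throughout this commit (names are illustrative, not from the codebase):

import asyncio
from typing import Optional

class Notifier:
    async def emit(self, msg: str) -> None:
        print("notified:", msg)

class Service:
    # Optional collaborator: omitting it preserves the old behavior.
    def __init__(self, notifier: Optional[Notifier] = None):
        self.notifier = notifier

    async def do_work(self) -> None:
        # Notification failures are logged and swallowed, never breaking the main flow.
        if self.notifier:
            try:
                await self.notifier.emit("work done")
            except Exception as err:
                print("notification failed, continuing:", err)

asyncio.run(Service(Notifier()).do_work())  # notifies
asyncio.run(Service().do_work())            # still works without a notifier
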
@@ -302,24 +309,28 @@ class ProductionService:
            raise

    async def update_batch_status(
        self,
        tenant_id: UUID,
        batch_id: UUID,
        status_update: ProductionBatchStatusUpdate
    ) -> ProductionBatch:
        """Update production batch status"""
        try:
            async with self.database_manager.get_session() as session:
                batch_repo = ProductionBatchRepository(session)

                # Get current batch to capture old status for notification
                current_batch = await batch_repo.get_batch(tenant_id, batch_id)
                old_status = current_batch.status.value if current_batch else None

                # Update batch status
                batch = await batch_repo.update_batch_status(
                    batch_id,
                    status_update.status,
                    status_update.actual_quantity,
                    status_update.notes
                )

                # Update inventory if batch is completed
                if status_update.status == ProductionStatus.COMPLETED and status_update.actual_quantity:
                    await self._update_inventory_on_completion(
@@ -331,15 +342,33 @@ class ProductionService:
                await delete_cached(cache_key)
                logger.debug("Invalidated production dashboard cache", cache_key=cache_key, tenant_id=str(tenant_id))

                # Emit batch state changed notification
                if self.notification_service and old_status:
                    try:
                        await self.notification_service.emit_batch_state_changed_notification(
                            tenant_id=tenant_id,
                            batch_id=str(batch.id),
                            product_sku=batch.product_sku or "",
                            product_name=batch.product_name or "Unknown Product",
                            old_status=old_status,
                            new_status=status_update.status.value,
                            quantity=batch.planned_quantity or 0,
                            unit=batch.unit or "units",
                            assigned_to=batch.assigned_to
                        )
                    except Exception as notif_error:
                        logger.warning("Failed to emit batch state notification",
                                       error=str(notif_error), batch_id=str(batch_id))

                logger.info("Updated batch status",
                            batch_id=str(batch_id),
                            new_status=status_update.status.value,
                            tenant_id=str(tenant_id))

                return batch

        except Exception as e:
            logger.error("Error updating batch status",
                         error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
            raise
@@ -664,6 +693,23 @@ class ProductionService:
            logger.info("Started production batch",
                        batch_id=str(batch_id), tenant_id=str(tenant_id))

            # Emit batch started notification
            if self.notification_service:
                try:
                    await self.notification_service.emit_batch_started_notification(
                        tenant_id=tenant_id,
                        batch_id=str(batch.id),
                        product_sku=batch.product_sku or "",
                        product_name=batch.product_name or "Unknown Product",
                        quantity_planned=batch.planned_quantity or 0,
                        unit=batch.unit or "units",
                        estimated_duration_minutes=batch.planned_duration_minutes,
                        assigned_to=batch.assigned_to
                    )
                except Exception as notif_error:
                    logger.warning("Failed to emit batch started notification",
                                   error=str(notif_error), batch_id=str(batch_id))

            # Acknowledge production delay alerts (non-blocking)
            try:
                from shared.clients.alert_processor_client import get_alert_processor_client
@@ -710,7 +756,30 @@ class ProductionService:
            logger.info("Completed production batch",
                        batch_id=str(batch_id), tenant_id=str(tenant_id))

            # Emit batch completed notification
            if self.notification_service:
                try:
                    # Calculate production duration if start and end times are available
                    production_duration_minutes = None
                    if batch.actual_start_time and batch.actual_end_time:
                        duration = batch.actual_end_time - batch.actual_start_time
                        production_duration_minutes = int(duration.total_seconds() / 60)

                    await self.notification_service.emit_batch_completed_notification(
                        tenant_id=tenant_id,
                        batch_id=str(batch.id),
                        product_sku=batch.product_sku or "",
                        product_name=batch.product_name or "Unknown Product",
                        quantity_produced=batch.actual_quantity or batch.planned_quantity or 0,
                        unit=batch.unit or "units",
                        production_duration_minutes=production_duration_minutes,
                        quality_score=batch.quality_score
                    )
                except Exception as notif_error:
                    logger.warning("Failed to emit batch completed notification",
                                   error=str(notif_error), batch_id=str(batch_id))

            return batch

        except Exception as e:
            logger.error("Error completing production batch",
@@ -1568,11 +1637,13 @@ class ProductionService:
                from app.repositories.equipment_repository import EquipmentRepository
                equipment_repo = EquipmentRepository(session)

                # First verify equipment belongs to tenant and capture old status
                equipment = await equipment_repo.get_equipment_by_id(tenant_id, equipment_id)
                if not equipment:
                    return None

                old_status = equipment.status if hasattr(equipment, 'status') else None

                # Update equipment
                updated_equipment = await equipment_repo.update_equipment(
                    equipment_id,
@@ -1585,7 +1656,24 @@ class ProductionService:
                logger.info("Updated equipment",
                            equipment_id=str(equipment_id), tenant_id=str(tenant_id))

                # Emit equipment status notification if status changed
                update_dict = equipment_update.model_dump(exclude_none=True)
                new_status = update_dict.get('status')
                if self.notification_service and new_status and old_status and new_status != old_status:
                    try:
                        await self.notification_service.emit_equipment_status_notification(
                            tenant_id=tenant_id,
                            equipment_id=str(equipment_id),
                            equipment_name=updated_equipment.name or "Unknown Equipment",
                            old_status=old_status,
                            new_status=new_status,
                            reason=update_dict.get('notes') or update_dict.get('status_reason')
                        )
                    except Exception as notif_error:
                        logger.warning("Failed to emit equipment status notification",
                                       error=str(notif_error), equipment_id=str(equipment_id))

                return updated_equipment

        except Exception as e:
            logger.error("Error updating equipment",
@@ -1862,7 +1950,11 @@ class ProductionService:
        # For now, we assume recipe_id = product_id or fetch from a mapping

        # Generate reasoning data for JTBD dashboard
        from shared.schemas.reasoning_types import (
            create_production_batch_reasoning,
            PredictionFactor,
            PredictionFactorType
        )

        # Try to get product name from forecast, stock_info, or use placeholder
        product_name = (
@@ -1871,15 +1963,113 @@ class ProductionService:
            f"Product {product_id}"
        )

        # Calculate variance from historical average if available
        historical_average = forecast.get('historical_average', predicted_demand * 0.8)  # Default to 80% of predicted
        variance_percent = ((predicted_demand - historical_average) / historical_average * 100) if historical_average > 0 else 0

        # Create detailed factors for enhanced reasoning
        factors = []

        # Factor 1: Historical pattern (always present)
        factors.append(
            PredictionFactor(
                factor=PredictionFactorType.HISTORICAL_PATTERN,
                weight=0.40,
                contribution=historical_average * 0.40,
                description="Based on historical sales patterns",
                historical_data={
                    "historical_average": historical_average,
                    "historical_period": "last_30_days"
                },
                confidence=0.90
            )
        )

        # Factor 2: Weather impact (if weather data is available in forecast)
        weather_impact = forecast.get('weather_impact')
        if weather_impact:
            weather_type = weather_impact.get('type', 'sunny')
            weather_contribution = weather_impact.get('contribution', 0)
            weather_weight = weather_impact.get('weight', 0.25)

            # Map weather type to PredictionFactorType
            weather_factor_map = {
                'sunny': PredictionFactorType.WEATHER_SUNNY,
                'rainy': PredictionFactorType.WEATHER_RAINY,
                'cold': PredictionFactorType.WEATHER_COLD,
                'hot': PredictionFactorType.WEATHER_HOT
            }
            weather_factor = weather_factor_map.get(weather_type, PredictionFactorType.WEATHER_SUNNY)

            factors.append(
                PredictionFactor(
                    factor=weather_factor,
                    weight=weather_weight,
                    contribution=weather_contribution,
                    description=f"Weather impact: {weather_type}",
                    weather_data={
                        "condition": weather_type,
                        "temperature": weather_impact.get('temperature', 22),
                        "impact_direction": weather_impact.get('impact_direction', 'positive')
                    },
                    confidence=weather_impact.get('confidence', 0.85)
                )
            )

        # Factor 3: Weekend boost (if target date is weekend)
        if target_date.weekday() >= 5:  # Saturday (5) or Sunday (6)
            weekend_contribution = predicted_demand * 0.20  # 20% boost
            factors.append(
                PredictionFactor(
                    factor=PredictionFactorType.WEEKEND_BOOST,
                    weight=0.20,
                    contribution=weekend_contribution,
                    description="Weekend demand increase",
                    confidence=0.80
                )
            )

        # Factor 4: Inventory level consideration
        inventory_weight = 0.15
        inventory_contribution = current_stock * inventory_weight
        factors.append(
            PredictionFactor(
                factor=PredictionFactorType.INVENTORY_LEVEL,
                weight=inventory_weight,
                contribution=inventory_contribution,
                description="Current inventory consideration",
                inventory_data={
                    "current_stock": current_stock,
                    "safety_stock_days": 3
                },
                confidence=0.95
            )
        )

        # Use unified reasoning function - enhanced when factors exist, basic otherwise.
        # (Factors 1 and 4 are always appended above, so the enhanced branch is the
        # one normally taken; the basic call remains as a fallback.)
        if factors:
            reasoning_data = create_production_batch_reasoning(
                product_name=product_name,
                predicted_demand=predicted_demand,
                historical_average=historical_average,
                variance_percent=variance_percent,
                variance_reason="weather_sunny_weekend" if (target_date.weekday() >= 5 and weather_impact) else "historical_pattern",
                confidence_score=forecast.get('confidence_score', 0.87),
                factors=factors,
                urgency_level="normal",
                ready_by_time="08:00",
                forecast_id=forecast.get('forecast_id')
            )
        else:
            reasoning_data = create_production_batch_reasoning(
                product_name=product_name,
                predicted_demand=predicted_demand,
                current_stock=current_stock,
                production_needed=production_needed,
                target_date=target_date.isoformat(),
                confidence_score=forecast.get('confidence_score', 0.85)
            )

        # Create production batch
        planned_start = datetime.combine(target_date, datetime.min.time())
        planned_end = datetime.combine(target_date, datetime.max.time())
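To make the variance arithmetic concrete: when the forecast carries no historical_average, the 80% fallback makes the variance a constant +25% regardless of demand, which is worth keeping in mind when reading dashboards fed by this reasoning.

predicted_demand = 120.0
historical_average = predicted_demand * 0.8  # fallback when the forecast has no history
variance_percent = ((predicted_demand - historical_average) / historical_average * 100) if historical_average > 0 else 0
print(variance_percent)  # 25.0 -- (120 - 96) / 96 * 100, and (d - 0.8d) / 0.8d is 0.25 for any d
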
@@ -1953,4 +2143,4 @@ class ProductionService:
    ) -> str:
        """Generate batch number in format BATCH-YYYYMMDD-NNN"""
        date_str = target_date.strftime("%Y%m%d")
        return f"BATCH-{date_str}-{batch_index:03d}"
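For example, the third batch planned for 2024-05-01:

from datetime import date

date_str = date(2024, 5, 1).strftime("%Y%m%d")
print(f"BATCH-{date_str}-{3:03d}")  # BATCH-20240501-003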