Delete legacy alerts

Urtzi Alfaro
2025-08-22 15:31:52 +02:00
parent c6dd6fd1de
commit 90100a66c6
40 changed files with 25 additions and 3308 deletions
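Note that this commit deletes the ForecastAlert, OrderAlert and ProductionAlert ORM models but ships no schema migration, so existing databases are left with orphaned forecast_alerts, order_alerts and production_alerts tables. A minimal sketch of a follow-up migration, assuming the project uses Alembic (revision identifiers are placeholders, and each service database would need its own copy covering only its table):

"""Drop the alert tables orphaned by commit 90100a66c6."""
from alembic import op

# Placeholder identifiers; Alembic generates the real values.
revision = "drop_legacy_alerts"
down_revision = None

def upgrade() -> None:
    # No ORM model maps these tables anymore.
    op.drop_table("forecast_alerts")
    op.drop_table("order_alerts")
    op.drop_table("production_alerts")

def downgrade() -> None:
    # Recreating the tables would require the deleted model definitions.
    raise NotImplementedError("downgrade is not supported")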

View File

@@ -12,7 +12,7 @@ import uuid
from app.services.forecasting_service import EnhancedForecastingService
from app.schemas.forecasts import (
ForecastRequest, ForecastResponse, BatchForecastRequest,
BatchForecastResponse, AlertResponse
BatchForecastResponse
)
from shared.auth.decorators import (
get_current_user_dep,
@@ -242,68 +242,6 @@ async def get_enhanced_tenant_forecasts(
)
@router.get("/tenants/{tenant_id}/forecasts/alerts")
@track_execution_time("enhanced_get_alerts_duration_seconds", "forecasting-service")
async def get_enhanced_forecast_alerts(
tenant_id: str = Path(..., description="Tenant ID"),
active_only: bool = Query(True, description="Return only active alerts"),
skip: int = Query(0, description="Number of records to skip"),
limit: int = Query(50, description="Number of records to return"),
request_obj: Request = None,
current_tenant: str = Depends(get_current_tenant_id_dep),
enhanced_forecasting_service: EnhancedForecastingService = Depends(get_enhanced_forecasting_service)
):
"""Get forecast alerts using enhanced repository pattern"""
metrics = get_metrics_collector(request_obj)
try:
# Enhanced tenant validation
if tenant_id != current_tenant:
if metrics:
metrics.increment_counter("enhanced_get_alerts_access_denied_total")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Access denied to tenant resources"
)
# Record metrics
if metrics:
metrics.increment_counter("enhanced_get_alerts_total")
# Get alerts using enhanced service
alerts = await enhanced_forecasting_service.get_tenant_alerts(
tenant_id=tenant_id,
active_only=active_only,
skip=skip,
limit=limit
)
if metrics:
metrics.increment_counter("enhanced_get_alerts_success_total")
return {
"tenant_id": tenant_id,
"alerts": alerts,
"total_returned": len(alerts),
"active_only": active_only,
"pagination": {
"skip": skip,
"limit": limit
},
"enhanced_features": True,
"repository_integration": True
}
except Exception as e:
if metrics:
metrics.increment_counter("enhanced_get_alerts_errors_total")
logger.error("Failed to get enhanced forecast alerts",
tenant_id=tenant_id,
error=str(e))
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to get forecast alerts"
)
@router.get("/tenants/{tenant_id}/forecasts/{forecast_id}")
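Callers of GET /tenants/{tenant_id}/forecasts/alerts now receive a plain 404. If a softer cutover is preferred, a temporary stub could answer 410 Gone instead; a sketch, not part of this commit, with a made-up handler name (in the real module the existing router would be reused):

from fastapi import APIRouter, HTTPException, status

router = APIRouter()  # stands in for the module's existing router

@router.get("/tenants/{tenant_id}/forecasts/alerts", include_in_schema=False)
async def removed_forecast_alerts(tenant_id: str):
    # Hypothetical deprecation stub for the deleted alerts endpoint.
    raise HTTPException(
        status_code=status.HTTP_410_GONE,
        detail="Forecast alerts were removed from this service",
    )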

View File

@@ -51,9 +51,5 @@ class ForecastingSettings(BaseServiceSettings):
TEMPERATURE_THRESHOLD_HOT: float = float(os.getenv("TEMPERATURE_THRESHOLD_HOT", "30.0"))
RAIN_IMPACT_FACTOR: float = float(os.getenv("RAIN_IMPACT_FACTOR", "0.7"))
# Alert Thresholds
HIGH_DEMAND_THRESHOLD: float = float(os.getenv("HIGH_DEMAND_THRESHOLD", "1.5"))
LOW_DEMAND_THRESHOLD: float = float(os.getenv("LOW_DEMAND_THRESHOLD", "0.5"))
STOCKOUT_RISK_THRESHOLD: float = float(os.getenv("STOCKOUT_RISK_THRESHOLD", "0.9"))
settings = ForecastingSettings()
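Deployments that still export the three removed threshold variables now carry dead configuration. An optional startup check could surface that; a sketch, assuming it runs next to the ForecastingSettings construction above:

import os
import warnings

# Hypothetical guard: warn about env vars this commit stopped reading.
for _name in ("HIGH_DEMAND_THRESHOLD", "LOW_DEMAND_THRESHOLD", "STOCKOUT_RISK_THRESHOLD"):
    if _name in os.environ:
        warnings.warn(f"{_name} is set but no longer read by ForecastingSettings")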

View File

@@ -86,29 +86,4 @@ class PredictionBatch(Base):
def __repr__(self):
return f"<PredictionBatch(id={self.id}, status={self.status})>"
class ForecastAlert(Base):
"""Alerts based on forecast results"""
__tablename__ = "forecast_alerts"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
forecast_id = Column(UUID(as_uuid=True), nullable=False)
# Alert information
alert_type = Column(String(50), nullable=False) # high_demand, low_demand, stockout_risk
severity = Column(String(20), default="medium") # low, medium, high, critical
message = Column(Text, nullable=False)
# Status
created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
acknowledged_at = Column(DateTime(timezone=True))
resolved_at = Column(DateTime(timezone=True))
is_active = Column(Boolean, default=True)
# Notification
notification_sent = Column(Boolean, default=False)
notification_method = Column(String(50)) # email, whatsapp, sms
def __repr__(self):
return f"<ForecastAlert(id={self.id}, type={self.alert_type})>"

View File

@@ -6,7 +6,6 @@ Repository implementations for forecasting service
from .base import ForecastingBaseRepository
from .forecast_repository import ForecastRepository
from .prediction_batch_repository import PredictionBatchRepository
from .forecast_alert_repository import ForecastAlertRepository
from .performance_metric_repository import PerformanceMetricRepository
from .prediction_cache_repository import PredictionCacheRepository
@@ -14,7 +13,6 @@ __all__ = [
"ForecastingBaseRepository",
"ForecastRepository",
"PredictionBatchRepository",
"ForecastAlertRepository",
"PerformanceMetricRepository",
"PredictionCacheRepository"
]

View File

@@ -1,375 +0,0 @@
"""
Forecast Alert Repository
Repository for forecast alert operations
"""
from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import text
from datetime import datetime, timedelta
import structlog
from .base import ForecastingBaseRepository
from app.models.forecasts import ForecastAlert
from shared.database.exceptions import DatabaseError, ValidationError
logger = structlog.get_logger()
class ForecastAlertRepository(ForecastingBaseRepository):
"""Repository for forecast alert operations"""
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 300):
# Alerts change frequently, shorter cache time (5 minutes)
super().__init__(ForecastAlert, session, cache_ttl)
async def create_alert(self, alert_data: Dict[str, Any]) -> ForecastAlert:
"""Create a new forecast alert"""
try:
# Validate alert data
validation_result = self._validate_forecast_data(
alert_data,
["tenant_id", "forecast_id", "alert_type", "message"]
)
if not validation_result["is_valid"]:
raise ValidationError(f"Invalid alert data: {validation_result['errors']}")
# Set default values
if "severity" not in alert_data:
alert_data["severity"] = "medium"
if "is_active" not in alert_data:
alert_data["is_active"] = True
if "notification_sent" not in alert_data:
alert_data["notification_sent"] = False
alert = await self.create(alert_data)
logger.info("Forecast alert created",
alert_id=alert.id,
tenant_id=alert.tenant_id,
alert_type=alert.alert_type,
severity=alert.severity)
return alert
except ValidationError:
raise
except Exception as e:
logger.error("Failed to create forecast alert",
tenant_id=alert_data.get("tenant_id"),
error=str(e))
raise DatabaseError(f"Failed to create alert: {str(e)}")
async def get_active_alerts(
self,
tenant_id: str,
alert_type: str = None,
severity: str = None
) -> List[ForecastAlert]:
"""Get active alerts for a tenant"""
try:
filters = {
"tenant_id": tenant_id,
"is_active": True
}
if alert_type:
filters["alert_type"] = alert_type
if severity:
filters["severity"] = severity
return await self.get_multi(
filters=filters,
order_by="created_at",
order_desc=True
)
except Exception as e:
logger.error("Failed to get active alerts",
tenant_id=tenant_id,
error=str(e))
return []
async def acknowledge_alert(
self,
alert_id: str,
acknowledged_by: str = None
) -> Optional[ForecastAlert]:
"""Acknowledge an alert"""
try:
update_data = {
"acknowledged_at": datetime.utcnow()
}
if acknowledged_by:
# Store in message or create a new field if needed
current_alert = await self.get_by_id(alert_id)
if current_alert:
update_data["message"] = f"{current_alert.message} (Acknowledged by: {acknowledged_by})"
updated_alert = await self.update(alert_id, update_data)
logger.info("Alert acknowledged",
alert_id=alert_id,
acknowledged_by=acknowledged_by)
return updated_alert
except Exception as e:
logger.error("Failed to acknowledge alert",
alert_id=alert_id,
error=str(e))
raise DatabaseError(f"Failed to acknowledge alert: {str(e)}")
async def resolve_alert(
self,
alert_id: str,
resolved_by: str = None
) -> Optional[ForecastAlert]:
"""Resolve an alert"""
try:
update_data = {
"resolved_at": datetime.utcnow(),
"is_active": False
}
if resolved_by:
current_alert = await self.get_by_id(alert_id)
if current_alert:
update_data["message"] = f"{current_alert.message} (Resolved by: {resolved_by})"
updated_alert = await self.update(alert_id, update_data)
logger.info("Alert resolved",
alert_id=alert_id,
resolved_by=resolved_by)
return updated_alert
except Exception as e:
logger.error("Failed to resolve alert",
alert_id=alert_id,
error=str(e))
raise DatabaseError(f"Failed to resolve alert: {str(e)}")
async def mark_notification_sent(
self,
alert_id: str,
notification_method: str
) -> Optional[ForecastAlert]:
"""Mark alert notification as sent"""
try:
update_data = {
"notification_sent": True,
"notification_method": notification_method
}
updated_alert = await self.update(alert_id, update_data)
logger.debug("Alert notification marked as sent",
alert_id=alert_id,
method=notification_method)
return updated_alert
except Exception as e:
logger.error("Failed to mark notification as sent",
alert_id=alert_id,
error=str(e))
return None
async def get_unnotified_alerts(self, tenant_id: str = None) -> List[ForecastAlert]:
"""Get alerts that haven't been notified yet"""
try:
filters = {
"is_active": True,
"notification_sent": False
}
if tenant_id:
filters["tenant_id"] = tenant_id
return await self.get_multi(
filters=filters,
order_by="created_at",
order_desc=False # Oldest first for notification
)
except Exception as e:
logger.error("Failed to get unnotified alerts",
tenant_id=tenant_id,
error=str(e))
return []
async def get_alert_statistics(self, tenant_id: str) -> Dict[str, Any]:
"""Get alert statistics for a tenant"""
try:
# Get counts by type
type_query = text("""
SELECT alert_type, COUNT(*) as count
FROM forecast_alerts
WHERE tenant_id = :tenant_id
GROUP BY alert_type
ORDER BY count DESC
""")
result = await self.session.execute(type_query, {"tenant_id": tenant_id})
alerts_by_type = {row.alert_type: row.count for row in result.fetchall()}
# Get counts by severity
severity_query = text("""
SELECT severity, COUNT(*) as count
FROM forecast_alerts
WHERE tenant_id = :tenant_id
GROUP BY severity
ORDER BY count DESC
""")
severity_result = await self.session.execute(severity_query, {"tenant_id": tenant_id})
alerts_by_severity = {row.severity: row.count for row in severity_result.fetchall()}
# Get status counts
total_alerts = await self.count(filters={"tenant_id": tenant_id})
active_alerts = await self.count(filters={
"tenant_id": tenant_id,
"is_active": True
})
acknowledged_alerts = await self.count(filters={
"tenant_id": tenant_id,
"acknowledged_at": "IS NOT NULL" # This won't work with our current filters
})
# Get recent activity (alerts in last 7 days)
seven_days_ago = datetime.utcnow() - timedelta(days=7)
recent_alerts = len(await self.get_by_date_range(
tenant_id, seven_days_ago, datetime.utcnow(), limit=1000
))
# Calculate response metrics
response_query = text("""
SELECT
AVG(EXTRACT(EPOCH FROM (acknowledged_at - created_at))/60) as avg_acknowledgment_time_minutes,
AVG(EXTRACT(EPOCH FROM (resolved_at - created_at))/60) as avg_resolution_time_minutes,
COUNT(CASE WHEN acknowledged_at IS NOT NULL THEN 1 END) as acknowledged_count,
COUNT(CASE WHEN resolved_at IS NOT NULL THEN 1 END) as resolved_count
FROM forecast_alerts
WHERE tenant_id = :tenant_id
""")
response_result = await self.session.execute(response_query, {"tenant_id": tenant_id})
response_row = response_result.fetchone()
return {
"total_alerts": total_alerts,
"active_alerts": active_alerts,
"resolved_alerts": total_alerts - active_alerts,
"alerts_by_type": alerts_by_type,
"alerts_by_severity": alerts_by_severity,
"recent_alerts_7d": recent_alerts,
"response_metrics": {
"avg_acknowledgment_time_minutes": float(response_row.avg_acknowledgment_time_minutes or 0),
"avg_resolution_time_minutes": float(response_row.avg_resolution_time_minutes or 0),
"acknowledgment_rate": round((response_row.acknowledged_count / max(total_alerts, 1)) * 100, 2),
"resolution_rate": round((response_row.resolved_count / max(total_alerts, 1)) * 100, 2)
} if response_row else {
"avg_acknowledgment_time_minutes": 0.0,
"avg_resolution_time_minutes": 0.0,
"acknowledgment_rate": 0.0,
"resolution_rate": 0.0
}
}
except Exception as e:
logger.error("Failed to get alert statistics",
tenant_id=tenant_id,
error=str(e))
return {
"total_alerts": 0,
"active_alerts": 0,
"resolved_alerts": 0,
"alerts_by_type": {},
"alerts_by_severity": {},
"recent_alerts_7d": 0,
"response_metrics": {
"avg_acknowledgment_time_minutes": 0.0,
"avg_resolution_time_minutes": 0.0,
"acknowledgment_rate": 0.0,
"resolution_rate": 0.0
}
}
async def cleanup_old_alerts(self, days_old: int = 90) -> int:
"""Clean up old resolved alerts"""
try:
cutoff_date = datetime.utcnow() - timedelta(days=days_old)
query_text = """
DELETE FROM forecast_alerts
WHERE is_active = false
AND resolved_at IS NOT NULL
AND resolved_at < :cutoff_date
"""
result = await self.session.execute(text(query_text), {"cutoff_date": cutoff_date})
deleted_count = result.rowcount
logger.info("Cleaned up old forecast alerts",
deleted_count=deleted_count,
days_old=days_old)
return deleted_count
except Exception as e:
logger.error("Failed to cleanup old alerts",
error=str(e))
raise DatabaseError(f"Alert cleanup failed: {str(e)}")
async def bulk_resolve_alerts(
self,
tenant_id: str,
alert_type: str = None,
older_than_hours: int = 24
) -> int:
"""Bulk resolve old alerts"""
try:
cutoff_time = datetime.utcnow() - timedelta(hours=older_than_hours)
conditions = [
"tenant_id = :tenant_id",
"is_active = true",
"created_at < :cutoff_time"
]
params = {
"tenant_id": tenant_id,
"cutoff_time": cutoff_time
}
if alert_type:
conditions.append("alert_type = :alert_type")
params["alert_type"] = alert_type
query_text = f"""
UPDATE forecast_alerts
SET is_active = false, resolved_at = :resolved_at
WHERE {' AND '.join(conditions)}
"""
params["resolved_at"] = datetime.utcnow()
result = await self.session.execute(text(query_text), params)
resolved_count = result.rowcount
logger.info("Bulk resolved old alerts",
tenant_id=tenant_id,
alert_type=alert_type,
resolved_count=resolved_count,
older_than_hours=older_than_hours)
return resolved_count
except Exception as e:
logger.error("Failed to bulk resolve alerts",
tenant_id=tenant_id,
error=str(e))
raise DatabaseError(f"Bulk resolve failed: {str(e)}")

View File

@@ -14,11 +14,6 @@ class BusinessType(str, Enum):
INDIVIDUAL = "individual"
CENTRAL_WORKSHOP = "central_workshop"
class AlertType(str, Enum):
HIGH_DEMAND = "high_demand"
LOW_DEMAND = "low_demand"
STOCKOUT_RISK = "stockout_risk"
OVERPRODUCTION = "overproduction"
class ForecastRequest(BaseModel):
"""Request schema for generating forecasts"""
@@ -100,16 +95,4 @@ class BatchForecastResponse(BaseModel):
forecasts: Optional[List[ForecastResponse]]
error_message: Optional[str]
class AlertResponse(BaseModel):
"""Response schema for forecast alerts"""
id: str
tenant_id: str
forecast_id: str
alert_type: str
severity: str
message: str
is_active: bool
created_at: datetime
acknowledged_at: Optional[datetime]
notification_sent: bool

View File

@@ -10,7 +10,6 @@ from .data_client import DataClient
from .messaging import (
publish_forecast_generated,
publish_batch_forecast_completed,
publish_forecast_alert,
ForecastingStatusPublisher
)
@@ -22,6 +21,5 @@ __all__ = [
"DataClient",
"publish_forecast_generated",
"publish_batch_forecast_completed",
"publish_forecast_alert",
"ForecastingStatusPublisher"
]

View File

@@ -18,7 +18,6 @@ from app.services.data_client import DataClient
from app.repositories import (
ForecastRepository,
PredictionBatchRepository,
ForecastAlertRepository,
PerformanceMetricRepository,
PredictionCacheRepository
)
@@ -36,7 +35,7 @@ logger = structlog.get_logger()
class EnhancedForecastingService:
"""
Enhanced forecasting service using repository pattern.
Handles forecast generation, batch processing, and alerting with proper data abstraction.
Handles forecast generation and batch processing with proper data abstraction.
"""
def __init__(self, database_manager=None):
@@ -55,7 +54,6 @@ class EnhancedForecastingService:
return {
'forecast': ForecastRepository(session),
'batch': PredictionBatchRepository(session),
'alert': ForecastAlertRepository(session),
'performance': PerformanceMetricRepository(session),
'cache': PredictionCacheRepository(session)
}
@@ -165,15 +163,6 @@ class EnhancedForecastingService:
logger.error("Failed to delete forecast", error=str(e))
return False
async def get_tenant_alerts(self, tenant_id: str, active_only: bool = True,
skip: int = 0, limit: int = 50) -> List[Dict]:
"""Get tenant alerts"""
try:
# Implementation would use repository pattern
return []
except Exception as e:
logger.error("Failed to get tenant alerts", error=str(e))
raise
async def get_tenant_forecast_statistics(self, tenant_id: str) -> Dict[str, Any]:
"""Get tenant forecast statistics"""
@@ -246,7 +235,7 @@ class EnhancedForecastingService:
request: ForecastRequest
) -> ForecastResponse:
"""
Generate forecast using repository pattern with caching and alerting.
Generate forecast using repository pattern with caching.
"""
start_time = datetime.utcnow()
@@ -339,8 +328,6 @@ class EnhancedForecastingService:
expires_in_hours=24
)
# Step 8: Check for alerts
await self._check_and_create_alerts(forecast, adjusted_prediction, repos)
logger.info("Enhanced forecast generated successfully",
forecast_id=forecast.id,
@@ -398,8 +385,6 @@ class EnhancedForecastingService:
# Get forecast summary
forecast_summary = await repos['forecast'].get_forecast_summary(tenant_id)
# Get alert statistics
alert_stats = await repos['alert'].get_alert_statistics(tenant_id)
# Get batch statistics
batch_stats = await repos['batch'].get_batch_statistics(tenant_id)
@@ -415,7 +400,6 @@ class EnhancedForecastingService:
return {
"tenant_id": tenant_id,
"forecast_analytics": forecast_summary,
"alert_analytics": alert_stats,
"batch_analytics": batch_stats,
"cache_performance": cache_stats,
"performance_trends": performance_trends,
@@ -469,51 +453,6 @@ class EnhancedForecastingService:
error=str(e))
raise DatabaseError(f"Failed to create batch: {str(e)}")
async def _check_and_create_alerts(self, forecast, prediction: Dict[str, Any], repos: Dict):
"""Check forecast results and create alerts if necessary"""
try:
alerts_to_create = []
# Check for high demand alert
if prediction['prediction'] > 100: # Threshold for high demand
alerts_to_create.append({
"tenant_id": str(forecast.tenant_id),
"forecast_id": str(forecast.id), # Convert UUID to string
"alert_type": "high_demand",
"severity": "high" if prediction['prediction'] > 200 else "medium",
"message": f"High demand predicted for inventory product {str(forecast.inventory_product_id)}: {prediction['prediction']:.1f} units"
})
# Check for low demand alert
elif prediction['prediction'] < 10: # Threshold for low demand
alerts_to_create.append({
"tenant_id": str(forecast.tenant_id),
"forecast_id": str(forecast.id), # Convert UUID to string
"alert_type": "low_demand",
"severity": "low",
"message": f"Low demand predicted for inventory product {str(forecast.inventory_product_id)}: {prediction['prediction']:.1f} units"
})
# Check for stockout risk (very low prediction with narrow confidence interval)
confidence_interval = prediction['upper_bound'] - prediction['lower_bound']
if prediction['prediction'] < 5 and confidence_interval < 10:
alerts_to_create.append({
"tenant_id": str(forecast.tenant_id),
"forecast_id": str(forecast.id), # Convert UUID to string
"alert_type": "stockout_risk",
"severity": "critical",
"message": f"Stockout risk for inventory product {str(forecast.inventory_product_id)}: predicted {prediction['prediction']:.1f} units with high confidence"
})
# Create alerts
for alert_data in alerts_to_create:
await repos['alert'].create_alert(alert_data)
except Exception as e:
logger.error("Failed to create alerts",
forecast_id=forecast.id,
error=str(e))
# Don't raise - alerts are not critical for forecast generation
def _create_forecast_response_from_cache(self, cache_entry) -> ForecastResponse:
"""Create forecast response from cached entry"""

View File

@@ -72,12 +72,6 @@ async def publish_forecast_completed(data: Dict[str, Any]):
event = ForecastGeneratedEvent(service_name="forecasting_service", data=data, event_type="forecast.completed")
await rabbitmq_client.publish_forecast_event(event_type="completed", forecast_data=event.to_dict())
async def publish_alert_created(data: Dict[str, Any]):
"""Publish alert created event"""
# Assuming 'alert.created' is a type of forecast event, or define a new exchange/publisher method
if rabbitmq_client:
event = ForecastGeneratedEvent(service_name="forecasting_service", data=data, event_type="alert.created")
await rabbitmq_client.publish_forecast_event(event_type="alert.created", forecast_data=event.to_dict())
async def publish_batch_completed(data: Dict[str, Any]):
"""Publish batch forecast completed event"""
@@ -181,19 +175,6 @@ async def publish_batch_forecast_completed(data: dict) -> bool:
logger.error("Failed to publish batch forecast event", error=str(e))
return False
async def publish_forecast_alert(data: dict) -> bool:
"""Publish forecast alert event"""
try:
if rabbitmq_client:
await rabbitmq_client.publish_event(
exchange="forecasting_events",
routing_key="forecast.alert",
message=data
)
return True
except Exception as e:
logger.error("Failed to publish forecast alert event", error=str(e))
return False
# Publisher class for compatibility
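With publish_forecast_alert removed, nothing publishes on the forecast.alert routing key of the forecasting_events exchange, so consumer queues bound to it simply go quiet. A sketch of dropping such a binding, assuming consumers use aio-pika (the queue name is a placeholder):

import aio_pika

async def drop_alert_binding(connection: aio_pika.abc.AbstractConnection) -> None:
    channel = await connection.channel()
    exchange = await channel.get_exchange("forecasting_events")
    # "forecast_alert_consumer" stands in for the real consumer queue name.
    queue = await channel.get_queue("forecast_alert_consumer")
    await queue.unbind(exchange, routing_key="forecast.alert")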

View File

@@ -12,7 +12,6 @@ from typing import List, Optional, Dict, Any
from uuid import UUID
import structlog
from shared.notifications.alert_integration import AlertIntegration
from shared.database.transactions import transactional
from app.core.config import settings
@@ -45,7 +44,7 @@ class FoodSafetyService:
"""Service for food safety and compliance operations"""
def __init__(self):
self.alert_integration = AlertIntegration()
pass
# ===== COMPLIANCE MANAGEMENT =====

View File

@@ -48,11 +48,6 @@ class OrdersSettings(BaseServiceSettings):
MAX_ORDER_VALUE: float = float(os.getenv("MAX_ORDER_VALUE", "100000.0"))
VALIDATE_PRODUCT_AVAILABILITY: bool = os.getenv("VALIDATE_PRODUCT_AVAILABILITY", "true").lower() == "true"
# Alert Thresholds
HIGH_VALUE_ORDER_THRESHOLD: float = float(os.getenv("HIGH_VALUE_ORDER_THRESHOLD", "5000.0"))
LARGE_QUANTITY_ORDER_THRESHOLD: int = int(os.getenv("LARGE_QUANTITY_ORDER_THRESHOLD", "100"))
RUSH_ORDER_HOURS_THRESHOLD: int = int(os.getenv("RUSH_ORDER_HOURS_THRESHOLD", "24"))
PROCUREMENT_SHORTAGE_THRESHOLD: float = float(os.getenv("PROCUREMENT_SHORTAGE_THRESHOLD", "90.0"))
# Payment and Pricing
PAYMENT_VALIDATION_ENABLED: bool = os.getenv("PAYMENT_VALIDATION_ENABLED", "true").lower() == "true"

View File

@@ -58,7 +58,6 @@ async def init_database():
from app.models.order import CustomerOrder, OrderItem, OrderStatusHistory
from app.models.customer import Customer, CustomerContact
from app.models.procurement import ProcurementPlan, ProcurementRequirement
from app.models.alerts import OrderAlert
# Create all tables
await conn.run_sync(Base.metadata.create_all)
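With the OrderAlert import gone, Base.metadata no longer registers order_alerts, so create_all stops creating that table for fresh databases; existing databases keep it until a migration such as the sketch at the top of this commit drops it.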

View File

@@ -1,144 +0,0 @@
# ================================================================
# services/orders/app/models/alerts.py
# ================================================================
"""
Alert system database models for Orders Service
"""
import uuid
from datetime import datetime
from decimal import Decimal
from typing import Optional
from sqlalchemy import Column, String, Boolean, DateTime, Numeric, Text, Integer
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.sql import func
from app.core.database import Base
class OrderAlert(Base):
"""Alert system for orders and procurement issues"""
__tablename__ = "order_alerts"
# Primary identification
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
alert_code = Column(String(50), nullable=False, index=True)
# Alert categorization
alert_type = Column(String(50), nullable=False, index=True)
# Alert types: order_issue, procurement_shortage, payment_problem, delivery_delay,
# quality_concern, high_value_order, rush_order, customer_issue, supplier_problem
severity = Column(String(20), nullable=False, default="medium", index=True)
# Severity levels: critical, high, medium, low
category = Column(String(50), nullable=False, index=True)
# Categories: operational, financial, quality, customer, supplier, compliance
# Alert source and context
source_entity_type = Column(String(50), nullable=False) # order, customer, procurement_plan, etc.
source_entity_id = Column(UUID(as_uuid=True), nullable=False, index=True)
source_entity_reference = Column(String(100), nullable=True) # Human-readable reference
# Alert content
title = Column(String(200), nullable=False)
description = Column(Text, nullable=False)
detailed_message = Column(Text, nullable=True)
# Alert conditions and triggers
trigger_condition = Column(String(200), nullable=True)
threshold_value = Column(Numeric(15, 4), nullable=True)
actual_value = Column(Numeric(15, 4), nullable=True)
variance = Column(Numeric(15, 4), nullable=True)
# Context data
alert_data = Column(JSONB, nullable=True) # Additional context-specific data
business_impact = Column(Text, nullable=True)
customer_impact = Column(Text, nullable=True)
financial_impact = Column(Numeric(12, 2), nullable=True)
# Alert status and lifecycle
status = Column(String(50), nullable=False, default="active", index=True)
# Status values: active, acknowledged, in_progress, resolved, dismissed, expired
alert_state = Column(String(50), nullable=False, default="new") # new, escalated, recurring
# Resolution and follow-up
resolution_action = Column(String(200), nullable=True)
resolution_notes = Column(Text, nullable=True)
resolution_cost = Column(Numeric(10, 2), nullable=True)
# Timing and escalation
first_occurred_at = Column(DateTime(timezone=True), nullable=False, index=True)
last_occurred_at = Column(DateTime(timezone=True), nullable=False)
acknowledged_at = Column(DateTime(timezone=True), nullable=True)
resolved_at = Column(DateTime(timezone=True), nullable=True)
expires_at = Column(DateTime(timezone=True), nullable=True)
# Occurrence tracking
occurrence_count = Column(Integer, nullable=False, default=1)
is_recurring = Column(Boolean, nullable=False, default=False)
recurrence_pattern = Column(String(100), nullable=True)
# Responsibility and assignment
assigned_to = Column(UUID(as_uuid=True), nullable=True)
assigned_role = Column(String(50), nullable=True) # orders_manager, procurement_manager, etc.
escalated_to = Column(UUID(as_uuid=True), nullable=True)
escalation_level = Column(Integer, nullable=False, default=0)
# Notification tracking
notification_sent = Column(Boolean, nullable=False, default=False)
notification_methods = Column(JSONB, nullable=True) # [email, sms, whatsapp, dashboard]
notification_recipients = Column(JSONB, nullable=True) # List of recipients
last_notification_sent = Column(DateTime(timezone=True), nullable=True)
# Customer communication
customer_notified = Column(Boolean, nullable=False, default=False)
customer_notification_method = Column(String(50), nullable=True)
customer_message = Column(Text, nullable=True)
# Recommended actions
recommended_actions = Column(JSONB, nullable=True) # List of suggested actions
automated_actions_taken = Column(JSONB, nullable=True) # Actions performed automatically
manual_actions_required = Column(JSONB, nullable=True) # Actions requiring human intervention
# Priority and urgency
priority_score = Column(Integer, nullable=False, default=50) # 1-100 scale
urgency = Column(String(20), nullable=False, default="normal") # immediate, urgent, normal, low
business_priority = Column(String(20), nullable=False, default="normal")
# Related entities
related_orders = Column(JSONB, nullable=True) # Related order IDs
related_customers = Column(JSONB, nullable=True) # Related customer IDs
related_suppliers = Column(JSONB, nullable=True) # Related supplier IDs
related_alerts = Column(JSONB, nullable=True) # Related alert IDs
# Performance tracking
detection_time = Column(DateTime(timezone=True), nullable=True) # When issue was detected
response_time_minutes = Column(Integer, nullable=True) # Time to acknowledge
resolution_time_minutes = Column(Integer, nullable=True) # Time to resolve
# Quality and feedback
alert_accuracy = Column(Boolean, nullable=True) # Was this a valid alert?
false_positive = Column(Boolean, nullable=False, default=False)
feedback_notes = Column(Text, nullable=True)
# Compliance and audit
compliance_related = Column(Boolean, nullable=False, default=False)
audit_trail = Column(JSONB, nullable=True) # Changes and actions taken
regulatory_impact = Column(String(200), nullable=True)
# Integration and external systems
external_system_reference = Column(String(100), nullable=True)
external_ticket_number = Column(String(50), nullable=True)
erp_reference = Column(String(100), nullable=True)
# Audit fields
created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)
created_by = Column(UUID(as_uuid=True), nullable=True)
updated_by = Column(UUID(as_uuid=True), nullable=True)
# Additional metadata
alert_metadata = Column(JSONB, nullable=True)

View File

@@ -17,7 +17,6 @@ from shared.clients import (
ProductionServiceClient,
SalesServiceClient
)
from shared.notifications.alert_integration import AlertIntegration
from shared.database.transactions import transactional
from app.core.config import settings
@@ -52,7 +51,6 @@ class OrdersService:
inventory_client: InventoryServiceClient,
production_client: ProductionServiceClient,
sales_client: SalesServiceClient,
alert_integration: AlertIntegration
):
self.order_repo = order_repo
self.customer_repo = customer_repo
@@ -61,7 +59,6 @@ class OrdersService:
self.inventory_client = inventory_client
self.production_client = production_client
self.sales_client = sales_client
self.alert_integration = alert_integration
@transactional
async def create_order(
@@ -137,8 +134,6 @@ class OrdersService:
if business_model:
order.business_model = business_model
# 9. Check for high-value or rush orders for alerts
await self._check_order_alerts(db, order, order_data.tenant_id)
# 10. Integrate with production service if auto-processing is enabled
if settings.ORDER_PROCESSING_ENABLED:
@@ -440,46 +435,6 @@ class OrdersService:
# Fallback to UUID
return f"ORD-{uuid.uuid4().hex[:8].upper()}"
async def _check_order_alerts(self, db, order, tenant_id: UUID):
"""Check for conditions that require alerts"""
try:
alerts = []
# High-value order alert
if order.total_amount > settings.HIGH_VALUE_ORDER_THRESHOLD:
alerts.append({
"type": "high_value_order",
"severity": "medium",
"message": f"High-value order created: ${order.total_amount}"
})
# Rush order alert
if order.order_type == "rush":
time_to_delivery = order.requested_delivery_date - order.order_date
if time_to_delivery.total_seconds() < settings.RUSH_ORDER_HOURS_THRESHOLD * 3600:
alerts.append({
"type": "rush_order",
"severity": "high",
"message": f"Rush order with tight deadline: {order.order_number}"
})
# Large quantity alert
total_items = sum(item.quantity for item in order.items)
if total_items > settings.LARGE_QUANTITY_ORDER_THRESHOLD:
alerts.append({
"type": "large_quantity_order",
"severity": "medium",
"message": f"Large quantity order: {total_items} items"
})
# Send alerts if any
for alert in alerts:
await self._send_alert(tenant_id, order.id, alert)
except Exception as e:
logger.error("Error checking order alerts",
order_id=str(order.id),
error=str(e))
async def _notify_production_service(self, order):
"""Notify production service of new order"""
@@ -526,21 +481,3 @@ class OrdersService:
order_id=str(order.id),
error=str(e))
async def _send_alert(self, tenant_id: UUID, order_id: UUID, alert: Dict[str, Any]):
"""Send alert notification"""
try:
if self.notification_client:
await self.notification_client.send_alert(
str(tenant_id),
{
"alert_type": alert["type"],
"severity": alert["severity"],
"message": alert["message"],
"source_entity_id": str(order_id),
"source_entity_type": "order"
}
)
except Exception as e:
logger.warning("Failed to send alert",
tenant_id=str(tenant_id),
error=str(e))

View File

@@ -14,12 +14,10 @@ import structlog
from shared.auth.decorators import get_current_user_dep, get_current_tenant_id_dep
from app.core.database import get_db
from app.services.production_service import ProductionService
from app.services.production_alert_service import ProductionAlertService
from app.schemas.production import (
ProductionBatchCreate, ProductionBatchUpdate, ProductionBatchStatusUpdate,
ProductionBatchResponse, ProductionBatchListResponse,
DailyProductionRequirements, ProductionDashboardSummary, ProductionMetrics,
ProductionAlertResponse, ProductionAlertListResponse
)
from app.core.config import settings
@@ -34,10 +32,6 @@ def get_production_service() -> ProductionService:
return ProductionService(database_manager, settings)
def get_production_alert_service() -> ProductionAlertService:
"""Dependency injection for production alert service"""
from app.core.database import database_manager
return ProductionAlertService(database_manager, settings)
# ================================================================
@@ -319,74 +313,6 @@ async def get_production_schedule(
raise HTTPException(status_code=500, detail="Failed to get production schedule")
# ================================================================
# ALERTS ENDPOINTS
# ================================================================
@router.get("/tenants/{tenant_id}/production/alerts", response_model=ProductionAlertListResponse)
async def get_production_alerts(
tenant_id: UUID = Path(...),
active_only: bool = Query(True, description="Return only active alerts"),
current_tenant: str = Depends(get_current_tenant_id_dep),
current_user: dict = Depends(get_current_user_dep),
alert_service: ProductionAlertService = Depends(get_production_alert_service)
):
"""Get production-related alerts"""
try:
if str(tenant_id) != current_tenant:
raise HTTPException(status_code=403, detail="Access denied to this tenant")
if active_only:
alerts = await alert_service.get_active_alerts(tenant_id)
else:
# Get all alerts (would need additional repo method)
alerts = await alert_service.get_active_alerts(tenant_id)
alert_responses = [ProductionAlertResponse.model_validate(alert) for alert in alerts]
logger.info("Retrieved production alerts",
count=len(alerts), tenant_id=str(tenant_id))
return ProductionAlertListResponse(
alerts=alert_responses,
total_count=len(alerts),
page=1,
page_size=len(alerts)
)
except Exception as e:
logger.error("Error getting production alerts",
error=str(e), tenant_id=str(tenant_id))
raise HTTPException(status_code=500, detail="Failed to get production alerts")
@router.post("/tenants/{tenant_id}/production/alerts/{alert_id}/acknowledge", response_model=ProductionAlertResponse)
async def acknowledge_alert(
tenant_id: UUID = Path(...),
alert_id: UUID = Path(...),
current_tenant: str = Depends(get_current_tenant_id_dep),
current_user: dict = Depends(get_current_user_dep),
alert_service: ProductionAlertService = Depends(get_production_alert_service)
):
"""Acknowledge a production-related alert"""
try:
if str(tenant_id) != current_tenant:
raise HTTPException(status_code=403, detail="Access denied to this tenant")
acknowledged_by = current_user.get("email", "unknown_user")
alert = await alert_service.acknowledge_alert(tenant_id, alert_id, acknowledged_by)
logger.info("Acknowledged production alert",
alert_id=str(alert_id),
acknowledged_by=acknowledged_by,
tenant_id=str(tenant_id))
return ProductionAlertResponse.model_validate(alert)
except Exception as e:
logger.error("Error acknowledging production alert",
error=str(e), alert_id=str(alert_id), tenant_id=str(tenant_id))
raise HTTPException(status_code=500, detail="Failed to acknowledge alert")
# ================================================================

View File

@@ -73,11 +73,6 @@ class ProductionSettings(BaseServiceSettings):
HOLIDAY_PRODUCTION_FACTOR: float = float(os.getenv("HOLIDAY_PRODUCTION_FACTOR", "0.3"))
SPECIAL_EVENT_PRODUCTION_FACTOR: float = float(os.getenv("SPECIAL_EVENT_PRODUCTION_FACTOR", "1.5"))
# Alert Thresholds
CAPACITY_EXCEEDED_THRESHOLD: float = float(os.getenv("CAPACITY_EXCEEDED_THRESHOLD", "1.0"))
PRODUCTION_DELAY_THRESHOLD_MINUTES: int = int(os.getenv("PRODUCTION_DELAY_THRESHOLD_MINUTES", "60"))
LOW_YIELD_ALERT_THRESHOLD: float = float(os.getenv("LOW_YIELD_ALERT_THRESHOLD", "0.80"))
URGENT_ORDER_THRESHOLD_HOURS: int = int(os.getenv("URGENT_ORDER_THRESHOLD_HOURS", "4"))
# Cost Management
COST_TRACKING_ENABLED: bool = os.getenv("COST_TRACKING_ENABLED", "true").lower() == "true"

View File

@@ -9,14 +9,12 @@ from .production import (
ProductionBatch,
ProductionSchedule,
ProductionCapacity,
QualityCheck,
ProductionAlert
QualityCheck
)
__all__ = [
"ProductionBatch",
"ProductionSchedule",
"ProductionCapacity",
"QualityCheck",
"ProductionAlert"
"QualityCheck"
]

View File

@@ -35,12 +35,6 @@ class ProductionPriority(str, enum.Enum):
URGENT = "urgent"
class AlertSeverity(str, enum.Enum):
"""Alert severity levels"""
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
CRITICAL = "critical"
class ProductionBatch(Base):
@@ -391,81 +385,3 @@ class QualityCheck(Base):
}
class ProductionAlert(Base):
"""Production alert model for tracking production issues and notifications"""
__tablename__ = "production_alerts"
# Primary identification
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
# Alert classification
alert_type = Column(String(50), nullable=False, index=True) # capacity_exceeded, delay, quality_issue, etc.
severity = Column(SQLEnum(AlertSeverity), nullable=False, default=AlertSeverity.MEDIUM)
title = Column(String(255), nullable=False)
message = Column(Text, nullable=False)
# Context
batch_id = Column(UUID(as_uuid=True), nullable=True, index=True) # Associated batch if applicable
schedule_id = Column(UUID(as_uuid=True), nullable=True, index=True) # Associated schedule if applicable
source_system = Column(String(50), nullable=False, default="production")
# Status
is_active = Column(Boolean, default=True)
is_acknowledged = Column(Boolean, default=False)
is_resolved = Column(Boolean, default=False)
# Actions and recommendations
recommended_actions = Column(JSON, nullable=True) # List of suggested actions
actions_taken = Column(JSON, nullable=True) # List of actions actually taken
# Business impact
impact_level = Column(String(20), nullable=True) # low, medium, high, critical
estimated_cost_impact = Column(Float, nullable=True)
estimated_time_impact_minutes = Column(Integer, nullable=True)
# Resolution tracking
acknowledged_by = Column(String(100), nullable=True)
acknowledged_at = Column(DateTime(timezone=True), nullable=True)
resolved_by = Column(String(100), nullable=True)
resolved_at = Column(DateTime(timezone=True), nullable=True)
resolution_notes = Column(Text, nullable=True)
# Alert data
alert_data = Column(JSON, nullable=True) # Additional context data
alert_metadata = Column(JSON, nullable=True) # Metadata for the alert
# Timestamps
created_at = Column(DateTime(timezone=True), server_default=func.now())
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary following shared pattern"""
return {
"id": str(self.id),
"tenant_id": str(self.tenant_id),
"alert_type": self.alert_type,
"severity": self.severity.value if self.severity else None,
"title": self.title,
"message": self.message,
"batch_id": str(self.batch_id) if self.batch_id else None,
"schedule_id": str(self.schedule_id) if self.schedule_id else None,
"source_system": self.source_system,
"is_active": self.is_active,
"is_acknowledged": self.is_acknowledged,
"is_resolved": self.is_resolved,
"recommended_actions": self.recommended_actions,
"actions_taken": self.actions_taken,
"impact_level": self.impact_level,
"estimated_cost_impact": self.estimated_cost_impact,
"estimated_time_impact_minutes": self.estimated_time_impact_minutes,
"acknowledged_by": self.acknowledged_by,
"acknowledged_at": self.acknowledged_at.isoformat() if self.acknowledged_at else None,
"resolved_by": self.resolved_by,
"resolved_at": self.resolved_at.isoformat() if self.resolved_at else None,
"resolution_notes": self.resolution_notes,
"alert_data": self.alert_data,
"alert_metadata": self.alert_metadata,
"created_at": self.created_at.isoformat() if self.created_at else None,
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}

View File

@@ -9,12 +9,10 @@ from .production_batch_repository import ProductionBatchRepository
from .production_schedule_repository import ProductionScheduleRepository
from .production_capacity_repository import ProductionCapacityRepository
from .quality_check_repository import QualityCheckRepository
from .production_alert_repository import ProductionAlertRepository
__all__ = [
"ProductionBatchRepository",
"ProductionScheduleRepository",
"ProductionCapacityRepository",
"QualityCheckRepository",
"ProductionAlertRepository"
]

View File

@@ -1,379 +0,0 @@
"""
Production Alert Repository
Repository for production alert operations
"""
from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, and_, text, desc, func
from datetime import datetime, timedelta, date
from uuid import UUID
import structlog
from .base import ProductionBaseRepository
from app.models.production import ProductionAlert, AlertSeverity
from shared.database.exceptions import DatabaseError, ValidationError
from shared.database.transactions import transactional
logger = structlog.get_logger()
class ProductionAlertRepository(ProductionBaseRepository):
"""Repository for production alert operations"""
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 60):
# Alerts are very dynamic, very short cache time (1 minute)
super().__init__(ProductionAlert, session, cache_ttl)
@transactional
async def create_alert(self, alert_data: Dict[str, Any]) -> ProductionAlert:
"""Create a new production alert with validation"""
try:
# Validate alert data
validation_result = self._validate_production_data(
alert_data,
["tenant_id", "alert_type", "title", "message"]
)
if not validation_result["is_valid"]:
raise ValidationError(f"Invalid alert data: {validation_result['errors']}")
# Set default values
if "severity" not in alert_data:
alert_data["severity"] = AlertSeverity.MEDIUM
if "source_system" not in alert_data:
alert_data["source_system"] = "production"
if "is_active" not in alert_data:
alert_data["is_active"] = True
if "is_acknowledged" not in alert_data:
alert_data["is_acknowledged"] = False
if "is_resolved" not in alert_data:
alert_data["is_resolved"] = False
# Create alert
alert = await self.create(alert_data)
logger.info("Production alert created successfully",
alert_id=str(alert.id),
alert_type=alert.alert_type,
severity=alert.severity.value if alert.severity else None,
tenant_id=str(alert.tenant_id))
return alert
except ValidationError:
raise
except Exception as e:
logger.error("Error creating production alert", error=str(e))
raise DatabaseError(f"Failed to create production alert: {str(e)}")
@transactional
async def get_active_alerts(
self,
tenant_id: str,
severity: Optional[AlertSeverity] = None
) -> List[ProductionAlert]:
"""Get active production alerts for a tenant"""
try:
filters = {
"tenant_id": tenant_id,
"is_active": True,
"is_resolved": False
}
if severity:
filters["severity"] = severity
alerts = await self.get_multi(
filters=filters,
order_by="created_at",
order_desc=True
)
logger.info("Retrieved active production alerts",
count=len(alerts),
severity=severity.value if severity else "all",
tenant_id=tenant_id)
return alerts
except Exception as e:
logger.error("Error fetching active alerts", error=str(e))
raise DatabaseError(f"Failed to fetch active alerts: {str(e)}")
@transactional
async def get_alerts_by_type(
self,
tenant_id: str,
alert_type: str,
include_resolved: bool = False
) -> List[ProductionAlert]:
"""Get production alerts by type"""
try:
filters = {
"tenant_id": tenant_id,
"alert_type": alert_type
}
if not include_resolved:
filters["is_resolved"] = False
alerts = await self.get_multi(
filters=filters,
order_by="created_at",
order_desc=True
)
logger.info("Retrieved alerts by type",
count=len(alerts),
alert_type=alert_type,
include_resolved=include_resolved,
tenant_id=tenant_id)
return alerts
except Exception as e:
logger.error("Error fetching alerts by type", error=str(e))
raise DatabaseError(f"Failed to fetch alerts by type: {str(e)}")
@transactional
async def get_alerts_by_batch(
self,
tenant_id: str,
batch_id: str
) -> List[ProductionAlert]:
"""Get production alerts for a specific batch"""
try:
alerts = await self.get_multi(
filters={
"tenant_id": tenant_id,
"batch_id": batch_id
},
order_by="created_at",
order_desc=True
)
logger.info("Retrieved alerts by batch",
count=len(alerts),
batch_id=batch_id,
tenant_id=tenant_id)
return alerts
except Exception as e:
logger.error("Error fetching alerts by batch", error=str(e))
raise DatabaseError(f"Failed to fetch alerts by batch: {str(e)}")
@transactional
async def acknowledge_alert(
self,
alert_id: UUID,
acknowledged_by: str,
acknowledgment_notes: Optional[str] = None
) -> ProductionAlert:
"""Acknowledge a production alert"""
try:
alert = await self.get(alert_id)
if not alert:
raise ValidationError(f"Alert {alert_id} not found")
if alert.is_acknowledged:
raise ValidationError("Alert is already acknowledged")
update_data = {
"is_acknowledged": True,
"acknowledged_by": acknowledged_by,
"acknowledged_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
if acknowledgment_notes:
current_actions = alert.actions_taken or []
current_actions.append({
"action": "acknowledged",
"by": acknowledged_by,
"at": datetime.utcnow().isoformat(),
"notes": acknowledgment_notes
})
update_data["actions_taken"] = current_actions
alert = await self.update(alert_id, update_data)
logger.info("Acknowledged production alert",
alert_id=str(alert_id),
acknowledged_by=acknowledged_by)
return alert
except ValidationError:
raise
except Exception as e:
logger.error("Error acknowledging alert", error=str(e))
raise DatabaseError(f"Failed to acknowledge alert: {str(e)}")
@transactional
async def resolve_alert(
self,
alert_id: UUID,
resolved_by: str,
resolution_notes: str
) -> ProductionAlert:
"""Resolve a production alert"""
try:
alert = await self.get(alert_id)
if not alert:
raise ValidationError(f"Alert {alert_id} not found")
if alert.is_resolved:
raise ValidationError("Alert is already resolved")
update_data = {
"is_resolved": True,
"is_active": False,
"resolved_by": resolved_by,
"resolved_at": datetime.utcnow(),
"resolution_notes": resolution_notes,
"updated_at": datetime.utcnow()
}
# Add to actions taken
current_actions = alert.actions_taken or []
current_actions.append({
"action": "resolved",
"by": resolved_by,
"at": datetime.utcnow().isoformat(),
"notes": resolution_notes
})
update_data["actions_taken"] = current_actions
alert = await self.update(alert_id, update_data)
logger.info("Resolved production alert",
alert_id=str(alert_id),
resolved_by=resolved_by)
return alert
except ValidationError:
raise
except Exception as e:
logger.error("Error resolving alert", error=str(e))
raise DatabaseError(f"Failed to resolve alert: {str(e)}")
@transactional
async def get_alert_statistics(
self,
tenant_id: str,
start_date: date,
end_date: date
) -> Dict[str, Any]:
"""Get alert statistics for a tenant and date range"""
try:
start_datetime = datetime.combine(start_date, datetime.min.time())
end_datetime = datetime.combine(end_date, datetime.max.time())
alerts = await self.get_multi(
filters={
"tenant_id": tenant_id,
"created_at__gte": start_datetime,
"created_at__lte": end_datetime
}
)
total_alerts = len(alerts)
active_alerts = len([a for a in alerts if a.is_active])
acknowledged_alerts = len([a for a in alerts if a.is_acknowledged])
resolved_alerts = len([a for a in alerts if a.is_resolved])
# Group by severity
by_severity = {}
for severity in AlertSeverity:
severity_alerts = [a for a in alerts if a.severity == severity]
by_severity[severity.value] = {
"total": len(severity_alerts),
"active": len([a for a in severity_alerts if a.is_active]),
"resolved": len([a for a in severity_alerts if a.is_resolved])
}
# Group by alert type
by_type = {}
for alert in alerts:
alert_type = alert.alert_type
if alert_type not in by_type:
by_type[alert_type] = {
"total": 0,
"active": 0,
"resolved": 0
}
by_type[alert_type]["total"] += 1
if alert.is_active:
by_type[alert_type]["active"] += 1
if alert.is_resolved:
by_type[alert_type]["resolved"] += 1
# Calculate resolution time statistics
resolved_with_times = [
a for a in alerts
if a.is_resolved and a.resolved_at and a.created_at
]
resolution_times = []
for alert in resolved_with_times:
resolution_time = (alert.resolved_at - alert.created_at).total_seconds() / 3600 # hours
resolution_times.append(resolution_time)
avg_resolution_time = sum(resolution_times) / len(resolution_times) if resolution_times else 0
return {
"period_start": start_date.isoformat(),
"period_end": end_date.isoformat(),
"total_alerts": total_alerts,
"active_alerts": active_alerts,
"acknowledged_alerts": acknowledged_alerts,
"resolved_alerts": resolved_alerts,
"acknowledgment_rate": round((acknowledged_alerts / total_alerts * 100) if total_alerts > 0 else 0, 2),
"resolution_rate": round((resolved_alerts / total_alerts * 100) if total_alerts > 0 else 0, 2),
"average_resolution_time_hours": round(avg_resolution_time, 2),
"by_severity": by_severity,
"by_alert_type": by_type,
"tenant_id": tenant_id
}
except Exception as e:
logger.error("Error calculating alert statistics", error=str(e))
raise DatabaseError(f"Failed to calculate alert statistics: {str(e)}")
@transactional
async def cleanup_old_resolved_alerts(
self,
tenant_id: str,
days_to_keep: int = 30
) -> int:
"""Clean up old resolved alerts"""
try:
cutoff_date = datetime.utcnow() - timedelta(days=days_to_keep)
old_alerts = await self.get_multi(
filters={
"tenant_id": tenant_id,
"is_resolved": True,
"resolved_at__lt": cutoff_date
}
)
deleted_count = 0
for alert in old_alerts:
await self.delete(alert.id)
deleted_count += 1
logger.info("Cleaned up old resolved alerts",
deleted_count=deleted_count,
days_to_keep=days_to_keep,
tenant_id=tenant_id)
return deleted_count
except Exception as e:
logger.error("Error cleaning up old alerts", error=str(e))
raise DatabaseError(f"Failed to clean up old alerts: {str(e)}")

View File

@@ -31,12 +31,6 @@ class ProductionPriorityEnum(str, Enum):
URGENT = "urgent"
class AlertSeverityEnum(str, Enum):
"""Alert severity levels for API"""
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
CRITICAL = "critical"
# ================================================================
@@ -280,61 +274,6 @@ class QualityCheckResponse(BaseModel):
from_attributes = True
# ================================================================
# PRODUCTION ALERT SCHEMAS
# ================================================================
class ProductionAlertBase(BaseModel):
"""Base schema for production alert"""
alert_type: str = Field(..., min_length=1, max_length=50)
severity: AlertSeverityEnum = AlertSeverityEnum.MEDIUM
title: str = Field(..., min_length=1, max_length=255)
message: str = Field(..., min_length=1)
batch_id: Optional[UUID] = None
schedule_id: Optional[UUID] = None
class ProductionAlertCreate(ProductionAlertBase):
"""Schema for creating a production alert"""
recommended_actions: Optional[List[str]] = None
impact_level: Optional[str] = Field(None, pattern="^(low|medium|high|critical)$")
estimated_cost_impact: Optional[float] = Field(None, ge=0)
estimated_time_impact_minutes: Optional[int] = Field(None, ge=0)
alert_data: Optional[Dict[str, Any]] = None
alert_metadata: Optional[Dict[str, Any]] = None
class ProductionAlertResponse(BaseModel):
"""Schema for production alert response"""
id: UUID
tenant_id: UUID
alert_type: str
severity: AlertSeverityEnum
title: str
message: str
batch_id: Optional[UUID]
schedule_id: Optional[UUID]
source_system: str
is_active: bool
is_acknowledged: bool
is_resolved: bool
recommended_actions: Optional[List[str]]
actions_taken: Optional[List[Dict[str, Any]]]
impact_level: Optional[str]
estimated_cost_impact: Optional[float]
estimated_time_impact_minutes: Optional[int]
acknowledged_by: Optional[str]
acknowledged_at: Optional[datetime]
resolved_by: Optional[str]
resolved_at: Optional[datetime]
resolution_notes: Optional[str]
alert_data: Optional[Dict[str, Any]]
alert_metadata: Optional[Dict[str, Any]]
created_at: datetime
updated_at: datetime
class Config:
from_attributes = True
# ================================================================
@@ -346,7 +285,6 @@ class ProductionDashboardSummary(BaseModel):
active_batches: int
todays_production_plan: List[Dict[str, Any]]
capacity_utilization: float
current_alerts: int
on_time_completion_rate: float
average_quality_score: float
total_output_today: float
@@ -406,9 +344,3 @@ class QualityCheckListResponse(BaseModel):
page_size: int
class ProductionAlertListResponse(BaseModel):
"""Schema for production alert list response"""
alerts: List[ProductionAlertResponse]
total_count: int
page: int
page_size: int

View File

@@ -6,9 +6,7 @@ Business logic services
"""
from .production_service import ProductionService
from .production_alert_service import ProductionAlertService
__all__ = [
"ProductionService",
"ProductionAlertService"
"ProductionService"
]

View File

@@ -1,435 +0,0 @@
"""
Production Alert Service
Business logic for production alerts and notifications
"""
from typing import Optional, List, Dict, Any
from datetime import datetime, date, timedelta
from uuid import UUID
import structlog
from shared.database.transactions import transactional
from shared.notifications.alert_integration import AlertIntegration
from shared.config.base import BaseServiceSettings
from app.repositories.production_alert_repository import ProductionAlertRepository
from app.repositories.production_batch_repository import ProductionBatchRepository
from app.repositories.production_capacity_repository import ProductionCapacityRepository
from app.models.production import ProductionAlert, AlertSeverity, ProductionStatus
from app.schemas.production import ProductionAlertCreate
logger = structlog.get_logger()
class ProductionAlertService:
"""Production alert service with comprehensive monitoring"""
def __init__(self, database_manager, config: BaseServiceSettings):
self.database_manager = database_manager
self.config = config
self.alert_integration = AlertIntegration()

    @transactional
    async def check_production_capacity_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Monitor production capacity and generate alerts"""
        alerts = []
        try:
            async with self.database_manager.get_session() as session:
                batch_repo = ProductionBatchRepository(session)
                capacity_repo = ProductionCapacityRepository(session)
                alert_repo = ProductionAlertRepository(session)

                today = date.today()

                # Check capacity exceeded alert
                todays_batches = await batch_repo.get_batches_by_date_range(
                    str(tenant_id), today, today
                )

                # Calculate total planned hours for today
                total_planned_hours = sum(
                    batch.planned_duration_minutes / 60
                    for batch in todays_batches
                    if batch.status != ProductionStatus.CANCELLED
                )

                # Get available capacity
                available_capacity = await capacity_repo.get_capacity_utilization_summary(
                    str(tenant_id), today, today
                )
                total_capacity = available_capacity.get("total_capacity_units", 8.0)

                if total_planned_hours > total_capacity:
                    excess_hours = total_planned_hours - total_capacity
                    alert_data = ProductionAlertCreate(
                        alert_type="production_capacity_exceeded",
                        severity=AlertSeverity.HIGH,
                        title="Capacidad de Producción Excedida",
                        message=f"🔥 Capacidad excedida: {excess_hours:.1f}h extra necesarias para completar la producción de hoy",
                        recommended_actions=[
                            "reschedule_batches",
                            "outsource_production",
                            "adjust_menu",
                            "extend_working_hours"
                        ],
                        impact_level="high",
                        estimated_time_impact_minutes=int(excess_hours * 60),
                        alert_data={
                            "excess_hours": excess_hours,
                            "total_planned_hours": total_planned_hours,
                            "available_capacity_hours": total_capacity,
                            "affected_batches": len(todays_batches)
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Check production delay alert
                current_time = datetime.utcnow()
                cutoff_time = current_time + timedelta(hours=4)  # 4 hours ahead (NOTE: computed but not used below)
                urgent_batches = await batch_repo.get_urgent_batches(str(tenant_id), 4)
                delayed_batches = [
                    batch for batch in urgent_batches
                    if batch.planned_start_time <= current_time and batch.status == ProductionStatus.PENDING
                ]

                for batch in delayed_batches:
                    delay_minutes = int((current_time - batch.planned_start_time).total_seconds() / 60)
                    if delay_minutes > self.config.PRODUCTION_DELAY_THRESHOLD_MINUTES:
                        alert_data = ProductionAlertCreate(
                            alert_type="production_delay",
                            severity=AlertSeverity.HIGH,
                            title="Retraso en Producción",
                            message=f"⏰ Retraso: {batch.product_name} debía haber comenzado hace {delay_minutes} minutos",
                            batch_id=batch.id,
                            recommended_actions=[
                                "start_production_immediately",
                                "notify_staff",
                                "prepare_alternatives",
                                "update_customers"
                            ],
                            impact_level="high",
                            estimated_time_impact_minutes=delay_minutes,
                            alert_data={
                                "batch_number": batch.batch_number,
                                "product_name": batch.product_name,
                                "planned_start_time": batch.planned_start_time.isoformat(),
                                "delay_minutes": delay_minutes,
                                "affects_opening": delay_minutes > 120  # 2 hours
                            }
                        )
                        alert = await alert_repo.create_alert({
                            **alert_data.model_dump(),
                            "tenant_id": tenant_id
                        })
                        alerts.append(alert)

                # Check cost spike alert
                high_cost_batches = [
                    batch for batch in todays_batches
                    if batch.estimated_cost and batch.estimated_cost > 100  # Threshold
                ]
                if high_cost_batches:
                    total_high_cost = sum(batch.estimated_cost for batch in high_cost_batches)
                    alert_data = ProductionAlertCreate(
                        alert_type="production_cost_spike",
                        severity=AlertSeverity.MEDIUM,
                        title="Costos de Producción Elevados",
                        message=f"💰 Costos altos detectados: {len(high_cost_batches)} lotes con costo total de {total_high_cost:.2f}",
                        recommended_actions=[
                            "review_ingredient_costs",
                            "optimize_recipe",
                            "negotiate_supplier_prices",
                            "adjust_menu_pricing"
                        ],
                        impact_level="medium",
                        estimated_cost_impact=total_high_cost,
                        alert_data={
                            "high_cost_batches": len(high_cost_batches),
                            "total_cost": total_high_cost,
                            "average_cost": total_high_cost / len(high_cost_batches),
                            "affected_products": [batch.product_name for batch in high_cost_batches]
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Send alerts using notification service
                await self._send_alerts(tenant_id, alerts)
                return alerts
        except Exception as e:
            logger.error("Error checking production capacity alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []
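
    # Quick sanity check of the capacity arithmetic above, with illustrative
    # values only (the batch durations are assumptions; 8.0 is the default
    # used for "total_capacity_units"):
    #
    #     planned_minutes = [180, 180, 180]
    #     total_planned_hours = sum(m / 60 for m in planned_minutes)  # 9.0
    #     total_capacity = 8.0
    #     excess_hours = total_planned_hours - total_capacity  # 1.0h -> alert fires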

    @transactional
    async def check_quality_control_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Monitor quality control issues and generate alerts"""
        alerts = []
        try:
            async with self.database_manager.get_session() as session:
                alert_repo = ProductionAlertRepository(session)
                batch_repo = ProductionBatchRepository(session)

                # Check for batches with low yield
                last_week = date.today() - timedelta(days=7)
                recent_batches = await batch_repo.get_batches_by_date_range(
                    str(tenant_id), last_week, date.today(), ProductionStatus.COMPLETED
                )
                low_yield_batches = [
                    batch for batch in recent_batches
                    if batch.yield_percentage and batch.yield_percentage < self.config.LOW_YIELD_ALERT_THRESHOLD * 100
                ]

                if low_yield_batches:
                    avg_yield = sum(batch.yield_percentage for batch in low_yield_batches) / len(low_yield_batches)
                    alert_data = ProductionAlertCreate(
                        alert_type="low_yield_detected",
                        severity=AlertSeverity.MEDIUM,
                        title="Rendimiento Bajo Detectado",
                        message=f"📉 Rendimiento bajo: {len(low_yield_batches)} lotes con rendimiento promedio {avg_yield:.1f}%",
                        recommended_actions=[
                            "review_recipes",
                            "check_ingredient_quality",
                            "training_staff",
                            "equipment_calibration"
                        ],
                        impact_level="medium",
                        alert_data={
                            "low_yield_batches": len(low_yield_batches),
                            "average_yield": avg_yield,
                            "threshold": self.config.LOW_YIELD_ALERT_THRESHOLD * 100,
                            "affected_products": list(set(batch.product_name for batch in low_yield_batches))
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Check for recurring quality issues
                quality_issues = [
                    batch for batch in recent_batches
                    if batch.quality_score and batch.quality_score < self.config.QUALITY_SCORE_THRESHOLD
                ]
                if len(quality_issues) >= 3:  # 3 or more quality issues in a week
                    avg_quality = sum(batch.quality_score for batch in quality_issues) / len(quality_issues)
                    alert_data = ProductionAlertCreate(
                        alert_type="recurring_quality_issues",
                        severity=AlertSeverity.HIGH,
                        title="Problemas de Calidad Recurrentes",
                        message=f"⚠️ Problemas de calidad: {len(quality_issues)} lotes con calidad promedio {avg_quality:.1f}/10",
                        recommended_actions=[
                            "quality_audit",
                            "staff_retraining",
                            "equipment_maintenance",
                            "supplier_review"
                        ],
                        impact_level="high",
                        alert_data={
                            "quality_issues_count": len(quality_issues),
                            "average_quality_score": avg_quality,
                            "threshold": self.config.QUALITY_SCORE_THRESHOLD,
                            "trend": "declining"
                        }
                    )
                    alert = await alert_repo.create_alert({
                        **alert_data.model_dump(),
                        "tenant_id": tenant_id
                    })
                    alerts.append(alert)

                # Send alerts
                await self._send_alerts(tenant_id, alerts)
                return alerts
        except Exception as e:
            logger.error("Error checking quality control alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []
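
    # Unit note for the two thresholds above: LOW_YIELD_ALERT_THRESHOLD is a
    # fraction and is scaled by 100 before comparing against yield_percentage,
    # while QUALITY_SCORE_THRESHOLD is compared directly on the 0-10 scale.
    # A minimal sketch, assuming a threshold value of 0.85 (illustrative only):
    #
    #     yield_limit = 0.85 * 100           # batches under 85.0% are flagged
    #     is_low_yield = 82.5 < yield_limit  # True -> counted as low yield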

    @transactional
    async def check_equipment_maintenance_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Monitor equipment status and generate maintenance alerts"""
        alerts = []
        try:
            async with self.database_manager.get_session() as session:
                capacity_repo = ProductionCapacityRepository(session)
                alert_repo = ProductionAlertRepository(session)

                # Get equipment that needs maintenance
                today = date.today()
                equipment_capacity = await capacity_repo.get_multi(
                    filters={
                        "tenant_id": str(tenant_id),
                        "resource_type": "equipment",
                        "date": today
                    }
                )

                for equipment in equipment_capacity:
                    # Check if maintenance is overdue
                    if equipment.last_maintenance_date:
                        days_since_maintenance = (today - equipment.last_maintenance_date.date()).days
                        if days_since_maintenance > 30:  # 30 days threshold
                            alert_data = ProductionAlertCreate(
                                alert_type="equipment_maintenance_overdue",
                                severity=AlertSeverity.MEDIUM,
                                title="Mantenimiento de Equipo Vencido",
                                message=f"🔧 Mantenimiento vencido: {equipment.resource_name} - {days_since_maintenance} días sin mantenimiento",
                                recommended_actions=[
                                    "schedule_maintenance",
                                    "equipment_inspection",
                                    "backup_equipment_ready"
                                ],
                                impact_level="medium",
                                alert_data={
                                    "equipment_id": equipment.resource_id,
                                    "equipment_name": equipment.resource_name,
                                    "days_since_maintenance": days_since_maintenance,
                                    "last_maintenance": equipment.last_maintenance_date.isoformat() if equipment.last_maintenance_date else None
                                }
                            )
                            alert = await alert_repo.create_alert({
                                **alert_data.model_dump(),
                                "tenant_id": tenant_id
                            })
                            alerts.append(alert)

                    # Check equipment efficiency
                    if equipment.efficiency_rating and equipment.efficiency_rating < 0.8:  # 80% threshold
                        alert_data = ProductionAlertCreate(
                            alert_type="equipment_efficiency_low",
                            severity=AlertSeverity.MEDIUM,
                            title="Eficiencia de Equipo Baja",
                            message=f"📊 Eficiencia baja: {equipment.resource_name} operando al {equipment.efficiency_rating*100:.1f}%",
                            recommended_actions=[
                                "equipment_calibration",
                                "maintenance_check",
                                "replace_parts"
                            ],
                            impact_level="medium",
                            alert_data={
                                "equipment_id": equipment.resource_id,
                                "equipment_name": equipment.resource_name,
                                "efficiency_rating": equipment.efficiency_rating,
                                "threshold": 0.8
                            }
                        )
                        alert = await alert_repo.create_alert({
                            **alert_data.model_dump(),
                            "tenant_id": tenant_id
                        })
                        alerts.append(alert)

                # Send alerts
                await self._send_alerts(tenant_id, alerts)
                return alerts
        except Exception as e:
            logger.error("Error checking equipment maintenance alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []

    async def _send_alerts(self, tenant_id: UUID, alerts: List[ProductionAlert]):
        """Send alerts using notification service with proper urgency handling"""
        try:
            for alert in alerts:
                # Determine delivery channels based on severity
                channels = self._get_channels_by_severity(alert.severity)

                # Send notification using alert integration
                await self.alert_integration.send_alert(
                    tenant_id=str(tenant_id),
                    message=alert.message,
                    alert_type=alert.alert_type,
                    severity=alert.severity.value,
                    channels=channels,
                    data={
                        "actions": alert.recommended_actions or [],
                        "alert_id": str(alert.id)
                    }
                )

                logger.info("Sent production alert notification",
                            alert_id=str(alert.id),
                            alert_type=alert.alert_type,
                            severity=alert.severity.value,
                            channels=channels)
        except Exception as e:
            logger.error("Error sending alert notifications",
                         error=str(e), tenant_id=str(tenant_id))

    def _get_channels_by_severity(self, severity: AlertSeverity) -> List[str]:
        """Map severity to delivery channels following user-centric analysis"""
        if severity == AlertSeverity.CRITICAL:
            return ["whatsapp", "email", "dashboard", "sms"]
        elif severity == AlertSeverity.HIGH:
            return ["whatsapp", "email", "dashboard"]
        elif severity == AlertSeverity.MEDIUM:
            return ["email", "dashboard"]
        else:
            return ["dashboard"]

    @transactional
    async def get_active_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
        """Get all active production alerts for a tenant"""
        try:
            async with self.database_manager.get_session() as session:
                alert_repo = ProductionAlertRepository(session)
                return await alert_repo.get_active_alerts(str(tenant_id))
        except Exception as e:
            logger.error("Error getting active alerts",
                         error=str(e), tenant_id=str(tenant_id))
            return []

    @transactional
    async def acknowledge_alert(
        self,
        tenant_id: UUID,
        alert_id: UUID,
        acknowledged_by: str
    ) -> ProductionAlert:
        """Acknowledge a production alert"""
        try:
            async with self.database_manager.get_session() as session:
                alert_repo = ProductionAlertRepository(session)
                return await alert_repo.acknowledge_alert(alert_id, acknowledged_by)
        except Exception as e:
            logger.error("Error acknowledging alert",
                         error=str(e), alert_id=str(alert_id), tenant_id=str(tenant_id))
            raise
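
# Hypothetical usage sketch; "database_manager" and "settings" stand in for the
# real dependencies injected elsewhere in the service (names are assumptions):
#
#     async def run_alert_sweep(database_manager, settings, tenant_id: UUID):
#         service = ProductionAlertService(database_manager, settings)
#         alerts = await service.check_production_capacity_alerts(tenant_id)
#         alerts += await service.check_quality_control_alerts(tenant_id)
#         alerts += await service.check_equipment_maintenance_alerts(tenant_id)
#         return alerts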

View File

@@ -205,7 +205,6 @@ class ProductionService:
            active_batches=len(active_batches),
            todays_production_plan=todays_plan,
            capacity_utilization=85.0,  # TODO: Calculate from actual capacity data
            current_alerts=0,  # TODO: Get from alerts
            on_time_completion_rate=weekly_metrics.get("on_time_completion_rate", 0),
            average_quality_score=8.5,  # TODO: Get from quality checks
            total_output_today=sum(b.actual_quantity or 0 for b in todays_batches),