New alert service
@@ -1 +0,0 @@
|
||||
# shared/alerts/__init__.py
|
||||
@@ -1,573 +0,0 @@
|
||||
# shared/alerts/base_service.py
|
||||
"""
|
||||
Base alert service pattern for all microservices
|
||||
Supports both alerts and recommendations through unified detection patterns
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import random
|
||||
import uuid
|
||||
from typing import List, Dict, Any, Optional
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timedelta
|
||||
import structlog
|
||||
from redis.asyncio import Redis
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
|
||||
from shared.messaging.rabbitmq import RabbitMQClient
|
||||
from shared.database.base import DatabaseManager
|
||||
from shared.config.rabbitmq_config import get_routing_key
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
class BaseAlertService:
|
||||
"""
|
||||
Base class for service-specific alert and recommendation detection
|
||||
Implements hybrid detection patterns: scheduled jobs, event-driven, and database triggers
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.db_manager = DatabaseManager(config.DATABASE_URL)
|
||||
self.rabbitmq_client = RabbitMQClient(config.RABBITMQ_URL, config.SERVICE_NAME)
|
||||
self.redis = None
|
||||
self.scheduler = AsyncIOScheduler()
|
||||
self.is_leader = False
|
||||
self.exchange = "alerts.exchange"
|
||||
|
||||
# Metrics
|
||||
self._items_published = 0
|
||||
self._checks_performed = 0
|
||||
self._errors_count = 0
|
||||
|
||||
async def start(self):
|
||||
"""Initialize all detection mechanisms"""
|
||||
try:
|
||||
# Connect to Redis for leader election and deduplication
|
||||
# Use the shared Redis URL which includes TLS configuration
|
||||
from redis.asyncio import from_url
|
||||
redis_url = self.config.REDIS_URL
|
||||
|
||||
# Create Redis client from URL (supports TLS via rediss:// protocol)
|
||||
# For self-signed certificates, disable SSL verification
|
||||
redis_kwargs = {
|
||||
'decode_responses': True,
|
||||
'max_connections': 20
|
||||
}
|
||||
|
||||
# If using SSL/TLS, add SSL parameters to handle self-signed certificates
|
||||
if redis_url.startswith('rediss://'):
|
||||
redis_kwargs.update({
|
||||
'ssl_cert_reqs': None, # Disable certificate verification
|
||||
'ssl_ca_certs': None, # Don't require CA certificates
|
||||
'ssl_certfile': None, # Don't require client cert
|
||||
'ssl_keyfile': None # Don't require client key
|
||||
})
|
||||
|
||||
self.redis = await from_url(redis_url, **redis_kwargs)
|
||||
logger.info("Connected to Redis", service=self.config.SERVICE_NAME, redis_url=redis_url.split("@")[-1])
|
||||
|
||||
# Connect to RabbitMQ
|
||||
await self.rabbitmq_client.connect()
|
||||
logger.info("Connected to RabbitMQ", service=self.config.SERVICE_NAME)
|
||||
|
||||
# Start leader election for scheduled jobs
|
||||
asyncio.create_task(self.maintain_leadership())
|
||||
|
||||
# Setup scheduled checks (runs only on leader)
|
||||
self.setup_scheduled_checks()
|
||||
|
||||
# Start database listener (runs on all instances)
|
||||
await self.start_database_listener()
|
||||
|
||||
# Start event listener (runs on all instances)
|
||||
await self.start_event_listener()
|
||||
|
||||
logger.info("Alert service started", service=self.config.SERVICE_NAME)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to start alert service", service=self.config.SERVICE_NAME, error=str(e))
|
||||
raise
|
||||
|
||||
async def stop(self):
|
||||
"""Clean shutdown"""
|
||||
try:
|
||||
# Stop scheduler
|
||||
if self.scheduler.running:
|
||||
self.scheduler.shutdown()
|
||||
|
||||
# Close connections
|
||||
if self.redis:
|
||||
await self.redis.aclose() # Use aclose() for modern Redis client
|
||||
|
||||
await self.rabbitmq_client.disconnect()
|
||||
|
||||
logger.info("Alert service stopped", service=self.config.SERVICE_NAME)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error stopping alert service", service=self.config.SERVICE_NAME, error=str(e))
|
||||
|
||||
# PATTERN 1: Scheduled Background Jobs
|
||||
def setup_scheduled_checks(self):
|
||||
"""Configure scheduled alert checks - Override in service"""
|
||||
raise NotImplementedError("Subclasses must implement setup_scheduled_checks")
|
||||
|
||||
async def maintain_leadership(self):
|
||||
"""Leader election for scheduled jobs"""
|
||||
lock_key = f"scheduler_lock:{self.config.SERVICE_NAME}"
|
||||
lock_ttl = 60
|
||||
# Generate instance_id once for the lifetime of this leadership loop
|
||||
# IMPORTANT: Don't regenerate on each iteration or lock extension will always fail!
|
||||
instance_id = getattr(self.config, 'INSTANCE_ID', str(uuid.uuid4()))
|
||||
|
||||
logger.info("DEBUG: maintain_leadership starting",
|
||||
service=self.config.SERVICE_NAME,
|
||||
instance_id=instance_id,
|
||||
redis_client_type=str(type(self.redis)))
|
||||
|
||||
while True:
|
||||
try:
|
||||
was_leader = self.is_leader
|
||||
|
||||
# Add jitter to avoid thundering herd when multiple instances start
|
||||
if not was_leader:
|
||||
await asyncio.sleep(random.uniform(0.1, 0.5)) # Small random delay before attempting to acquire
|
||||
|
||||
# Try to acquire new leadership if not currently leader
|
||||
if not self.is_leader:
|
||||
# Use atomic Redis operation to acquire lock
|
||||
result = await self.redis.set(
|
||||
lock_key,
|
||||
instance_id,
|
||||
ex=lock_ttl,
|
||||
nx=True # Only set if key doesn't exist
|
||||
)
|
||||
acquired = result is not None
|
||||
self.is_leader = acquired
|
||||
else:
|
||||
# Already leader - try to extend the lock atomically
|
||||
# Use SET with EX and GET to atomically refresh the lock
|
||||
try:
|
||||
# SET key value EX ttl GET returns the old value (atomic check-and-set)
|
||||
# This is atomic and works in both standalone and cluster mode
|
||||
old_value = await self.redis.set(
|
||||
lock_key,
|
||||
instance_id,
|
||||
ex=lock_ttl,
|
||||
get=True # Return old value (Python redis uses 'get' param for GET option)
|
||||
)
|
||||
# If old value matches our instance_id, we successfully extended
|
||||
self.is_leader = old_value == instance_id
|
||||
if self.is_leader:
|
||||
logger.debug("Lock extended successfully",
|
||||
service=self.config.SERVICE_NAME,
|
||||
instance_id=instance_id,
|
||||
ttl=lock_ttl)
|
||||
else:
|
||||
# Lock was taken by someone else or expired
|
||||
logger.info("Lost lock ownership during extension",
|
||||
service=self.config.SERVICE_NAME,
|
||||
old_owner=old_value,
|
||||
instance_id=instance_id)
|
||||
except Exception as e:
|
||||
# If extend fails, try to verify we still have the lock
|
||||
logger.warning("Failed to extend lock, verifying ownership",
|
||||
service=self.config.SERVICE_NAME,
|
||||
error=str(e))
|
||||
current_check = await self.redis.get(lock_key)
|
||||
self.is_leader = current_check == instance_id
|
||||
|
||||
# Handle leadership changes
|
||||
if self.is_leader and not was_leader:
|
||||
# Add a small delay to allow other instances to detect leadership change
|
||||
await asyncio.sleep(0.1)
|
||||
if self.is_leader: # Double-check we're still the leader
|
||||
self.scheduler.start()
|
||||
logger.info("Acquired scheduler leadership", service=self.config.SERVICE_NAME)
|
||||
elif not self.is_leader and was_leader:
|
||||
if self.scheduler.running:
|
||||
self.scheduler.shutdown()
|
||||
logger.info("Lost scheduler leadership", service=self.config.SERVICE_NAME)
|
||||
|
||||
# Add jitter to reduce contention between instances
|
||||
await asyncio.sleep(lock_ttl // 2 + random.uniform(0, 2))
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
logger.error("Leadership error",
|
||||
service=self.config.SERVICE_NAME,
|
||||
error=str(e),
|
||||
error_type=type(e).__name__,
|
||||
traceback=traceback.format_exc())
|
||||
self.is_leader = False
|
||||
await asyncio.sleep(5)
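
    # Illustrative note: the leadership loop above relies on two atomic Redis
    # commands. In redis-cli terms, acquisition is
    #   SET scheduler_lock:<service> <instance_id> NX EX 60
    # which succeeds only while the key is absent, and extension is
    #   SET scheduler_lock:<service> <instance_id> EX 60 GET
    # which refreshes the TTL and returns the previous owner, letting the
    # caller detect a lost lock in the same round trip.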

    # PATTERN 2: Event-Driven Detection
    async def start_event_listener(self):
        """Listen for business events - override in the service"""
        pass

    # PATTERN 3: Database Triggers
    async def start_database_listener(self):
        """Listen for database notifications with connection management"""
        try:
            import asyncpg
            # Convert the SQLAlchemy URL format to a plain PostgreSQL URL for asyncpg
            database_url = self.config.DATABASE_URL
            if database_url.startswith('postgresql+asyncpg://'):
                database_url = database_url.replace('postgresql+asyncpg://', 'postgresql://')

            # Connect with a timeout and simple retry logic
            max_retries = 3
            retry_count = 0
            conn = None

            while retry_count < max_retries and not conn:
                try:
                    conn = await asyncio.wait_for(
                        asyncpg.connect(database_url),
                        timeout=10.0
                    )
                    break
                except Exception as e:  # includes asyncio.TimeoutError
                    retry_count += 1
                    if retry_count < max_retries:
                        logger.warning(f"DB listener connection attempt {retry_count} failed, retrying...",
                                       service=self.config.SERVICE_NAME, error=str(e))
                        await asyncio.sleep(2)
                    else:
                        raise

            if conn:
                # Register listeners based on the service
                await self.register_db_listeners(conn)
                logger.info("Database listeners registered", service=self.config.SERVICE_NAME)

                # Keep the connection alive with a periodic ping
                asyncio.create_task(self._maintain_db_connection(conn))

        except Exception as e:
            logger.error("Failed to setup database listeners", service=self.config.SERVICE_NAME, error=str(e))

    async def _maintain_db_connection(self, conn):
        """Maintain the database connection for listeners"""
        try:
            while not conn.is_closed():
                # Use a timeout to avoid hanging indefinitely
                try:
                    await asyncio.wait_for(
                        conn.fetchval("SELECT 1"),
                        timeout=5.0
                    )
                    await asyncio.sleep(30)  # Check every 30 seconds
                except asyncio.TimeoutError:
                    logger.warning("DB ping timed out, connection may be dead", service=self.config.SERVICE_NAME)
                    break
                except Exception as e:
                    logger.error("DB listener connection lost", service=self.config.SERVICE_NAME, error=str(e))
                    break
        except Exception as e:
            logger.error("Error maintaining DB connection", service=self.config.SERVICE_NAME, error=str(e))

    async def register_db_listeners(self, conn):
        """Register database listeners - override in the service"""
        pass

    # Publishing (updated to carry an item type)
    async def publish_item(self, tenant_id: UUID, item: Dict[str, Any], item_type: str = 'alert'):
        """Publish an alert or recommendation to RabbitMQ with deduplication and validation"""

        try:
            # Validate the alert structure before publishing
            from shared.schemas.alert_types import RawAlert
            try:
                raw_alert = RawAlert(
                    tenant_id=str(tenant_id),
                    alert_type=item.get('type'),
                    title=item.get('title'),
                    message=item.get('message'),
                    service=self.config.SERVICE_NAME,
                    actions=item.get('actions', []),
                    alert_metadata=item.get('metadata', {}),
                    item_type=item_type
                )
                # Validation passed; continue with the validated data
                logger.debug("Alert schema validation passed",
                             service=self.config.SERVICE_NAME,
                             alert_type=item.get('type'))
            except Exception as validation_error:
                logger.error("Alert schema validation failed",
                             service=self.config.SERVICE_NAME,
                             alert_type=item.get('type'),
                             error=str(validation_error))
                self._errors_count += 1
                return False

            # Generate a deduplication key based on the alert type and its specific identifiers
            unique_id = self._generate_unique_identifier(item)
            item_key = f"{tenant_id}:{item_type}:{item['type']}:{unique_id}"

            if await self.is_duplicate_item(item_key):
                logger.debug("Duplicate item skipped",
                             service=self.config.SERVICE_NAME,
                             item_type=item_type,
                             alert_type=item['type'],
                             dedup_key=item_key)
                return False

            # Add metadata
            item['id'] = str(uuid.uuid4())
            item['tenant_id'] = str(tenant_id)
            item['service'] = self.config.SERVICE_NAME
            item['timestamp'] = datetime.utcnow().isoformat()
            item['item_type'] = item_type  # 'alert' or 'recommendation'

            # Determine the routing key based on severity and type
            routing_key = get_routing_key(item_type, item['severity'], self.config.SERVICE_NAME)

            # Publish to RabbitMQ with a timeout to prevent blocking
            try:
                success = await asyncio.wait_for(
                    self.rabbitmq_client.publish_event(
                        exchange_name=self.exchange,
                        routing_key=routing_key,
                        event_data=item
                    ),
                    timeout=10.0  # 10 second timeout
                )
            except asyncio.TimeoutError:
                logger.error("RabbitMQ publish timed out",
                             service=self.config.SERVICE_NAME,
                             item_type=item_type,
                             alert_type=item['type'])
                return False

            if success:
                self._items_published += 1
                logger.info("Item published successfully",
                            service=self.config.SERVICE_NAME,
                            item_type=item_type,
                            alert_type=item['type'],
                            severity=item['severity'],
                            routing_key=routing_key)
            else:
                self._errors_count += 1
                logger.error("Failed to publish item",
                             service=self.config.SERVICE_NAME,
                             item_type=item_type,
                             alert_type=item['type'])

            return success

        except Exception as e:
            self._errors_count += 1
            logger.error("Error publishing item",
                         service=self.config.SERVICE_NAME,
                         error=str(e),
                         item_type=item_type)
            return False

    def _generate_unique_identifier(self, item: Dict[str, Any]) -> str:
        """Generate a unique identifier for deduplication based on alert type and content"""
        alert_type = item.get('type', '')
        metadata = item.get('metadata', {})

        # Inventory alerts
        if alert_type == 'overstock_warning':
            return metadata.get('ingredient_id', '')
        elif alert_type in ('critical_stock_shortage', 'low_stock_warning'):
            return metadata.get('ingredient_id', '')
        elif alert_type == 'expired_products':
            # For expired-products alerts, hash the IDs of all expired items
            expired_items = metadata.get('expired_items', [])
            if expired_items:
                expired_ids = sorted([str(entry.get('id', '')) for entry in expired_items])
                import hashlib
                return hashlib.md5(':'.join(expired_ids).encode()).hexdigest()[:16]
            return ''
        elif alert_type == 'urgent_expiry':
            return f"{metadata.get('ingredient_id', '')}:{metadata.get('stock_id', '')}"
        elif alert_type == 'temperature_breach':
            return f"{metadata.get('sensor_id', '')}:{metadata.get('location', '')}"
        elif alert_type == 'stock_depleted_by_order':
            return f"{metadata.get('order_id', '')}:{metadata.get('ingredient_id', '')}"
        elif alert_type == 'expired_batches_auto_processed':
            # Use the processing date and total batch count as the identifier
            processing_date = metadata.get('processing_date', '')[:10]  # Date only
            total_batches = metadata.get('total_batches_processed', 0)
            return f"{processing_date}:{total_batches}"
        elif alert_type == 'inventory_optimization':
            return f"opt:{metadata.get('ingredient_id', '')}:{metadata.get('recommendation_type', '')}"
        elif alert_type == 'waste_reduction':
            return f"waste:{metadata.get('ingredient_id', '')}"

        # Procurement alerts
        elif alert_type == 'procurement_pos_pending_approval':
            # Use a hash of the PO IDs for grouped alerts
            pos = metadata.get('pos', [])
            if pos:
                po_ids = sorted([str(po.get('po_id', '')) for po in pos])
                import hashlib
                return hashlib.md5(':'.join(po_ids).encode()).hexdigest()[:16]
            return ''
        elif alert_type == 'procurement_approval_reminder':
            return metadata.get('po_id', '')
        elif alert_type == 'procurement_critical_po':
            return metadata.get('po_id', '')
        elif alert_type == 'procurement_po_approved':
            return metadata.get('po_id', '')
        elif alert_type == 'procurement_auto_approval_summary':
            # Daily summary - use the date as the identifier
            summary_date = metadata.get('summary_date', '')[:10]  # Date only
            return f"summary:{summary_date}"

        # Production alerts
        elif alert_type in ['severe_capacity_overload', 'capacity_overload', 'near_capacity']:
            return f"capacity:{metadata.get('planned_date', '')}"
        elif alert_type == 'production_delay':
            return metadata.get('batch_id', '')
        elif alert_type == 'quality_control_failure':
            return metadata.get('quality_check_id', '')
        elif alert_type in ['equipment_failure', 'maintenance_required', 'low_equipment_efficiency']:
            return metadata.get('equipment_id', '')
        elif alert_type == 'production_ingredient_shortage':
            return metadata.get('ingredient_id', '')

        # Forecasting alerts
        elif alert_type in ['demand_surge_weekend', 'holiday_preparation', 'demand_spike_detected', 'unexpected_demand_spike']:
            return f"{alert_type}:{metadata.get('product_name', '')}:{metadata.get('forecast_date', '')}"
        elif alert_type == 'weather_impact_alert':
            return f"weather:{metadata.get('forecast_date', '')}"
        elif alert_type == 'severe_weather_impact':
            return f"severe_weather:{metadata.get('weather_type', '')}:{metadata.get('duration_hours', '')}"

        else:
            # Fall back to a generic metadata.id or an empty string
            return metadata.get('id', '')

    async def is_duplicate_item(self, item_key: str, window_minutes: int = 15) -> bool:
        """Prevent duplicate items within a time window"""
        key = f"item_sent:{item_key}"
        try:
            result = await self.redis.set(
                key, "1",
                ex=window_minutes * 60,
                nx=True
            )
            return result is None  # None means the key already existed, i.e. a duplicate
        except Exception as e:
            logger.error("Error checking duplicate", error=str(e))
            return False  # Allow publishing if the check fails
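
    # Worked example (illustrative values): for a critical stock shortage on
    # ingredient 42 for tenant T, _generate_unique_identifier returns '42' and
    # the deduplication key becomes
    #   item_sent:T:alert:critical_stock_shortage:42
    # so a second detection inside the 15-minute window hits the NX guard
    # above and is skipped.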

    # Helper methods
    async def get_active_tenants(self) -> List[UUID]:
        """Get the list of active tenant IDs"""
        try:
            from sqlalchemy import text
            query = text("SELECT DISTINCT tenant_id FROM tenants WHERE status = 'active'")
            async with self.db_manager.get_session() as session:
                result = await session.execute(query)
                return [row.tenant_id for row in result.fetchall()]
        except Exception as e:
            # If the tenants table doesn't exist, skip tenant-based processing
            if "does not exist" in str(e):
                logger.debug("Tenants table not found, skipping tenant-based alert processing")
                return []
            else:
                logger.error("Error fetching active tenants", error=str(e))
                return []

    async def get_tenant_config(self, tenant_id: UUID) -> Dict[str, Any]:
        """Get tenant-specific configuration"""
        try:
            from sqlalchemy import text
            query = text("SELECT config FROM tenants WHERE tenant_id = :tenant_id")
            async with self.db_manager.get_session() as session:
                result = await session.execute(query, {"tenant_id": tenant_id})
                row = result.fetchone()
                return json.loads(row.config) if row and row.config else {}
        except Exception as e:
            logger.error("Error fetching tenant config", tenant_id=str(tenant_id), error=str(e))
            return {}

    # Health and metrics
    def get_metrics(self) -> Dict[str, Any]:
        """Get service metrics"""
        return {
            "items_published": self._items_published,
            "checks_performed": self._checks_performed,
            "errors_count": self._errors_count,
            "is_leader": self.is_leader,
            "scheduler_running": self.scheduler.running,
            "redis_connected": self.redis is not None,  # redis.asyncio clients expose no sync 'closed' flag
            "rabbitmq_connected": self.rabbitmq_client.connected if self.rabbitmq_client else False
        }

    async def health_check(self) -> Dict[str, Any]:
        """Comprehensive health check"""
        try:
            # Check Redis
            redis_healthy = False
            if self.redis:
                await self.redis.ping()
                redis_healthy = True

            # Check RabbitMQ
            rabbitmq_healthy = self.rabbitmq_client.connected if self.rabbitmq_client else False

            # Check the database
            db_healthy = False
            try:
                from sqlalchemy import text
                async with self.db_manager.get_session() as session:
                    await session.execute(text("SELECT 1"))
                    db_healthy = True
            except Exception:
                pass

            status = "healthy" if all([redis_healthy, rabbitmq_healthy, db_healthy]) else "unhealthy"

            return {
                "status": status,
                "service": self.config.SERVICE_NAME,
                "components": {
                    "redis": "healthy" if redis_healthy else "unhealthy",
                    "rabbitmq": "healthy" if rabbitmq_healthy else "unhealthy",
                    "database": "healthy" if db_healthy else "unhealthy",
                    "scheduler": "running" if self.scheduler.running else "stopped"
                },
                "metrics": self.get_metrics()
            }

        except Exception as e:
            return {
                "status": "error",
                "service": self.config.SERVICE_NAME,
                "error": str(e)
            }


class AlertServiceMixin:
    """Mixin providing common alert helper methods"""

    def get_business_hours_severity(self, base_severity: str) -> str:
        """Adjust severity based on business hours"""
        current_hour = datetime.now().hour

        # Reduce non-critical severity outside business hours (7-20)
        if not (7 <= current_hour <= 20):
            if base_severity == 'medium':
                return 'low'
            elif base_severity == 'high' and (current_hour < 6 or current_hour > 22):
                # Parentheses matter here: without them, 'current_hour > 22'
                # alone would downgrade any severity due to operator precedence
                return 'medium'

        return base_severity

    def should_send_recommendation(self, tenant_id: UUID, rec_type: str) -> bool:
        """Check whether a recommendation should be sent, based on tenant preferences"""
        # Implement tenant-specific recommendation frequency limits
        # This is a simplified version
        return True
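

# Illustrative sketch: a minimal subclass wiring up the three detection
# patterns. The inventory-specific names below (InventoryAlertService,
# check_stock_levels, the 'stock_changes' NOTIFY channel) are hypothetical.
class InventoryAlertService(BaseAlertService, AlertServiceMixin):
    def setup_scheduled_checks(self):
        # PATTERN 1: run a stock sweep every 15 minutes on the elected leader
        self.scheduler.add_job(
            self.check_stock_levels,
            CronTrigger(minute="*/15"),
            id="stock_level_check",
        )

    async def register_db_listeners(self, conn):
        # PATTERN 3: react to NOTIFY events emitted by database triggers
        await conn.add_listener("stock_changes", self._on_stock_change)

    def _on_stock_change(self, connection, pid, channel, payload):
        # asyncpg invokes listeners synchronously; hand off to the event loop
        asyncio.create_task(self.check_stock_levels())

    async def check_stock_levels(self):
        self._checks_performed += 1
        for tenant_id in await self.get_active_tenants():
            # ...detection logic would build an item dict here, then:
            # await self.publish_item(tenant_id, item, item_type='alert')
            pass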

@@ -0,0 +1,948 @@
"""
Context-Aware Alert Message Templates with i18n Support

This module generates parametrized alert messages that the frontend can translate.
Instead of hardcoded Spanish messages, we generate structured message keys and parameters.

Messages are generated AFTER enrichment, leveraging:
- Orchestrator context (AI actions already taken)
- Business impact (financial, customers affected)
- Urgency context (hours until consequence, actual dates)
- User agency (supplier contacts, external dependencies)

The frontend uses i18n to translate the message keys with the parameters.
"""

from datetime import datetime, timedelta
from typing import Dict, Any, Optional, List

from shared.schemas.alert_types import (
    EnrichedAlert, OrchestratorContext, BusinessImpact,
    UrgencyContext, UserAgency, TrendContext, SmartAction
)

import structlog

logger = structlog.get_logger()


def format_date_spanish(dt: datetime) -> str:
    """Format a datetime in Spanish (for backwards compatibility)"""
    days = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"]
    months = ["enero", "febrero", "marzo", "abril", "mayo", "junio",
              "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"]

    day_name = days[dt.weekday()]
    month_name = months[dt.month - 1]

    return f"{day_name} {dt.day} de {month_name}"


def format_iso_date(dt: datetime) -> str:
    """Format a datetime as an ISO date for frontend i18n"""
    return dt.strftime('%Y-%m-%d')


def get_production_date(metadata: Dict[str, Any], default_days: int = 1) -> datetime:
    """Get the actual production date from metadata, or estimate one"""
    if metadata.get('production_date'):
        if isinstance(metadata['production_date'], str):
            return datetime.fromisoformat(metadata['production_date'])
        return metadata['production_date']
    else:
        return datetime.now() + timedelta(days=default_days)


class ContextualMessageGenerator:
    """Generates context-aware parametrized messages for i18n"""

    @staticmethod
    def generate_message_data(enriched: EnrichedAlert) -> Dict[str, Any]:
        """
        Generate contextual message data with i18n support

        Returns a dict with:
        - title_key: i18n key for the title
        - title_params: parameters for the title translation
        - message_key: i18n key for the message
        - message_params: parameters for the message translation
        - fallback_title: fallback if i18n is not available
        - fallback_message: fallback if i18n is not available
        """
        alert_type = enriched.alert_type

        # Dispatch to the specific generator based on the alert type
        generators = {
            # Inventory alerts
            'critical_stock_shortage': ContextualMessageGenerator._stock_shortage,
            'low_stock_warning': ContextualMessageGenerator._low_stock,
            'stock_depleted_by_order': ContextualMessageGenerator._stock_depleted,
            'production_ingredient_shortage': ContextualMessageGenerator._ingredient_shortage,
            'expired_products': ContextualMessageGenerator._expired_products,

            # Production alerts
            'production_delay': ContextualMessageGenerator._production_delay,
            'equipment_failure': ContextualMessageGenerator._equipment_failure,
            'maintenance_required': ContextualMessageGenerator._maintenance_required,
            'low_equipment_efficiency': ContextualMessageGenerator._low_efficiency,
            'order_overload': ContextualMessageGenerator._order_overload,

            # Supplier alerts
            'supplier_delay': ContextualMessageGenerator._supplier_delay,

            # Procurement alerts
            'po_approval_needed': ContextualMessageGenerator._po_approval_needed,
            'production_batch_start': ContextualMessageGenerator._production_batch_start,

            # Environmental alerts
            'temperature_breach': ContextualMessageGenerator._temperature_breach,

            # Forecasting alerts
            'demand_surge_weekend': ContextualMessageGenerator._demand_surge,
            'weather_impact_alert': ContextualMessageGenerator._weather_impact,
            'holiday_preparation': ContextualMessageGenerator._holiday_prep,
            'severe_weather_impact': ContextualMessageGenerator._severe_weather,
            'unexpected_demand_spike': ContextualMessageGenerator._demand_spike,
            'demand_pattern_optimization': ContextualMessageGenerator._demand_pattern,

            # Recommendations
            'inventory_optimization': ContextualMessageGenerator._inventory_optimization,
            'production_efficiency': ContextualMessageGenerator._production_efficiency,
            'sales_opportunity': ContextualMessageGenerator._sales_opportunity,
            'seasonal_adjustment': ContextualMessageGenerator._seasonal_adjustment,
            'cost_reduction': ContextualMessageGenerator._cost_reduction,
            'waste_reduction': ContextualMessageGenerator._waste_reduction,
            'quality_improvement': ContextualMessageGenerator._quality_improvement,
            'customer_satisfaction': ContextualMessageGenerator._customer_satisfaction,
            'energy_optimization': ContextualMessageGenerator._energy_optimization,
            'staff_optimization': ContextualMessageGenerator._staff_optimization,
        }

        generator_func = generators.get(alert_type)
        if generator_func:
            return generator_func(enriched)
        else:
            # Fallback for unknown alert types
            return {
                'title_key': f'alerts.{alert_type}.title',
                'title_params': {},
                'message_key': f'alerts.{alert_type}.message',
                'message_params': {'alert_type': alert_type},
                'fallback_title': f"Alerta: {alert_type}",
                'fallback_message': f"Se detectó una situación que requiere atención: {alert_type}"
            }

    # ===================================================================
    # INVENTORY ALERTS
    # ===================================================================

    @staticmethod
    def _stock_shortage(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Critical stock shortage with AI context"""
        metadata = enriched.alert_metadata
        orch = enriched.orchestrator_context
        urgency = enriched.urgency_context
        agency = enriched.user_agency

        ingredient_name = metadata.get('ingredient_name', 'Ingrediente')
        current_stock = round(metadata.get('current_stock', 0), 1)
        required_stock = round(metadata.get('required_stock', metadata.get('tomorrow_needed', 0)), 1)

        # Base parameters
        params = {
            'ingredient_name': ingredient_name,
            'current_stock': current_stock,
            'required_stock': required_stock
        }

        # Determine the message variant based on context
        if orch and orch.already_addressed and orch.action_type == "purchase_order":
            # The AI already created a PO
            params['po_id'] = orch.action_id
            params['po_amount'] = metadata.get('po_amount', 0)

            if orch.delivery_date:
                params['delivery_date'] = format_iso_date(orch.delivery_date)
                params['delivery_day_name'] = format_date_spanish(orch.delivery_date)

            if orch.action_status == "pending_approval":
                message_key = 'alerts.critical_stock_shortage.message_with_po_pending'
            else:
                message_key = 'alerts.critical_stock_shortage.message_with_po_created'

        elif urgency and urgency.time_until_consequence_hours:
            # Time-specific message
            hours = urgency.time_until_consequence_hours
            params['hours_until'] = round(hours, 1)
            message_key = 'alerts.critical_stock_shortage.message_with_hours'

        elif metadata.get('production_date'):
            # Date-specific message
            prod_date = get_production_date(metadata)
            params['production_date'] = format_iso_date(prod_date)
            params['production_day_name'] = format_date_spanish(prod_date)
            message_key = 'alerts.critical_stock_shortage.message_with_date'

        else:
            # Generic message
            message_key = 'alerts.critical_stock_shortage.message_generic'

        # Add the supplier contact if available
        if agency and agency.requires_external_party and agency.external_party_contact:
            params['supplier_name'] = agency.external_party_name
            params['supplier_contact'] = agency.external_party_contact

        return {
            'title_key': 'alerts.critical_stock_shortage.title',
            'title_params': {'ingredient_name': ingredient_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"🚨 Stock Crítico: {ingredient_name}",
            'fallback_message': f"Solo {current_stock}kg de {ingredient_name} disponibles (necesitas {required_stock}kg)."
        }

    @staticmethod
    def _low_stock(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Low stock warning"""
        metadata = enriched.alert_metadata
        orch = enriched.orchestrator_context

        ingredient_name = metadata.get('ingredient_name', 'Ingrediente')
        current_stock = round(metadata.get('current_stock', 0), 1)
        minimum_stock = round(metadata.get('minimum_stock', 0), 1)

        params = {
            'ingredient_name': ingredient_name,
            'current_stock': current_stock,
            'minimum_stock': minimum_stock
        }

        if orch and orch.already_addressed and orch.action_type == "purchase_order":
            params['po_id'] = orch.action_id
            message_key = 'alerts.low_stock.message_with_po'
        else:
            message_key = 'alerts.low_stock.message_generic'

        return {
            'title_key': 'alerts.low_stock.title',
            'title_params': {'ingredient_name': ingredient_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"⚠️ Stock Bajo: {ingredient_name}",
            'fallback_message': f"Stock de {ingredient_name}: {current_stock}kg (mínimo: {minimum_stock}kg)."
        }

    @staticmethod
    def _stock_depleted(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Stock depleted by an order"""
        metadata = enriched.alert_metadata
        agency = enriched.user_agency

        ingredient_name = metadata.get('ingredient_name', 'Ingrediente')
        order_id = metadata.get('order_id', '???')
        current_stock = round(metadata.get('current_stock', 0), 1)
        minimum_stock = round(metadata.get('minimum_stock', 0), 1)

        params = {
            'ingredient_name': ingredient_name,
            'order_id': order_id,
            'current_stock': current_stock,
            'minimum_stock': minimum_stock
        }

        if agency and agency.requires_external_party and agency.external_party_contact:
            params['supplier_name'] = agency.external_party_name
            params['supplier_contact'] = agency.external_party_contact
            message_key = 'alerts.stock_depleted.message_with_supplier'
        else:
            message_key = 'alerts.stock_depleted.message_generic'

        return {
            'title_key': 'alerts.stock_depleted.title',
            'title_params': {'ingredient_name': ingredient_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"⚠️ Stock Agotado por Pedido: {ingredient_name}",
            'fallback_message': f"El pedido #{order_id} agotaría el stock de {ingredient_name}."
        }

    @staticmethod
    def _ingredient_shortage(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Production ingredient shortage"""
        metadata = enriched.alert_metadata
        impact = enriched.business_impact

        ingredient_name = metadata.get('ingredient_name', 'Ingrediente')
        shortage_amount = round(metadata.get('shortage_amount', 0), 1)
        affected_batches = metadata.get('affected_batches_count', 0)

        params = {
            'ingredient_name': ingredient_name,
            'shortage_amount': shortage_amount,
            'affected_batches': affected_batches
        }

        if impact and impact.affected_customers:
            params['customer_count'] = len(impact.affected_customers)
            params['customer_names'] = ', '.join(impact.affected_customers[:2])
            if len(impact.affected_customers) > 2:
                params['additional_customers'] = len(impact.affected_customers) - 2
            message_key = 'alerts.ingredient_shortage.message_with_customers'
        else:
            message_key = 'alerts.ingredient_shortage.message_generic'

        return {
            'title_key': 'alerts.ingredient_shortage.title',
            'title_params': {'ingredient_name': ingredient_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"🚨 Escasez en Producción: {ingredient_name}",
            'fallback_message': f"Faltan {shortage_amount}kg de {ingredient_name}."
        }

    @staticmethod
    def _expired_products(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Expired products alert"""
        metadata = enriched.alert_metadata

        product_count = metadata.get('product_count', 0)
        expired_items = metadata.get('expired_items', [])

        params = {
            'product_count': product_count
        }

        if len(expired_items) > 0:
            params['product_names'] = ', '.join([item.get('name', 'Producto') for item in expired_items[:2]])
            if len(expired_items) > 2:
                params['additional_count'] = len(expired_items) - 2
            message_key = 'alerts.expired_products.message_with_names'
        else:
            message_key = 'alerts.expired_products.message_generic'

        return {
            'title_key': 'alerts.expired_products.title',
            'title_params': {},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': "📅 Productos Caducados",
            'fallback_message': f"{product_count} producto(s) caducado(s). Retirar inmediatamente."
        }

    # ===================================================================
    # PRODUCTION ALERTS
    # ===================================================================

    @staticmethod
    def _production_delay(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Production delay with affected orders"""
        metadata = enriched.alert_metadata
        impact = enriched.business_impact

        batch_name = metadata.get('batch_name', 'Lote')
        delay_minutes = metadata.get('delay_minutes', 0)

        params = {
            'batch_name': batch_name,
            'delay_minutes': delay_minutes
        }

        if impact and impact.affected_customers:
            params['customer_names'] = ', '.join(impact.affected_customers[:2])
            if len(impact.affected_customers) > 2:
                params['additional_count'] = len(impact.affected_customers) - 2
            message_key = 'alerts.production_delay.message_with_customers'
        elif impact and impact.affected_orders:
            params['affected_orders'] = impact.affected_orders
            message_key = 'alerts.production_delay.message_with_orders'
        else:
            message_key = 'alerts.production_delay.message_generic'

        return {
            'title_key': 'alerts.production_delay.title',
            'title_params': {},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': "⏰ Retraso en Producción",
            'fallback_message': f"Lote {batch_name} con {delay_minutes} minutos de retraso."
        }

    @staticmethod
    def _equipment_failure(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Equipment failure"""
        metadata = enriched.alert_metadata
        impact = enriched.business_impact

        equipment_name = metadata.get('equipment_name', 'Equipo')

        params = {
            'equipment_name': equipment_name
        }

        if impact and impact.production_batches_at_risk:
            params['batch_count'] = len(impact.production_batches_at_risk)
            message_key = 'alerts.equipment_failure.message_with_batches'
        else:
            message_key = 'alerts.equipment_failure.message_generic'

        return {
            'title_key': 'alerts.equipment_failure.title',
            'title_params': {'equipment_name': equipment_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"⚙️ Fallo de Equipo: {equipment_name}",
            'fallback_message': f"{equipment_name} no está funcionando correctamente."
        }

    @staticmethod
    def _maintenance_required(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Maintenance required"""
        metadata = enriched.alert_metadata
        urgency = enriched.urgency_context

        equipment_name = metadata.get('equipment_name', 'Equipo')

        params = {
            'equipment_name': equipment_name
        }

        if urgency and urgency.time_until_consequence_hours:
            params['hours_until'] = round(urgency.time_until_consequence_hours, 1)
            message_key = 'alerts.maintenance_required.message_with_hours'
        else:
            params['days_until'] = metadata.get('days_until_maintenance', 0)
            message_key = 'alerts.maintenance_required.message_with_days'

        return {
            'title_key': 'alerts.maintenance_required.title',
            'title_params': {'equipment_name': equipment_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"🔧 Mantenimiento Requerido: {equipment_name}",
            'fallback_message': f"Equipo {equipment_name} requiere mantenimiento."
        }

    @staticmethod
    def _low_efficiency(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Low equipment efficiency"""
        metadata = enriched.alert_metadata

        equipment_name = metadata.get('equipment_name', 'Equipo')
        efficiency = round(metadata.get('efficiency_percent', 0), 1)

        return {
            'title_key': 'alerts.low_efficiency.title',
            'title_params': {'equipment_name': equipment_name},
            'message_key': 'alerts.low_efficiency.message',
            'message_params': {
                'equipment_name': equipment_name,
                'efficiency_percent': efficiency
            },
            'fallback_title': f"📉 Baja Eficiencia: {equipment_name}",
            'fallback_message': f"Eficiencia del {equipment_name} bajó a {efficiency}%."
        }

    @staticmethod
    def _order_overload(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Order capacity overload"""
        metadata = enriched.alert_metadata
        impact = enriched.business_impact

        percentage = round(metadata.get('percentage', 0), 1)

        params = {
            'percentage': percentage
        }

        if impact and impact.affected_orders:
            params['affected_orders'] = impact.affected_orders
            message_key = 'alerts.order_overload.message_with_orders'
        else:
            message_key = 'alerts.order_overload.message_generic'

        return {
            'title_key': 'alerts.order_overload.title',
            'title_params': {},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': "📋 Sobrecarga de Pedidos",
            'fallback_message': f"Capacidad excedida en {percentage}%."
        }

    # ===================================================================
    # SUPPLIER ALERTS
    # ===================================================================

    @staticmethod
    def _supplier_delay(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Supplier delivery delay"""
        metadata = enriched.alert_metadata
        impact = enriched.business_impact
        agency = enriched.user_agency

        supplier_name = metadata.get('supplier_name', 'Proveedor')
        hours = round(metadata.get('hours', metadata.get('delay_hours', 0)), 0)
        products = metadata.get('products', metadata.get('affected_products', ''))

        params = {
            'supplier_name': supplier_name,
            'hours': hours,
            'products': products
        }

        if impact and impact.production_batches_at_risk:
            params['batch_count'] = len(impact.production_batches_at_risk)

        if agency and agency.external_party_contact:
            params['supplier_contact'] = agency.external_party_contact

        message_key = 'alerts.supplier_delay.message'

        return {
            'title_key': 'alerts.supplier_delay.title',
            'title_params': {'supplier_name': supplier_name},
            'message_key': message_key,
            'message_params': params,
            'fallback_title': f"🚚 Retraso de Proveedor: {supplier_name}",
            'fallback_message': f"Entrega de {supplier_name} retrasada {hours} hora(s)."
        }

    # ===================================================================
    # PROCUREMENT ALERTS
    # ===================================================================

    @staticmethod
    def _po_approval_needed(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Purchase order approval needed"""
        metadata = enriched.alert_metadata

        po_number = metadata.get('po_number', 'PO-XXXX')
        supplier_name = metadata.get('supplier_name', 'Proveedor')
        total_amount = metadata.get('total_amount', 0)
        currency = metadata.get('currency', '€')
        required_delivery_date = metadata.get('required_delivery_date')

        # Format the required delivery date for i18n
        required_delivery_date_iso = None
        if required_delivery_date:
            if isinstance(required_delivery_date, str):
                try:
                    dt = datetime.fromisoformat(required_delivery_date.replace('Z', '+00:00'))
                    required_delivery_date_iso = format_iso_date(dt)
                except ValueError:
                    # Keep the raw string if it isn't a parseable ISO date
                    required_delivery_date_iso = required_delivery_date
            elif isinstance(required_delivery_date, datetime):
                required_delivery_date_iso = format_iso_date(required_delivery_date)

        params = {
            'po_number': po_number,
            'supplier_name': supplier_name,
            'total_amount': round(total_amount, 2),
            'currency': currency,
            'required_delivery_date': required_delivery_date_iso or 'fecha no especificada'
        }

        return {
            'title_key': 'alerts.po_approval_needed.title',
            'title_params': {'po_number': po_number},
            'message_key': 'alerts.po_approval_needed.message',
            'message_params': params
        }

    @staticmethod
    def _production_batch_start(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Production batch ready to start"""
        metadata = enriched.alert_metadata

        batch_number = metadata.get('batch_number', 'BATCH-XXXX')
        product_name = metadata.get('product_name', 'Producto')
        quantity_planned = metadata.get('quantity_planned', 0)
        unit = metadata.get('unit', 'kg')
        priority = metadata.get('priority', 'normal')

        params = {
            'batch_number': batch_number,
            'product_name': product_name,
            'quantity_planned': round(quantity_planned, 1),
            'unit': unit,
            'priority': priority
        }

        return {
            'title_key': 'alerts.production_batch_start.title',
            'title_params': {'product_name': product_name},
            'message_key': 'alerts.production_batch_start.message',
            'message_params': params
        }

    # ===================================================================
    # ENVIRONMENTAL ALERTS
    # ===================================================================

    @staticmethod
    def _temperature_breach(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Temperature breach alert"""
        metadata = enriched.alert_metadata

        location = metadata.get('location', 'Ubicación')
        temperature = round(metadata.get('temperature', 0), 1)
        duration = metadata.get('duration', 0)

        return {
            'title_key': 'alerts.temperature_breach.title',
            'title_params': {'location': location},
            'message_key': 'alerts.temperature_breach.message',
            'message_params': {
                'location': location,
                'temperature': temperature,
                'duration': duration
            }
        }

    # ===================================================================
    # FORECASTING ALERTS
    # ===================================================================

    @staticmethod
    def _demand_surge(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Weekend demand surge"""
        metadata = enriched.alert_metadata
        urgency = enriched.urgency_context

        product_name = metadata.get('product_name', 'Producto')
        percentage = round(metadata.get('percentage', metadata.get('growth_percentage', 0)), 0)
        predicted_demand = metadata.get('predicted_demand', 0)
        current_stock = metadata.get('current_stock', 0)

        params = {
            'product_name': product_name,
            'percentage': percentage
        }

        if predicted_demand and current_stock:
            params['predicted_demand'] = round(predicted_demand, 0)
            params['current_stock'] = round(current_stock, 0)

        if urgency and urgency.time_until_consequence_hours:
            params['hours_until'] = round(urgency.time_until_consequence_hours, 1)

        return {
            'title_key': 'alerts.demand_surge.title',
            'title_params': {'product_name': product_name},
            'message_key': 'alerts.demand_surge.message',
            'message_params': params
        }

    @staticmethod
    def _weather_impact(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Weather impact on demand"""
        metadata = enriched.alert_metadata

        weather_type = metadata.get('weather_type', 'Lluvia')
        impact_percentage = round(metadata.get('impact_percentage', -20), 0)

        return {
            'title_key': 'alerts.weather_impact.title',
            'title_params': {},
            'message_key': 'alerts.weather_impact.message',
            'message_params': {
                'weather_type': weather_type,
                'impact_percentage': abs(impact_percentage),
                'is_negative': impact_percentage < 0
            }
        }

    @staticmethod
    def _holiday_prep(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Holiday preparation"""
        metadata = enriched.alert_metadata

        holiday_name = metadata.get('holiday_name', 'Festividad')
        days = metadata.get('days', 0)
        percentage = round(metadata.get('percentage', metadata.get('increase_percentage', 0)), 0)

        return {
            'title_key': 'alerts.holiday_prep.title',
            'title_params': {'holiday_name': holiday_name},
            'message_key': 'alerts.holiday_prep.message',
            'message_params': {
                'holiday_name': holiday_name,
                'days': days,
                'percentage': percentage
            }
        }

    @staticmethod
    def _severe_weather(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Severe weather impact"""
        metadata = enriched.alert_metadata

        weather_type = metadata.get('weather_type', 'Tormenta')
        duration_hours = metadata.get('duration_hours', 0)

        return {
            'title_key': 'alerts.severe_weather.title',
            'title_params': {'weather_type': weather_type},
            'message_key': 'alerts.severe_weather.message',
            'message_params': {
                'weather_type': weather_type,
                'duration_hours': duration_hours
            }
        }

    @staticmethod
    def _demand_spike(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Unexpected demand spike"""
        metadata = enriched.alert_metadata
        trend = enriched.trend_context

        product_name = metadata.get('product_name', 'Producto')
        spike_percentage = round(metadata.get('spike_percentage', metadata.get('growth_percentage', 0)), 0)

        params = {
            'product_name': product_name,
            'spike_percentage': spike_percentage
        }

        if trend:
            params['current_value'] = round(trend.current_value, 0)
            params['baseline_value'] = round(trend.baseline_value, 0)

        return {
            'title_key': 'alerts.demand_spike.title',
            'title_params': {'product_name': product_name},
            'message_key': 'alerts.demand_spike.message',
            'message_params': params
        }

    @staticmethod
    def _demand_pattern(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Demand pattern optimization"""
        metadata = enriched.alert_metadata
        trend = enriched.trend_context

        product_name = metadata.get('product_name', 'Producto')
        variation = round(metadata.get('variation_percent', 0), 0)

        params = {
            'product_name': product_name,
            'variation_percent': variation
        }

        if trend and trend.possible_causes:
            params['possible_causes'] = ', '.join(trend.possible_causes[:2])

        return {
            'title_key': 'alerts.demand_pattern.title',
            'title_params': {'product_name': product_name},
            'message_key': 'alerts.demand_pattern.message',
            'message_params': params
        }

    # ===================================================================
    # RECOMMENDATIONS
    # ===================================================================

    @staticmethod
    def _inventory_optimization(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Inventory optimization recommendation"""
        metadata = enriched.alert_metadata

        ingredient_name = metadata.get('ingredient_name', 'Ingrediente')
        period = metadata.get('period', 7)
        suggested_increase = round(metadata.get('suggested_increase', 0), 1)

        return {
            'title_key': 'recommendations.inventory_optimization.title',
            'title_params': {'ingredient_name': ingredient_name},
            'message_key': 'recommendations.inventory_optimization.message',
            'message_params': {
                'ingredient_name': ingredient_name,
                'period': period,
                'suggested_increase': suggested_increase
            }
        }

    @staticmethod
    def _production_efficiency(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Production efficiency recommendation"""
        metadata = enriched.alert_metadata

        suggested_time = metadata.get('suggested_time', '')
        savings_percent = round(metadata.get('savings_percent', 0), 1)

        return {
            'title_key': 'recommendations.production_efficiency.title',
            'title_params': {},
            'message_key': 'recommendations.production_efficiency.message',
            'message_params': {
                'suggested_time': suggested_time,
                'savings_percent': savings_percent
            }
        }

    @staticmethod
    def _sales_opportunity(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Sales opportunity recommendation"""
        metadata = enriched.alert_metadata

        product_name = metadata.get('product_name', 'Producto')
        days = metadata.get('days', '')
        increase_percent = round(metadata.get('increase_percent', 0), 0)

        return {
            'title_key': 'recommendations.sales_opportunity.title',
            'title_params': {'product_name': product_name},
            'message_key': 'recommendations.sales_opportunity.message',
            'message_params': {
                'product_name': product_name,
                'days': days,
                'increase_percent': increase_percent
            }
        }

    @staticmethod
    def _seasonal_adjustment(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Seasonal adjustment recommendation"""
        metadata = enriched.alert_metadata

        season = metadata.get('season', 'temporada')
        products = metadata.get('products', 'productos estacionales')

        return {
            'title_key': 'recommendations.seasonal_adjustment.title',
            'title_params': {},
            'message_key': 'recommendations.seasonal_adjustment.message',
            'message_params': {
                'season': season,
                'products': products
            }
        }

    @staticmethod
    def _cost_reduction(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Cost reduction recommendation"""
        metadata = enriched.alert_metadata

        supplier_name = metadata.get('supplier_name', 'Proveedor')
        ingredient = metadata.get('ingredient', 'ingrediente')
        savings_euros = round(metadata.get('savings_euros', 0), 0)

        return {
            'title_key': 'recommendations.cost_reduction.title',
            'title_params': {},
            'message_key': 'recommendations.cost_reduction.message',
            'message_params': {
                'supplier_name': supplier_name,
                'ingredient': ingredient,
                'savings_euros': savings_euros
            }
        }

    @staticmethod
    def _waste_reduction(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Waste reduction recommendation"""
        metadata = enriched.alert_metadata

        product = metadata.get('product', 'producto')
        waste_reduction_percent = round(metadata.get('waste_reduction_percent', 0), 0)

        return {
            'title_key': 'recommendations.waste_reduction.title',
            'title_params': {},
            'message_key': 'recommendations.waste_reduction.message',
            'message_params': {
                'product': product,
                'waste_reduction_percent': waste_reduction_percent
            }
        }

    @staticmethod
    def _quality_improvement(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Quality improvement recommendation"""
        metadata = enriched.alert_metadata

        product = metadata.get('product', 'producto')

        return {
            'title_key': 'recommendations.quality_improvement.title',
            'title_params': {},
            'message_key': 'recommendations.quality_improvement.message',
            'message_params': {
                'product': product
            }
        }

    @staticmethod
    def _customer_satisfaction(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Customer satisfaction recommendation"""
        metadata = enriched.alert_metadata

        product = metadata.get('product', 'producto')
        days = metadata.get('days', '')

        return {
            'title_key': 'recommendations.customer_satisfaction.title',
            'title_params': {},
            'message_key': 'recommendations.customer_satisfaction.message',
            'message_params': {
                'product': product,
                'days': days
            }
        }

    @staticmethod
    def _energy_optimization(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Energy optimization recommendation"""
        metadata = enriched.alert_metadata

        start_time = metadata.get('start_time', '')
        end_time = metadata.get('end_time', '')
        savings_euros = round(metadata.get('savings_euros', 0), 0)

        return {
            'title_key': 'recommendations.energy_optimization.title',
            'title_params': {},
            'message_key': 'recommendations.energy_optimization.message',
            'message_params': {
                'start_time': start_time,
                'end_time': end_time,
                'savings_euros': savings_euros
            }
        }

    @staticmethod
    def _staff_optimization(enriched: EnrichedAlert) -> Dict[str, Any]:
        """Staff optimization recommendation"""
        metadata = enriched.alert_metadata

        days = metadata.get('days', '')
        hours = metadata.get('hours', '')

        return {
            'title_key': 'recommendations.staff_optimization.title',
            'title_params': {},
            'message_key': 'recommendations.staff_optimization.message',
            'message_params': {
                'days': days,
                'hours': hours
            }
        }


def generate_contextual_message(enriched: EnrichedAlert) -> Dict[str, Any]:
    """
    Main entry point for contextual message generation with i18n support

    Args:
        enriched: Fully enriched alert with all context

    Returns:
        Dict with:
        - title_key: i18n translation key for the title
        - title_params: parameters for the title translation
        - message_key: i18n translation key for the message
        - message_params: parameters for the message translation
        - fallback_title: fallback if i18n is not available
        - fallback_message: fallback if i18n is not available
    """
    return ContextualMessageGenerator.generate_message_data(enriched)
|
||||
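
# Example usage (illustrative sketch; the i18n helper `i18n.t` and the
# `enriched` instance below are assumptions, not part of this module):
#   data = generate_contextual_message(enriched)
#   title = i18n.t(data['title_key'], **data['title_params'])
#   message = i18n.t(data['message_key'], **data['message_params'])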
@@ -18,6 +18,7 @@ from .suppliers_client import SuppliersServiceClient
from .tenant_client import TenantServiceClient
from .ai_insights_client import AIInsightsClient
from .alerts_client import AlertsServiceClient
from .alert_processor_client import AlertProcessorClient, get_alert_processor_client
from .procurement_client import ProcurementServiceClient
from .distribution_client import DistributionServiceClient

@@ -158,6 +159,10 @@ def get_distribution_client(config: BaseServiceSettings = None, service_name: st
    return _client_cache[cache_key]


# Note: get_alert_processor_client is already defined in alert_processor_client.py
# and imported above, so we don't need to redefine it here


class ServiceClients:
    """Convenient wrapper for all service clients"""

@@ -267,6 +272,7 @@ __all__ = [
    'RecipesServiceClient',
    'SuppliersServiceClient',
    'AlertsServiceClient',
    'AlertProcessorClient',
    'TenantServiceClient',
    'DistributionServiceClient',
    'ServiceClients',
@@ -280,6 +286,7 @@ __all__ = [
    'get_recipes_client',
    'get_suppliers_client',
    'get_alerts_client',
    'get_alert_processor_client',
    'get_tenant_client',
    'get_procurement_client',
    'get_distribution_client',
@@ -0,0 +1,220 @@
# shared/clients/alert_processor_client.py
"""
Alert Processor Service Client - Inter-service communication
Handles communication with the alert processor service for alert lifecycle management
"""

import structlog
from typing import Dict, Any, List, Optional
from uuid import UUID

from shared.clients.base_service_client import BaseServiceClient
from shared.config.base import BaseServiceSettings

logger = structlog.get_logger()


class AlertProcessorClient(BaseServiceClient):
    """Client for communicating with the alert processor service via gateway"""

    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
        super().__init__(calling_service_name, config)

    def get_service_base_path(self) -> str:
        """Return the base path for alert processor service APIs"""
        return "/api/v1"

    # ================================================================
    # ALERT LIFECYCLE MANAGEMENT
    # ================================================================

    async def acknowledge_alerts_by_metadata(
        self,
        tenant_id: UUID,
        alert_type: str,
        metadata_filter: Dict[str, Any],
        acknowledged_by: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Acknowledge all active alerts matching alert type and metadata.

        Used when user actions trigger alert acknowledgment (e.g., approving a PO).

        Args:
            tenant_id: Tenant UUID
            alert_type: Alert type to filter (e.g., 'po_approval_needed')
            metadata_filter: Metadata fields to match (e.g., {'po_id': 'uuid'})
            acknowledged_by: Optional user ID who acknowledged

        Returns:
            {
                "success": true,
                "acknowledged_count": 2,
                "alert_ids": ["uuid1", "uuid2"]
            }
        """
        try:
            payload = {
                "alert_type": alert_type,
                "metadata_filter": metadata_filter
            }

            if acknowledged_by:
                payload["acknowledged_by"] = acknowledged_by

            result = await self.post(
                f"tenants/{tenant_id}/alerts/acknowledge-by-metadata",
                tenant_id=str(tenant_id),
                json=payload
            )

            if result and result.get("success"):
                logger.info(
                    "Acknowledged alerts by metadata",
                    tenant_id=str(tenant_id),
                    alert_type=alert_type,
                    count=result.get("acknowledged_count", 0),
                    calling_service=self.calling_service_name
                )

            return result or {"success": False, "acknowledged_count": 0, "alert_ids": []}

        except Exception as e:
            logger.error(
                "Error acknowledging alerts by metadata",
                error=str(e),
                tenant_id=str(tenant_id),
                alert_type=alert_type,
                metadata_filter=metadata_filter,
                calling_service=self.calling_service_name
            )
            return {"success": False, "acknowledged_count": 0, "alert_ids": [], "error": str(e)}

    async def resolve_alerts_by_metadata(
        self,
        tenant_id: UUID,
        alert_type: str,
        metadata_filter: Dict[str, Any],
        resolved_by: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Resolve all active alerts matching alert type and metadata.

        Used when user actions complete an alert's underlying issue (e.g., marking delivery received).

        Args:
            tenant_id: Tenant UUID
            alert_type: Alert type to filter (e.g., 'delivery_overdue')
            metadata_filter: Metadata fields to match (e.g., {'po_id': 'uuid'})
            resolved_by: Optional user ID who resolved

        Returns:
            {
                "success": true,
                "resolved_count": 1,
                "alert_ids": ["uuid1"]
            }
        """
        try:
            payload = {
                "alert_type": alert_type,
                "metadata_filter": metadata_filter
            }

            if resolved_by:
                payload["resolved_by"] = resolved_by

            result = await self.post(
                f"tenants/{tenant_id}/alerts/resolve-by-metadata",
                tenant_id=str(tenant_id),
                json=payload
            )

            if result and result.get("success"):
                logger.info(
                    "Resolved alerts by metadata",
                    tenant_id=str(tenant_id),
                    alert_type=alert_type,
                    count=result.get("resolved_count", 0),
                    calling_service=self.calling_service_name
                )

            return result or {"success": False, "resolved_count": 0, "alert_ids": []}

        except Exception as e:
            logger.error(
                "Error resolving alerts by metadata",
                error=str(e),
                tenant_id=str(tenant_id),
                alert_type=alert_type,
                metadata_filter=metadata_filter,
                calling_service=self.calling_service_name
            )
            return {"success": False, "resolved_count": 0, "alert_ids": [], "error": str(e)}

    async def get_active_alerts(
        self,
        tenant_id: UUID,
        priority_level: Optional[str] = None,
        limit: int = 100
    ) -> List[Dict[str, Any]]:
        """
        Get active alerts for a tenant.

        Args:
            tenant_id: Tenant UUID
            priority_level: Optional priority filter (critical, important, standard, info)
            limit: Maximum number of alerts to return

        Returns:
            List of alert dictionaries
        """
        try:
            params = {
                "status": "active",
                "limit": limit
            }

            if priority_level:
                params["priority_level"] = priority_level

            result = await self.get(
                f"tenants/{tenant_id}/alerts",
                tenant_id=str(tenant_id),
                params=params
            )

            alerts = result.get("alerts", []) if isinstance(result, dict) else []

            logger.info(
                "Retrieved active alerts",
                tenant_id=str(tenant_id),
                count=len(alerts),
                calling_service=self.calling_service_name
            )

            return alerts

        except Exception as e:
            logger.error(
                "Error fetching active alerts",
                error=str(e),
                tenant_id=str(tenant_id),
                calling_service=self.calling_service_name
            )
            return []


# Factory function for easy import
def get_alert_processor_client(config: BaseServiceSettings, calling_service_name: str) -> AlertProcessorClient:
    """
    Factory function to create an AlertProcessorClient instance.

    Args:
        config: Service configuration with gateway URL
        calling_service_name: Name of the service making the call (for logging)

    Returns:
        AlertProcessorClient instance
    """
    return AlertProcessorClient(config, calling_service_name)
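
# Example usage (illustrative sketch; the calling service name and filter
# values below are assumptions):
#   client = get_alert_processor_client(settings, "procurement-service")
#   result = await client.resolve_alerts_by_metadata(
#       tenant_id, "delivery_overdue", {"po_id": str(po_id)}
#   )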
@@ -159,17 +159,38 @@ class DistributionServiceClient(BaseServiceClient):
            if status:
                params["status"] = status

            # Use _make_request directly to construct correct URL
            # Gateway route: /api/v1/tenants/{tenant_id}/distribution/{path}
            response = await self._make_request(
                "GET",
                f"tenants/{tenant_id}/distribution/routes",
                params=params
            )

            # Handle different response formats
            if isinstance(response, list):
                # Direct list of routes
                logger.info("Retrieved delivery routes",
                            tenant_id=tenant_id,
                            count=len(response))
                return response
            elif isinstance(response, dict):
                # Response wrapped in routes key
                if "routes" in response:
                    logger.info("Retrieved delivery routes",
                                tenant_id=tenant_id,
                                count=len(response.get("routes", [])))
                    return response.get("routes", [])
                else:
                    # Return the whole dict if it's a single route
                    logger.info("Retrieved delivery routes",
                                tenant_id=tenant_id,
                                count=1)
                    return [response]
            logger.info("No delivery routes found",
                        tenant_id=tenant_id)
            return []
        except Exception as e:
            logger.error("Error getting delivery routes",
                         tenant_id=tenant_id,
@@ -193,14 +214,17 @@ class DistributionServiceClient(BaseServiceClient):
        """
        try:
            response = await self.get(
                f"distribution/routes/{route_id}",
                tenant_id=tenant_id
            )

            if response:
                logger.info("Retrieved delivery route detail",
                            tenant_id=tenant_id,
                            route_id=route_id)
                # Ensure we return the route data directly if it's wrapped in a route key
                if isinstance(response, dict) and "route" in response:
                    return response["route"]
            return response
        except Exception as e:
            logger.error("Error getting delivery route detail",
@@ -241,17 +265,38 @@ class DistributionServiceClient(BaseServiceClient):
            if status:
                params["status"] = status

            # Use _make_request directly to construct correct URL
            # Gateway route: /api/v1/tenants/{tenant_id}/distribution/{path}
            response = await self._make_request(
                "GET",
                f"tenants/{tenant_id}/distribution/shipments",
                params=params
            )

            # Handle different response formats
            if isinstance(response, list):
                # Direct list of shipments
                logger.info("Retrieved shipments",
                            tenant_id=tenant_id,
                            count=len(response))
                return response
            elif isinstance(response, dict):
                # Response wrapped in shipments key
                if "shipments" in response:
                    logger.info("Retrieved shipments",
                                tenant_id=tenant_id,
                                count=len(response.get("shipments", [])))
                    return response.get("shipments", [])
                else:
                    # Return the whole dict if it's a single shipment
                    logger.info("Retrieved shipments",
                                tenant_id=tenant_id,
                                count=1)
                    return [response]
            logger.info("No shipments found",
                        tenant_id=tenant_id)
            return []
        except Exception as e:
            logger.error("Error getting shipments",
                         tenant_id=tenant_id,
|
        """
        try:
            response = await self.get(
                f"distribution/shipments/{shipment_id}",
                tenant_id=tenant_id
            )

            if response:
                logger.info("Retrieved shipment detail",
                            tenant_id=tenant_id,
                            shipment_id=shipment_id)
                # Ensure we return the shipment data directly if it's wrapped in a shipment key
                if isinstance(response, dict) and "shipment" in response:
                    return response["shipment"]
            return response
        except Exception as e:
            logger.error("Error getting shipment detail",
@@ -320,7 +368,7 @@ class DistributionServiceClient(BaseServiceClient):
            }

            response = await self.put(
                f"distribution/shipments/{shipment_id}/status",
                data=payload,
                tenant_id=tenant_id
            )
@@ -343,57 +391,8 @@ class DistributionServiceClient(BaseServiceClient):
    # INTERNAL DEMO ENDPOINTS
    # ================================================================

    async def setup_enterprise_distribution_demo(
        self,
        parent_tenant_id: str,
        child_tenant_ids: List[str],
        session_id: str
    ) -> Optional[Dict[str, Any]]:
        """
        Internal endpoint to setup distribution for enterprise demo

        Args:
            parent_tenant_id: Parent tenant ID
            child_tenant_ids: List of child tenant IDs
            session_id: Demo session ID

        Returns:
            Distribution setup result
        """
        try:
            url = f"{self.service_base_url}/api/v1/internal/demo/setup"

            async with self.get_http_client() as client:
                response = await client.post(
                    url,
                    json={
                        "parent_tenant_id": parent_tenant_id,
                        "child_tenant_ids": child_tenant_ids,
                        "session_id": session_id
                    },
                    headers={
                        "X-Internal-API-Key": self.config.INTERNAL_API_KEY,
                        "Content-Type": "application/json"
                    }
                )

                if response.status_code == 200:
                    result = response.json()
                    logger.info("Setup enterprise distribution demo",
                                parent_tenant_id=parent_tenant_id,
                                child_count=len(child_tenant_ids))
                    return result
                else:
                    logger.error("Failed to setup enterprise distribution demo",
                                 status_code=response.status_code,
                                 response_text=response.text)
                    return None

        except Exception as e:
            logger.error("Error setting up enterprise distribution demo",
                         parent_tenant_id=parent_tenant_id,
                         error=str(e))
            return None
    # Legacy setup_enterprise_distribution_demo method removed
    # Distribution now uses standard /internal/demo/clone endpoint via DataCloner

    async def get_shipments_for_date(
        self,
@@ -411,21 +410,45 @@ class DistributionServiceClient(BaseServiceClient):
        List of shipments for the date
        """
        try:
            # Use _make_request directly to construct correct URL
            # Gateway route: /api/v1/tenants/{tenant_id}/distribution/{path}
            response = await self._make_request(
                "GET",
                f"tenants/{tenant_id}/distribution/shipments",
                params={
                    "date_from": target_date.isoformat(),
                    "date_to": target_date.isoformat()
                }
            )

            # Handle different response formats
            if isinstance(response, list):
                # Direct list of shipments
                logger.info("Retrieved shipments for date",
                            tenant_id=tenant_id,
                            target_date=target_date.isoformat(),
                            shipment_count=len(response))
                return response
            elif isinstance(response, dict):
                # Response wrapped in shipments key
                if "shipments" in response:
                    logger.info("Retrieved shipments for date",
                                tenant_id=tenant_id,
                                target_date=target_date.isoformat(),
                                shipment_count=len(response.get("shipments", [])))
                    return response.get("shipments", [])
                else:
                    # Return the whole dict if it's a single shipment
                    logger.info("Retrieved shipments for date",
                                tenant_id=tenant_id,
                                target_date=target_date.isoformat(),
                                shipment_count=1)
                    return [response]
            logger.info("No shipments found for date",
                        tenant_id=tenant_id,
                        target_date=target_date.isoformat())
            return []
        except Exception as e:
            logger.error("Error getting shipments for date",
                         tenant_id=tenant_id,
@@ -451,4 +474,4 @@ class DistributionServiceClient(BaseServiceClient):
# Factory function for dependency injection
def create_distribution_client(config: BaseServiceSettings, service_name: str = "unknown") -> DistributionServiceClient:
    """Create distribution service client instance"""
    return DistributionServiceClient(config, service_name)
@@ -420,9 +420,12 @@ class ForecastServiceClient(BaseServiceClient):
        if product_id:
            params["product_id"] = product_id

        # Use _make_request directly because the base_service_client adds /tenants/{tenant_id}/ prefix
        # Gateway route is: /api/v1/tenants/{tenant_id}/forecasting/enterprise/{path}
        # So we need the full path without tenant_id parameter to avoid double prefixing
        return await self._make_request(
            "GET",
            f"tenants/{parent_tenant_id}/forecasting/enterprise/aggregated",
            params=params
        )
@@ -655,6 +655,53 @@ class InventoryServiceClient(BaseServiceClient):
    # DASHBOARD METHODS
    # ================================================================

    async def get_inventory_summary_batch(
        self,
        tenant_ids: List[str]
    ) -> Dict[str, Any]:
        """
        Get inventory summaries for multiple tenants in a single request.

        Phase 2 optimization: Eliminates N+1 query patterns for enterprise dashboards.

        Args:
            tenant_ids: List of tenant IDs to fetch

        Returns:
            Dict mapping tenant_id -> inventory summary
        """
        try:
            if not tenant_ids:
                return {}

            if len(tenant_ids) > 100:
                logger.warning("Batch request exceeds max tenant limit", requested=len(tenant_ids))
                tenant_ids = tenant_ids[:100]

            result = await self.post(
                "inventory/batch/inventory-summary",
                data={"tenant_ids": tenant_ids},
                tenant_id=tenant_ids[0]  # Use first tenant for auth context
            )

            summaries = result if isinstance(result, dict) else {}

            logger.info(
                "Batch retrieved inventory summaries",
                requested=len(tenant_ids),
                found=len(summaries)
            )

            return summaries

        except Exception as e:
            logger.error(
                "Error batch fetching inventory summaries",
                error=str(e),
                tenant_count=len(tenant_ids)
            )
            return {}
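
    # Example usage (illustrative sketch; replaces an N+1 loop of per-tenant
    # calls, and the "low_stock_count" field is an assumption about the
    # summary payload shape):
    #   summaries = await client.get_inventory_summary_batch(child_tenant_ids)
    #   low_stock = summaries.get(child_tenant_ids[0], {}).get("low_stock_count")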

    async def get_stock_status(
        self,
        tenant_id: str
@@ -692,7 +739,7 @@ class InventoryServiceClient(BaseServiceClient):
        """
        try:
            return await self.get(
                "/sustainability/widget",
                tenant_id=tenant_id
            )
        except Exception as e:
@@ -138,7 +138,8 @@ class ProcurementServiceClient(BaseServiceClient):
    async def get_pending_purchase_orders(
        self,
        tenant_id: str,
        limit: int = 50,
        enrich_supplier: bool = True
    ) -> Optional[List[Dict[str, Any]]]:
        """
        Get pending purchase orders
@@ -146,6 +147,8 @@ class ProcurementServiceClient(BaseServiceClient):
        Args:
            tenant_id: Tenant ID
            limit: Maximum number of results
            enrich_supplier: Whether to include supplier details (default: True)
                Set to False for faster queries when supplier data will be fetched separately

        Returns:
            List of pending purchase orders
@@ -153,14 +156,19 @@ class ProcurementServiceClient(BaseServiceClient):
        try:
            response = await self.get(
                "procurement/purchase-orders",
                params={
                    "status": "pending_approval",
                    "limit": limit,
                    "enrich_supplier": enrich_supplier
                },
                tenant_id=tenant_id
            )

            if response:
                logger.info("Retrieved pending purchase orders",
                            tenant_id=tenant_id,
                            count=len(response),
                            enriched=enrich_supplier)
            return response if response else []
        except Exception as e:
            logger.error("Error getting pending purchase orders",
@@ -168,6 +176,60 @@ class ProcurementServiceClient(BaseServiceClient):
                         error=str(e))
            return []

    async def get_purchase_orders_by_supplier(
        self,
        tenant_id: str,
        supplier_id: str,
        date_from: Optional[date] = None,
        date_to: Optional[date] = None,
        status: Optional[str] = None,
        limit: int = 100
    ) -> Optional[List[Dict[str, Any]]]:
        """
        Get purchase orders for a specific supplier

        Args:
            tenant_id: Tenant ID
            supplier_id: Supplier ID to filter by
            date_from: Start date for filtering
            date_to: End date for filtering
            status: Status filter (e.g., 'approved', 'delivered')
            limit: Maximum number of results

        Returns:
            List of purchase orders with items
        """
        try:
            params = {
                "supplier_id": supplier_id,
                "limit": limit
            }
            if date_from:
                params["date_from"] = date_from.isoformat()
            if date_to:
                params["date_to"] = date_to.isoformat()
            if status:
                params["status"] = status

            response = await self.get(
                "procurement/purchase-orders",
                params=params,
                tenant_id=tenant_id
            )

            if response:
                logger.info("Retrieved purchase orders by supplier",
                            tenant_id=tenant_id,
                            supplier_id=supplier_id,
                            count=len(response))
            return response if response else []
        except Exception as e:
            logger.error("Error getting purchase orders by supplier",
                         tenant_id=tenant_id,
                         supplier_id=supplier_id,
                         error=str(e))
            return []

    # ================================================================
    # INTERNAL TRANSFER ENDPOINTS (NEW FOR ENTERPRISE TIER)
    # ================================================================
@@ -449,6 +449,53 @@ class ProductionServiceClient(BaseServiceClient):
    # DASHBOARD METHODS
    # ================================================================

    async def get_production_summary_batch(
        self,
        tenant_ids: List[str]
    ) -> Dict[str, Any]:
        """
        Get production summaries for multiple tenants in a single request.

        Phase 2 optimization: Eliminates N+1 query patterns for enterprise dashboards.

        Args:
            tenant_ids: List of tenant IDs to fetch

        Returns:
            Dict mapping tenant_id -> production summary
        """
        try:
            if not tenant_ids:
                return {}

            if len(tenant_ids) > 100:
                logger.warning("Batch request exceeds max tenant limit", requested=len(tenant_ids))
                tenant_ids = tenant_ids[:100]

            result = await self.post(
                "production/batch/production-summary",
                data={"tenant_ids": tenant_ids},
                tenant_id=tenant_ids[0]  # Use first tenant for auth context
            )

            summaries = result if isinstance(result, dict) else {}

            logger.info(
                "Batch retrieved production summaries",
                requested=len(tenant_ids),
                found=len(summaries)
            )

            return summaries

        except Exception as e:
            logger.error(
                "Error batch fetching production summaries",
                error=str(e),
                tenant_count=len(tenant_ids)
            )
            return {}

    async def get_todays_batches(
        self,
        tenant_id: str
@@ -215,6 +215,65 @@ class SalesServiceClient(BaseServiceClient):
            params=params
        )

    async def get_sales_summary_batch(
        self,
        tenant_ids: List[str],
        start_date: date,
        end_date: date
    ) -> Dict[str, Any]:
        """
        Get sales summaries for multiple tenants in a single request.

        Phase 2 optimization: Eliminates N+1 query patterns for enterprise dashboards.

        Args:
            tenant_ids: List of tenant IDs to fetch
            start_date: Start date for summary range
            end_date: End date for summary range

        Returns:
            Dict mapping tenant_id -> sales summary
        """
        try:
            if not tenant_ids:
                return {}

            if len(tenant_ids) > 100:
                logger.warning("Batch request exceeds max tenant limit", requested=len(tenant_ids))
                tenant_ids = tenant_ids[:100]

            data = {
                "tenant_ids": tenant_ids,
                "start_date": start_date.isoformat(),
                "end_date": end_date.isoformat()
            }

            result = await self.post(
                "sales/batch/sales-summary",
                data=data,
                tenant_id=tenant_ids[0]  # Use first tenant for auth context
            )

            summaries = result if isinstance(result, dict) else {}

            logger.info(
                "Batch retrieved sales summaries",
                requested=len(tenant_ids),
                found=len(summaries),
                start_date=start_date.isoformat(),
                end_date=end_date.isoformat()
            )

            return summaries

        except Exception as e:
            logger.error(
                "Error batch fetching sales summaries",
                error=str(e),
                tenant_count=len(tenant_ids)
            )
            return {}

    # ================================================================
    # DATA IMPORT
    # ================================================================
@@ -62,17 +62,54 @@ class SuppliersServiceClient(BaseServiceClient):
            params["search_term"] = search
            if category:
                params["supplier_type"] = category

            result = await self.get("suppliers", tenant_id=tenant_id, params=params)
            suppliers = result if result else []
            logger.info("Searched suppliers from suppliers service",
                        search_term=search, suppliers_count=len(suppliers), tenant_id=tenant_id)
            return suppliers
        except Exception as e:
            logger.error("Error searching suppliers",
                         error=str(e), tenant_id=tenant_id)
            return []

    async def get_suppliers_batch(self, tenant_id: str, supplier_ids: List[str]) -> Optional[List[Dict[str, Any]]]:
        """
        Get multiple suppliers in a single request for performance optimization.

        This method eliminates N+1 query patterns when fetching supplier data
        for multiple purchase orders or other entities.

        Args:
            tenant_id: Tenant ID
            supplier_ids: List of supplier IDs to fetch

        Returns:
            List of supplier dictionaries or empty list if error
        """
        try:
            if not supplier_ids:
                return []

            # Join IDs as comma-separated string
            ids_param = ",".join(supplier_ids)
            params = {"ids": ids_param}

            result = await self.get("suppliers/batch", tenant_id=tenant_id, params=params)
            suppliers = result if result else []

            logger.info("Batch retrieved suppliers from suppliers service",
                        requested_count=len(supplier_ids),
                        found_count=len(suppliers),
                        tenant_id=tenant_id)
            return suppliers
        except Exception as e:
            logger.error("Error batch retrieving suppliers",
                         error=str(e),
                         requested_count=len(supplier_ids),
                         tenant_id=tenant_id)
            return []

    # ================================================================
    # SUPPLIER RECOMMENDATIONS
    # ================================================================
@@ -107,186 +144,7 @@ class SuppliersServiceClient(BaseServiceClient):
            logger.error("Error getting best supplier for ingredient",
                         error=str(e), ingredient_id=ingredient_id, tenant_id=tenant_id)
            return None

    # ================================================================
    # PURCHASE ORDER MANAGEMENT
    # ================================================================

    async def create_purchase_order(self, tenant_id: str, order_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Create a new purchase order"""
        try:
            result = await self.post("suppliers/purchase-orders", data=order_data, tenant_id=tenant_id)
            if result:
                logger.info("Created purchase order",
                            order_id=result.get('id'),
                            supplier_id=order_data.get('supplier_id'),
                            tenant_id=tenant_id)
            return result
        except Exception as e:
            logger.error("Error creating purchase order",
                         error=str(e), tenant_id=tenant_id)
            return None

    async def get_purchase_orders(self, tenant_id: str, status: Optional[str] = None, supplier_id: Optional[str] = None) -> Optional[List[Dict[str, Any]]]:
        """Get purchase orders with optional filtering"""
        try:
            params = {}
            if status:
                params["status"] = status
            if supplier_id:
                params["supplier_id"] = supplier_id

            result = await self.get("suppliers/purchase-orders", tenant_id=tenant_id, params=params)
            orders = result.get('orders', []) if result else []
            logger.info("Retrieved purchase orders from suppliers service",
                        orders_count=len(orders), tenant_id=tenant_id)
            return orders
        except Exception as e:
            logger.error("Error getting purchase orders",
                         error=str(e), tenant_id=tenant_id)
            return []

    async def update_purchase_order_status(self, tenant_id: str, order_id: str, status: str) -> Optional[Dict[str, Any]]:
        """Update purchase order status"""
        try:
            data = {"status": status}
            result = await self.put(f"suppliers/purchase-orders/{order_id}/status", data=data, tenant_id=tenant_id)
            if result:
                logger.info("Updated purchase order status",
                            order_id=order_id, status=status, tenant_id=tenant_id)
            return result
        except Exception as e:
            logger.error("Error updating purchase order status",
                         error=str(e), order_id=order_id, tenant_id=tenant_id)
            return None

    async def approve_purchase_order(
        self,
        tenant_id: str,
        po_id: str,
        approval_data: Dict[str, Any]
    ) -> Optional[Dict[str, Any]]:
        """
        Auto-approve a purchase order

        Args:
            tenant_id: Tenant ID
            po_id: Purchase Order ID
            approval_data: Approval data including:
                - approved_by: User ID or "system" for auto-approval
                - approval_notes: Notes about the approval
                - auto_approved: Boolean flag indicating auto-approval
                - approval_reasons: List of reasons for auto-approval

        Returns:
            Updated purchase order data or None
        """
        try:
            # Format the approval request payload
            payload = {
                "action": "approve",
                "notes": approval_data.get("approval_notes", "Auto-approved by system")
            }

            result = await self.post(
                f"suppliers/purchase-orders/{po_id}/approve",
                data=payload,
                tenant_id=tenant_id
            )

            if result:
                logger.info("Auto-approved purchase order",
                            po_id=po_id,
                            tenant_id=tenant_id,
                            auto_approved=approval_data.get("auto_approved", True))
            return result
        except Exception as e:
            logger.error("Error auto-approving purchase order",
                         error=str(e),
                         po_id=po_id,
                         tenant_id=tenant_id)
            return None

    async def get_supplier(self, tenant_id: str, supplier_id: str) -> Optional[Dict[str, Any]]:
        """
        Get supplier details with performance metrics

        Args:
            tenant_id: Tenant ID
            supplier_id: Supplier ID

        Returns:
            Supplier data including performance metrics or None
        """
        try:
            # Use the existing get_supplier_by_id method which returns full supplier data
            result = await self.get_supplier_by_id(tenant_id, supplier_id)

            if result:
                logger.info("Retrieved supplier data for auto-approval",
                            supplier_id=supplier_id,
                            tenant_id=tenant_id)
            return result
        except Exception as e:
            logger.error("Error getting supplier data",
                         error=str(e),
                         supplier_id=supplier_id,
                         tenant_id=tenant_id)
            return None

    # ================================================================
    # DELIVERY MANAGEMENT
    # ================================================================

    async def get_deliveries(self, tenant_id: str, status: Optional[str] = None, date: Optional[str] = None) -> Optional[List[Dict[str, Any]]]:
        """Get deliveries with optional filtering"""
        try:
            params = {}
            if status:
                params["status"] = status
            if date:
                params["date"] = date

            result = await self.get("suppliers/deliveries", tenant_id=tenant_id, params=params)
            deliveries = result.get('deliveries', []) if result else []
            logger.info("Retrieved deliveries from suppliers service",
                        deliveries_count=len(deliveries), tenant_id=tenant_id)
            return deliveries
        except Exception as e:
            logger.error("Error getting deliveries",
                         error=str(e), tenant_id=tenant_id)
            return []

    async def update_delivery_status(self, tenant_id: str, delivery_id: str, status: str, notes: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """Update delivery status"""
        try:
            data = {"status": status}
            if notes:
                data["notes"] = notes

            result = await self.put(f"suppliers/deliveries/{delivery_id}/status", data=data, tenant_id=tenant_id)
            if result:
                logger.info("Updated delivery status",
                            delivery_id=delivery_id, status=status, tenant_id=tenant_id)
            return result
        except Exception as e:
            logger.error("Error updating delivery status",
                         error=str(e), delivery_id=delivery_id, tenant_id=tenant_id)
            return None

    async def get_supplier_order_summaries(self, tenant_id: str) -> Optional[Dict[str, Any]]:
        """Get supplier order summaries for central bakery dashboard"""
        try:
            result = await self.get("suppliers/dashboard/order-summaries", tenant_id=tenant_id)
            if result:
                logger.info("Retrieved supplier order summaries from suppliers service",
                            tenant_id=tenant_id)
            return result
        except Exception as e:
            logger.error("Error getting supplier order summaries",
                         error=str(e), tenant_id=tenant_id)
            return None

    # ================================================================
    # PERFORMANCE TRACKING
    # ================================================================
@@ -310,7 +310,9 @@ class TenantServiceClient(BaseServiceClient):
        List of child tenant dictionaries
        """
        try:
            # Use _make_request directly to avoid double tenant_id in URL
            # The gateway expects: /api/v1/tenants/{tenant_id}/children
            result = await self._make_request("GET", f"tenants/{parent_tenant_id}/children")
            if result:
                logger.info("Retrieved child tenants",
                            parent_tenant_id=parent_tenant_id,
@@ -238,7 +238,7 @@ class BaseServiceSettings(BaseSettings):
    POS_SERVICE_URL: str = os.getenv("POS_SERVICE_URL", "http://pos-service:8000")
    NOMINATIM_SERVICE_URL: str = os.getenv("NOMINATIM_SERVICE_URL", "http://nominatim:8080")
    DEMO_SESSION_SERVICE_URL: str = os.getenv("DEMO_SESSION_SERVICE_URL", "http://demo-session-service:8000")
    ALERT_PROCESSOR_SERVICE_URL: str = os.getenv("ALERT_PROCESSOR_SERVICE_URL", "http://alert-processor:8000")
    PROCUREMENT_SERVICE_URL: str = os.getenv("PROCUREMENT_SERVICE_URL", "http://procurement-service:8000")
    ORCHESTRATOR_SERVICE_URL: str = os.getenv("ORCHESTRATOR_SERVICE_URL", "http://orchestrator-service:8000")
    AI_INSIGHTS_SERVICE_URL: str = os.getenv("AI_INSIGHTS_SERVICE_URL", "http://ai-insights-service:8000")
@@ -0,0 +1,191 @@
# Unified Messaging Architecture

This document describes the standardized messaging system used across all bakery-ia microservices.

## Overview

The unified messaging architecture provides a consistent approach for:
- Publishing business events (inventory changes, user actions, etc.)
- Publishing user-facing alerts, notifications, and recommendations
- Consuming events from other services
- Maintaining service-to-service communication patterns

## Core Components

### 1. UnifiedEventPublisher
The main publisher for all event types, located in `shared/messaging/messaging_client.py`:

```python
from shared.messaging import UnifiedEventPublisher, EVENT_TYPES, RabbitMQClient

# Initialize
rabbitmq_client = RabbitMQClient(settings.RABBITMQ_URL, service_name="my-service")
await rabbitmq_client.connect()
event_publisher = UnifiedEventPublisher(rabbitmq_client, "my-service")

# Publish business events
await event_publisher.publish_business_event(
    event_type=EVENT_TYPES.INVENTORY.STOCK_ADDED,
    tenant_id=tenant_id,
    data={"ingredient_id": "123", "quantity": 100.0}
)

# Publish alerts (action required)
await event_publisher.publish_alert(
    event_type="procurement.po_approval_needed",
    tenant_id=tenant_id,
    severity="high",  # urgent, high, medium, low
    data={"po_id": "456", "supplier_name": "ABC Corp"}
)

# Publish notifications (informational)
await event_publisher.publish_notification(
    event_type="production.batch_completed",
    tenant_id=tenant_id,
    data={"batch_id": "789", "product_name": "Bread"}
)

# Publish recommendations (suggestions)
await event_publisher.publish_recommendation(
    event_type="forecasting.demand_surge_predicted",
    tenant_id=tenant_id,
    data={"product_name": "Croissants", "surge_percentage": 25.0}
)
```

### 2. Event Types Constants
Use predefined event types for consistency:

```python
from shared.messaging import EVENT_TYPES

# Inventory events
EVENT_TYPES.INVENTORY.INGREDIENT_CREATED
EVENT_TYPES.INVENTORY.STOCK_ADDED
EVENT_TYPES.INVENTORY.LOW_STOCK_ALERT

# Production events
EVENT_TYPES.PRODUCTION.BATCH_CREATED
EVENT_TYPES.PRODUCTION.BATCH_COMPLETED

# Procurement events
EVENT_TYPES.PROCUREMENT.PO_APPROVED
EVENT_TYPES.PROCUREMENT.DELIVERY_SCHEDULED
```

### 3. Service Integration Pattern

#### In Service Main.py:
```python
from shared.messaging import UnifiedEventPublisher, ServiceMessagingManager

class MyService(StandardFastAPIService):
    def __init__(self):
        self.messaging_manager = None
        self.event_publisher = None  # For alerts/notifications
        self.unified_publisher = None  # For business events

        super().__init__(
            service_name="my-service",
            # ... other params
            enable_messaging=True
        )

    async def _setup_messaging(self):
        try:
            self.messaging_manager = ServiceMessagingManager("my-service", settings.RABBITMQ_URL)
            success = await self.messaging_manager.setup()
            if success:
                self.event_publisher = self.messaging_manager.publisher
                self.unified_publisher = self.messaging_manager.publisher

                self.logger.info("Messaging setup completed")
            else:
                raise Exception("Failed to setup messaging")
        except Exception as e:
            self.logger.error("Messaging setup failed", error=str(e))
            raise

    async def on_startup(self, app: FastAPI):
        await super().on_startup(app)

        # Pass publishers to services
        my_service = MyAlertService(self.event_publisher)
        my_event_service = MyEventService(self.unified_publisher)

        # Store in app state if needed
        app.state.my_service = my_service
        app.state.my_event_service = my_event_service

    async def on_shutdown(self, app: FastAPI):
        if self.messaging_manager:
            await self.messaging_manager.cleanup()
        await super().on_shutdown(app)
```

#### In Service Implementation:
```python
from shared.messaging import UnifiedEventPublisher

class MyEventService:
    def __init__(self, event_publisher: UnifiedEventPublisher):
        self.publisher = event_publisher

    async def handle_business_logic(self, tenant_id: UUID, data: Dict[str, Any]):
        # Publish business events
        await self.publisher.publish_business_event(
            event_type="mydomain.action_performed",
            tenant_id=tenant_id,
            data=data
        )
```

## Migration Guide

### Old Pattern (Deprecated):
```python
# OLD - Don't use this anymore
from shared.alerts.base_service import BaseAlertService

class MyService(BaseAlertService):
    def __init__(self, config):
        super().__init__(config)

    async def send_alert(self, tenant_id, data):
        await self.publish_item(tenant_id, data, item_type="alert")
```

### New Pattern (Recommended):
```python
# NEW - Use UnifiedEventPublisher for all event types
from shared.messaging import UnifiedEventPublisher

class MyService:
    def __init__(self, event_publisher: UnifiedEventPublisher):
        self.publisher = event_publisher

    async def send_alert(self, tenant_id: UUID, data: Dict[str, Any]):
        await self.publisher.publish_alert(
            event_type="mydomain.alert_type",
            tenant_id=tenant_id,
            severity="high",
            data=data
        )
```

## Event Routing

Events are routed using the following patterns:
- **Alerts**: `alert.{domain}.{severity}` (e.g., `alert.inventory.high`)
- **Notifications**: `notification.{domain}.info` (e.g., `notification.production.info`)
- **Recommendations**: `recommendation.{domain}.medium` (e.g., `recommendation.forecasting.medium`)
- **Business Events**: `business.{event_type}` (e.g., `business.inventory_stock_added`)
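
For orientation, a minimal sketch of how a routing key could be derived from these patterns (illustrative only; the `domain` extraction below is an assumption, and the actual composition lives inside `UnifiedEventPublisher`):

```python
def build_routing_key(kind: str, event_type: str, severity: str = "info") -> str:
    """Compose a routing key following the patterns above (sketch)."""
    if kind == "business":
        # business.{event_type} with dots flattened, e.g. business.inventory_stock_added
        return f"business.{event_type.replace('.', '_')}"
    # The domain is assumed to be the first dot-separated segment of the event type
    domain = event_type.split(".", 1)[0]
    return f"{kind}.{domain}.{severity}"

# build_routing_key("alert", "inventory.stock.low", "high") -> "alert.inventory.high"
```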

## Best Practices

1. **Consistent Naming**: Use lowercase, dot-separated event types (e.g., `inventory.stock.added`)
2. **Tenant Awareness**: Always include tenant_id for multi-tenant operations
3. **Data Minimization**: Include only essential data in events
4. **Error Handling**: Always wrap event publishing in try/except blocks (see the sketch after this list)
5. **Service Names**: Use consistent service names matching your service definition
6. **Lifecycle Management**: Always clean up messaging resources during service shutdown
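
A minimal sketch of practice 4 (the service and event names are illustrative, mirroring the examples above):

```python
import structlog

logger = structlog.get_logger()

async def publish_low_stock_alert(publisher, tenant_id, ingredient_id: str):
    """Publish an alert without letting a broker failure break the business flow."""
    try:
        await publisher.publish_alert(
            event_type="inventory.stock.low",
            tenant_id=tenant_id,
            severity="high",
            data={"ingredient_id": ingredient_id},
        )
    except Exception as e:
        # Best-effort: log and continue rather than failing the caller
        logger.error("Failed to publish alert", error=str(e), tenant_id=str(tenant_id))
```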
@@ -0,0 +1,21 @@
from .messaging_client import (
    RabbitMQClient,
    UnifiedEventPublisher,
    ServiceMessagingManager,
    initialize_service_publisher,
    cleanup_service_publisher,
    EventMessage,
    EventType,
    EVENT_TYPES
)

__all__ = [
    'RabbitMQClient',
    'UnifiedEventPublisher',
    'ServiceMessagingManager',
    'initialize_service_publisher',
    'cleanup_service_publisher',
    'EventMessage',
    'EventType',
    'EVENT_TYPES'
]
@@ -1,141 +0,0 @@
"""
shared/messaging/events.py
Event definitions for microservices communication
"""
from datetime import datetime, timezone
from typing import Dict, Any, Optional
import uuid


class BaseEvent:
    """Base event class - FIXED"""
    def __init__(self, service_name: str, data: Dict[str, Any], event_type: str = "", correlation_id: Optional[str] = None):
        self.service_name = service_name
        self.data = data
        self.event_type = event_type
        self.event_id = str(uuid.uuid4())
        self.timestamp = datetime.now(timezone.utc)
        self.correlation_id = correlation_id

    def to_dict(self) -> Dict[str, Any]:
        """Converts the event object to a dictionary for JSON serialization - FIXED"""
        return {
            "service_name": self.service_name,
            "data": self.data,
            "event_type": self.event_type,
            "event_id": self.event_id,
            "timestamp": self.timestamp.isoformat(),  # Convert datetime to ISO string
            "correlation_id": self.correlation_id
        }

# Auth Events - FIXED
class UserRegisteredEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="user.registered",
            correlation_id=correlation_id
        )

class UserLoginEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="user.login",
            correlation_id=correlation_id
        )

class UserLogoutEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="user.logout",
            correlation_id=correlation_id
        )

# Training Events
class TrainingStartedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="training.started",
            correlation_id=correlation_id
        )

class TrainingCompletedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="training.completed",
            correlation_id=correlation_id
        )

class TrainingFailedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="training.failed",
            correlation_id=correlation_id
        )

# Forecasting Events
class ForecastGeneratedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="forecast.generated",
            correlation_id=correlation_id
        )

# Data Events
class DataImportedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="data.imported",
            correlation_id=correlation_id
        )

# Procurement Events
class PurchaseOrderApprovedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="po.approved",
            correlation_id=correlation_id
        )

class PurchaseOrderRejectedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="po.rejected",
            correlation_id=correlation_id
        )

class PurchaseOrderSentToSupplierEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="po.sent_to_supplier",
            correlation_id=correlation_id
        )

class DeliveryReceivedEvent(BaseEvent):
    def __init__(self, service_name: str, data: Dict[str, Any], correlation_id: Optional[str] = None):
        super().__init__(
            service_name=service_name,
            data=data,
            event_type="delivery.received",
            correlation_id=correlation_id
        )
@@ -0,0 +1,642 @@
|
||||
"""
|
||||
Unified RabbitMQ Client and Publisher for Bakery-IA Services
|
||||
|
||||
This module provides a standardized approach for all services to connect to RabbitMQ,
|
||||
publish messages, and handle messaging lifecycle. It combines all messaging
|
||||
functionality into a single, unified interface.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Dict, Any, Callable, Optional, Union
|
||||
from datetime import datetime, date, timezone
|
||||
import uuid
|
||||
import structlog
|
||||
from contextlib import suppress
|
||||
from enum import Enum
|
||||
|
||||
try:
|
||||
import aio_pika
|
||||
from aio_pika import connect_robust, Message, DeliveryMode, ExchangeType
|
||||
AIO_PIKA_AVAILABLE = True
|
||||
except ImportError:
|
||||
AIO_PIKA_AVAILABLE = False
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class EventType(Enum):
|
||||
"""Event type enum for consistent event classification"""
|
||||
BUSINESS = "business" # Business events like inventory changes, user actions
|
||||
ALERT = "alert" # User-facing alerts requiring action
|
||||
NOTIFICATION = "notification" # User-facing informational notifications
|
||||
RECOMMENDATION = "recommendation" # User-facing recommendations
|
||||
SYSTEM = "system" # System-level events
|
||||
|
||||
|
||||
class EVENT_TYPES:
    """Static class for event type constants"""
    class INVENTORY:
        INGREDIENT_CREATED = "inventory.ingredient.created"
        STOCK_ADDED = "inventory.stock.added"
        STOCK_CONSUMED = "inventory.stock.consumed"
        LOW_STOCK_ALERT = "inventory.alert.low_stock"
        EXPIRATION_ALERT = "inventory.alert.expiration"
        STOCK_UPDATED = "inventory.stock.updated"
        STOCK_TRANSFERRED = "inventory.stock.transferred"
        STOCK_WASTED = "inventory.stock.wasted"

    class PRODUCTION:
        BATCH_CREATED = "production.batch.created"
        BATCH_STARTED = "production.batch.started"
        BATCH_COMPLETED = "production.batch.completed"
        EQUIPMENT_STATUS_CHANGED = "production.equipment.status_changed"

    class PROCUREMENT:
        PO_CREATED = "procurement.po.created"
        PO_APPROVED = "procurement.po.approved"
        PO_REJECTED = "procurement.po.rejected"
        DELIVERY_SCHEDULED = "procurement.delivery.scheduled"
        DELIVERY_RECEIVED = "procurement.delivery.received"
        DELIVERY_OVERDUE = "procurement.delivery.overdue"

    class FORECASTING:
        FORECAST_GENERATED = "forecasting.forecast.generated"
        FORECAST_UPDATED = "forecasting.forecast.updated"
        DEMAND_SPIKE_DETECTED = "forecasting.demand.spike_detected"
        WEATHER_IMPACT_FORECAST = "forecasting.weather.impact_forecast"

    class NOTIFICATION:
        NOTIFICATION_SENT = "notification.sent"
        NOTIFICATION_FAILED = "notification.failed"
        NOTIFICATION_DELIVERED = "notification.delivered"
        NOTIFICATION_OPENED = "notification.opened"

    class TENANT:
        TENANT_CREATED = "tenant.created"
        TENANT_UPDATED = "tenant.updated"
        TENANT_DELETED = "tenant.deleted"
        TENANT_MEMBER_ADDED = "tenant.member.added"
        TENANT_MEMBER_REMOVED = "tenant.member.removed"

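# Usage sketch: referencing a shared constant instead of a raw string when
# emitting a business event. `publisher` is assumed to be an already-initialized
# UnifiedEventPublisher (defined later in this module); the tenant id and
# payload values are illustrative placeholders.
async def _example_emit_stock_added(publisher: "UnifiedEventPublisher") -> bool:
    return await publisher.publish_business_event(
        event_type=EVENT_TYPES.INVENTORY.STOCK_ADDED,
        tenant_id="550e8400-e29b-41d4-a716-446655440000",
        data={"ingredient_id": "123e4567-e89b-12d3-a456-426614174000", "quantity": 25.0},
    )
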
def json_serializer(obj):
    """JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    elif isinstance(obj, uuid.UUID):
        return str(obj)
    elif hasattr(obj, '__class__') and obj.__class__.__name__ == 'Decimal':
        # Handle Decimal objects from SQLAlchemy without importing decimal
        return float(obj)
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")

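# Usage sketch: json_serializer as the `default` hook for json.dumps. The
# payload values below are illustrative only.
def _example_serialize_payload() -> str:
    payload = {
        "event_id": uuid.uuid4(),                    # UUID -> str
        "occurred_at": datetime.now(timezone.utc),   # datetime -> ISO 8601 string
        "quantity": 12.5,
    }
    return json.dumps(payload, default=json_serializer)
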
class RabbitMQHeartbeatMonitor:
    """Monitor to ensure heartbeats are processed during heavy operations"""

    def __init__(self, client):
        self.client = client
        self._monitor_task = None
        self._should_monitor = False

    async def start_monitoring(self):
        """Start heartbeat monitoring task"""
        if self._monitor_task and not self._monitor_task.done():
            return

        self._should_monitor = True
        self._monitor_task = asyncio.create_task(self._monitor_loop())

    async def stop_monitoring(self):
        """Stop heartbeat monitoring task"""
        self._should_monitor = False
        if self._monitor_task and not self._monitor_task.done():
            self._monitor_task.cancel()
            with suppress(asyncio.CancelledError):
                await self._monitor_task

    async def _monitor_loop(self):
        """Monitor loop that periodically yields control for heartbeat processing"""
        while self._should_monitor:
            # Yield control to allow heartbeat processing
            await asyncio.sleep(0.1)

            # Verify connection is still alive
            if self.client.connection and not self.client.connection.is_closed:
                # Check if connection is still responsive
                try:
                    # This is a lightweight check to ensure the connection is responsive
                    pass  # The heartbeat mechanism in aio_pika handles this internally
                except Exception as e:
                    logger.warning("Connection check failed", error=str(e))
                    self.client.connected = False
                    break
            else:
                logger.warning("Connection is closed, stopping monitor")
                break

class RabbitMQClient:
    """
    Universal RabbitMQ client for all bakery-ia microservices
    Handles all messaging patterns with proper fallbacks
    """

    def __init__(self, connection_url: str, service_name: str = "unknown"):
        self.connection_url = connection_url
        self.service_name = service_name
        self.connection = None
        self.channel = None
        self.connected = False
        self._reconnect_attempts = 0
        self._max_reconnect_attempts = 5
        self.heartbeat_monitor = RabbitMQHeartbeatMonitor(self)

    async def connect(self):
        """Connect to RabbitMQ with retry logic"""
        if not AIO_PIKA_AVAILABLE:
            logger.warning("aio-pika not available, messaging disabled", service=self.service_name)
            return False

        try:
            self.connection = await connect_robust(
                self.connection_url,
                heartbeat=600  # Increase heartbeat to 600 seconds (10 minutes) to prevent timeouts
            )
            self.channel = await self.connection.channel()
            await self.channel.set_qos(prefetch_count=100)  # Performance optimization

            self.connected = True
            self._reconnect_attempts = 0

            # Start heartbeat monitoring
            await self.heartbeat_monitor.start_monitoring()

            logger.info("Connected to RabbitMQ", service=self.service_name)
            return True

        except Exception as e:
            self.connected = False
            self._reconnect_attempts += 1
            logger.warning(
                "Failed to connect to RabbitMQ",
                service=self.service_name,
                error=str(e),
                attempt=self._reconnect_attempts
            )
            return False

    async def disconnect(self):
        """Disconnect from RabbitMQ with proper channel cleanup"""
        try:
            # Stop heartbeat monitoring first
            await self.heartbeat_monitor.stop_monitoring()

            # Close channel before connection to avoid "unexpected close" warnings
            if self.channel and not self.channel.is_closed:
                await self.channel.close()
                logger.debug("RabbitMQ channel closed", service=self.service_name)

            # Then close connection
            if self.connection and not self.connection.is_closed:
                await self.connection.close()
                logger.info("Disconnected from RabbitMQ", service=self.service_name)

            self.connected = False

        except Exception as e:
            logger.warning("Error during RabbitMQ disconnect",
                           service=self.service_name,
                           error=str(e))
            self.connected = False

    async def ensure_connected(self) -> bool:
        """Ensure connection is active, reconnect if needed"""
        if self.connected and self.connection and not self.connection.is_closed:
            return True

        if self._reconnect_attempts >= self._max_reconnect_attempts:
            logger.error("Max reconnection attempts reached", service=self.service_name)
            return False

        return await self.connect()

    async def publish_event(self, exchange_name: str, routing_key: str, event_data: Dict[str, Any],
                            persistent: bool = True) -> bool:
        """
        Universal event publisher with automatic fallback
        Returns True if published successfully, False otherwise
        """
        try:
            # Ensure we're connected
            if not await self.ensure_connected():
                logger.debug("Event not published - RabbitMQ unavailable",
                             service=self.service_name, routing_key=routing_key)
                return False

            # Declare exchange
            exchange = await self.channel.declare_exchange(
                exchange_name,
                ExchangeType.TOPIC,
                durable=True
            )

            # Prepare message with proper JSON serialization
            message_body = json.dumps(event_data, default=json_serializer)
            message = Message(
                message_body.encode(),
                delivery_mode=DeliveryMode.PERSISTENT if persistent else DeliveryMode.NOT_PERSISTENT,
                content_type="application/json",
                timestamp=datetime.now(timezone.utc),  # timezone-aware timestamp for the AMQP message
                headers={
                    "source_service": self.service_name,
                    "event_id": event_data.get("event_id", str(uuid.uuid4()))
                }
            )

            # Publish message
            await exchange.publish(message, routing_key=routing_key)

            logger.debug("Event published successfully",
                         service=self.service_name,
                         exchange=exchange_name,
                         routing_key=routing_key,
                         size=len(message_body))
            return True

        except Exception as e:
            logger.error("Failed to publish event",
                         service=self.service_name,
                         exchange=exchange_name,
                         routing_key=routing_key,
                         error=str(e))
            self.connected = False  # Force reconnection on next attempt
            return False

    async def consume_events(self, exchange_name: str, queue_name: str,
                             routing_key: str, callback: Callable) -> bool:
        """Universal event consumer"""
        try:
            if not await self.ensure_connected():
                return False

            # Declare exchange
            exchange = await self.channel.declare_exchange(
                exchange_name,
                ExchangeType.TOPIC,
                durable=True
            )

            # Declare queue
            queue = await self.channel.declare_queue(
                queue_name,
                durable=True
            )

            # Bind queue to exchange
            await queue.bind(exchange, routing_key)

            # Set up consumer
            await queue.consume(callback)

            logger.info("Started consuming events",
                        service=self.service_name,
                        queue=queue_name,
                        routing_key=routing_key)
            return True

        except Exception as e:
            logger.error("Failed to start consuming events",
                         service=self.service_name,
                         error=str(e))
            return False

    # High-level convenience methods for common patterns
    async def publish_user_event(self, event_type: str, user_data: Dict[str, Any]) -> bool:
        """Publish user-related events"""
        return await self.publish_event("user.events", f"user.{event_type}", user_data)

    async def publish_training_event(self, event_type: str, training_data: Dict[str, Any]) -> bool:
        """Publish training-related events"""
        return await self.publish_event("training.events", f"training.{event_type}", training_data)

    async def publish_data_event(self, event_type: str, data: Dict[str, Any]) -> bool:
        """Publish data-related events"""
        return await self.publish_event("data.events", f"data.{event_type}", data)

    async def publish_forecast_event(self, event_type: str, forecast_data: Dict[str, Any]) -> bool:
        """Publish forecast-related events"""
        return await self.publish_event("forecast.events", f"forecast.{event_type}", forecast_data)

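# Usage sketch: direct use of RabbitMQClient. The URL, exchange, queue, and
# routing key below are illustrative placeholders, not values defined by this
# module.
async def _example_direct_client() -> None:
    client = RabbitMQClient("amqp://guest:guest@localhost/", service_name="example-service")
    if not await client.connect():
        return

    # Fire-and-forget publish to a durable topic exchange.
    await client.publish_event(
        exchange_name="events.exchange",
        routing_key="business.example_event",
        event_data={"event_id": str(uuid.uuid4()), "hello": "world"},
    )

    # Consume matching events; aio_pika passes an IncomingMessage to the callback.
    async def handler(message) -> None:
        async with message.process():  # ack on successful exit
            logger.info("received", body=message.body.decode())

    await client.consume_events("events.exchange", "example.queue", "business.#", handler)
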
class EventMessage:
    """Standardized event message structure"""

    def __init__(
        self,
        event_type: str,
        tenant_id: Union[str, uuid.UUID],
        service_name: str,
        data: Dict[str, Any],
        event_class: str = "business",  # business, alert, notification, recommendation
        correlation_id: Optional[str] = None,
        trace_id: Optional[str] = None,
        severity: Optional[str] = None,  # For alerts: urgent, high, medium, low
        source: Optional[str] = None
    ):
        self.event_type = event_type
        self.tenant_id = str(tenant_id) if isinstance(tenant_id, uuid.UUID) else tenant_id
        self.service_name = service_name
        self.data = data
        self.event_class = event_class
        self.correlation_id = correlation_id or str(uuid.uuid4())
        self.trace_id = trace_id or str(uuid.uuid4())
        self.severity = severity
        self.source = source or service_name
        self.timestamp = datetime.now(timezone.utc).isoformat()

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for message publishing"""
        result = {
            "event_type": self.event_type,
            "tenant_id": self.tenant_id,
            "service_name": self.service_name,
            "data": self.data,
            "event_class": self.event_class,
            "correlation_id": self.correlation_id,
            "trace_id": self.trace_id,
            "timestamp": self.timestamp
        }

        if self.severity:
            result["severity"] = self.severity
        if self.source:
            result["source"] = self.source

        return result

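# Usage sketch: building a standardized envelope and publishing its dict form.
# Values are placeholders; `client` is assumed to be a connected RabbitMQClient.
async def _example_event_message(client: RabbitMQClient) -> bool:
    event = EventMessage(
        event_type="inventory.stock.updated",
        tenant_id=uuid.uuid4(),  # UUID is coerced to str internally
        service_name="inventory",
        data={"ingredient_id": "123e4567-e89b-12d3-a456-426614174000", "new_quantity": 18.0},
        event_class="business",
    )
    return await client.publish_event(
        "events.exchange", "business.inventory_stock_updated", event.to_dict()
    )
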
class UnifiedEventPublisher:
    """Unified publisher for all event types - business events, alerts, notifications, recommendations"""

    def __init__(self, rabbitmq_client: RabbitMQClient, service_name: str):
        self.rabbitmq = rabbitmq_client
        self.service_name = service_name
        self.exchange = "events.exchange"

    async def publish_event(
        self,
        event_type: str,
        tenant_id: Union[str, uuid.UUID],
        data: Dict[str, Any],
        event_class: str = "business",
        severity: Optional[str] = None
    ) -> bool:
        """
        Publish a standardized event using the unified messaging pattern.

        Args:
            event_type: Type of event (e.g., 'inventory.ingredient.created')
            tenant_id: Tenant identifier
            data: Event payload data
            event_class: One of 'business', 'alert', 'notification', 'recommendation'
            severity: Alert severity (for alert events only)
        """
        # Determine the event domain and the event type separately for the alert
        # processor: event_type should be just the specific event name, with the
        # domain extracted on its own.
        if '.' in event_type and event_class in ["alert", "notification", "recommendation"]:
            # For events like "inventory.critical_stock_shortage", split into domain and event
            parts = event_type.split('.', 1)  # Split only on first dot
            event_domain = parts[0]
            actual_event_type = parts[1]
        else:
            # For simple event types or business events, use as-is
            event_domain = "general" if event_class == "business" else self.service_name
            actual_event_type = event_type

        # For the message payload that goes to the alert processor, use the expected MinimalEvent format
        if event_class in ["alert", "notification", "recommendation"]:
            # Format for alert processor (uses MinimalEvent schema)
            event_payload = {
                "tenant_id": str(tenant_id),
                "event_class": event_class,
                "event_domain": event_domain,
                "event_type": actual_event_type,  # Just the specific event name, not domain.event_name
                "service": self.service_name,  # MinimalEvent expects "service", not "service_name"
                "metadata": data,  # MinimalEvent expects "metadata", not "data"
                "timestamp": datetime.now(timezone.utc).isoformat()
            }
            if severity:
                event_payload["severity"] = severity  # Include severity for alerts
        else:
            # Format for business events (standard format)
            event_payload = {
                "event_type": event_type,
                "tenant_id": str(tenant_id),
                "service_name": self.service_name,
                "data": data,
                "event_class": event_class,
                "timestamp": datetime.now(timezone.utc).isoformat()
            }
            if severity:
                event_payload["severity"] = severity

        # Determine the routing key based on event class. Routing can still use
        # the original event_type format, since it is only used for routing.
        if event_class == "alert":
            routing_key = f"alert.{event_domain}.{severity or 'medium'}"
        elif event_class == "notification":
            routing_key = f"notification.{event_domain}.info"
        elif event_class == "recommendation":
            routing_key = f"recommendation.{event_domain}.medium"
        else:  # business events
            routing_key = f"business.{event_type.replace('.', '_')}"

        try:
            success = await self.rabbitmq.publish_event(
                exchange_name=self.exchange,
                routing_key=routing_key,
                event_data=event_payload
            )

            if success:
                logger.info(
                    "event_published",
                    tenant_id=str(tenant_id),
                    event_type=event_type,
                    event_class=event_class,
                    severity=severity
                )
            else:
                logger.error(
                    "event_publish_failed",
                    tenant_id=str(tenant_id),
                    event_type=event_type
                )

            return success

        except Exception as e:
            logger.error(
                "event_publish_error",
                tenant_id=str(tenant_id),
                event_type=event_type,
                error=str(e)
            )
            return False

    # Business event methods
    async def publish_business_event(
        self,
        event_type: str,
        tenant_id: Union[str, uuid.UUID],
        data: Dict[str, Any]
    ) -> bool:
        """Publish a business event (inventory changes, user actions, etc.)"""
        return await self.publish_event(
            event_type=event_type,
            tenant_id=tenant_id,
            data=data,
            event_class="business"
        )

    # Alert methods
    async def publish_alert(
        self,
        event_type: str,
        tenant_id: Union[str, uuid.UUID],
        severity: str,  # urgent, high, medium, low
        data: Dict[str, Any]
    ) -> bool:
        """Publish an alert (actionable by user)"""
        return await self.publish_event(
            event_type=event_type,
            tenant_id=tenant_id,
            data=data,
            event_class="alert",
            severity=severity
        )

    # Notification methods
    async def publish_notification(
        self,
        event_type: str,
        tenant_id: Union[str, uuid.UUID],
        data: Dict[str, Any]
    ) -> bool:
        """Publish a notification (informational to user)"""
        return await self.publish_event(
            event_type=event_type,
            tenant_id=tenant_id,
            data=data,
            event_class="notification"
        )

    # Recommendation methods
    async def publish_recommendation(
        self,
        event_type: str,
        tenant_id: Union[str, uuid.UUID],
        data: Dict[str, Any]
    ) -> bool:
        """Publish a recommendation (suggestion to user)"""
        return await self.publish_event(
            event_type=event_type,
            tenant_id=tenant_id,
            data=data,
            event_class="recommendation"
        )

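# Usage sketch: how an alert flows through publish_alert. With
# event_type="inventory.critical_stock_shortage" and severity="high", the
# payload takes the MinimalEvent shape (domain "inventory", event type
# "critical_stock_shortage") and the routing key becomes "alert.inventory.high".
# Identifiers below are placeholders.
async def _example_publish_alert(publisher: UnifiedEventPublisher) -> bool:
    return await publisher.publish_alert(
        event_type="inventory.critical_stock_shortage",
        tenant_id="550e8400-e29b-41d4-a716-446655440000",
        severity="high",
        data={"ingredient_name": "Flour", "current_stock": 5.2, "required_stock": 10.0},
    )
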
class ServiceMessagingManager:
    """Manager class to handle messaging lifecycle for services"""

    def __init__(self, service_name: str, rabbitmq_url: str):
        self.service_name = service_name
        self.rabbitmq_url = rabbitmq_url
        self.rabbitmq_client = None
        self.publisher = None

    async def setup(self):
        """Setup the messaging system for the service"""
        try:
            self.rabbitmq_client = RabbitMQClient(self.rabbitmq_url, self.service_name)
            success = await self.rabbitmq_client.connect()
            if success:
                self.publisher = UnifiedEventPublisher(self.rabbitmq_client, self.service_name)
                logger.info(f"{self.service_name} messaging manager setup completed")
                return True
            else:
                logger.error(f"{self.service_name} messaging manager setup failed")
                return False
        except Exception as e:
            logger.error(f"Error during {self.service_name} messaging manager setup", error=str(e))
            return False

    async def cleanup(self):
        """Cleanup the messaging system for the service"""
        try:
            if self.rabbitmq_client:
                await self.rabbitmq_client.disconnect()
                logger.info(f"{self.service_name} messaging manager cleanup completed")
                return True
            return True  # If no client to clean up, consider it successful
        except Exception as e:
            logger.error(f"Error during {self.service_name} messaging manager cleanup", error=str(e))
            return False

    @property
    def is_ready(self):
        """Check if the messaging system is ready for use"""
        return (self.publisher is not None and
                self.rabbitmq_client is not None and
                self.rabbitmq_client.connected)

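# Usage sketch: typical service startup/shutdown with the manager. The service
# name, URL, and event values are placeholders supplied by the host service.
async def _example_manager_lifecycle() -> None:
    manager = ServiceMessagingManager("example-service", "amqp://guest:guest@localhost/")
    if await manager.setup() and manager.is_ready:
        await manager.publisher.publish_notification(
            event_type="supply_chain.po_approved",
            tenant_id="550e8400-e29b-41d4-a716-446655440000",
            data={"po_number": "PO-2025-001"},
        )
    await manager.cleanup()
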
# Utility functions for easy service integration
async def initialize_service_publisher(service_name: str, rabbitmq_url: str):
    """
    Initialize a service-specific publisher using the unified messaging system.

    Args:
        service_name: Name of the service (e.g., 'notification-service', 'forecasting-service')
        rabbitmq_url: RabbitMQ connection URL

    Returns:
        Tuple of (RabbitMQClient, UnifiedEventPublisher), or (None, None) if initialization failed
    """
    try:
        rabbitmq_client = RabbitMQClient(rabbitmq_url, service_name)
        success = await rabbitmq_client.connect()
        if success:
            publisher = UnifiedEventPublisher(rabbitmq_client, service_name)
            logger.info(f"{service_name} unified messaging publisher initialized")
            return rabbitmq_client, publisher
        else:
            logger.warning(f"{service_name} unified messaging publisher failed to connect")
            return None, None
    except Exception as e:
        logger.error(f"Failed to initialize {service_name} unified messaging publisher", error=str(e))
        return None, None


async def cleanup_service_publisher(rabbitmq_client):
    """
    Cleanup messaging for a service.

    Args:
        rabbitmq_client: The RabbitMQ client to disconnect

    Returns:
        True if cleanup was successful, False otherwise
    """
    try:
        if rabbitmq_client:
            await rabbitmq_client.disconnect()
            logger.info("Service messaging cleanup completed")
            return True
        return True  # If no client to clean up, consider it successful
    except Exception as e:
        logger.error("Error during service messaging cleanup", error=str(e))
        return False
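
# Usage sketch: the two helpers above in a service entrypoint, e.g. startup and
# shutdown hooks. The service name, URL, and payload are illustrative placeholders.
async def _example_service_entrypoint() -> None:
    client, publisher = await initialize_service_publisher(
        "forecasting-service", "amqp://guest:guest@localhost/"
    )
    if publisher:
        await publisher.publish_business_event(
            event_type="forecasting.forecast.generated",
            tenant_id="550e8400-e29b-41d4-a716-446655440000",
            data={"horizon_days": 7},
        )
    await cleanup_service_publisher(client)  # safe even if client is None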
@@ -1,266 +0,0 @@
# shared/messaging/rabbitmq.py
"""
RabbitMQ messaging client for microservices - FIXED VERSION
"""
import asyncio
import json
from typing import Dict, Any, Callable, Optional
from datetime import datetime, date
import uuid
import structlog
from contextlib import suppress

try:
    import aio_pika
    from aio_pika import connect_robust, Message, DeliveryMode, ExchangeType
    AIO_PIKA_AVAILABLE = True
except ImportError:
    AIO_PIKA_AVAILABLE = False

logger = structlog.get_logger()

class HeartbeatMonitor:
    """Monitor to ensure heartbeats are processed during heavy operations"""

    def __init__(self, client):
        self.client = client
        self._monitor_task = None
        self._should_monitor = False

    async def start_monitoring(self):
        """Start heartbeat monitoring task"""
        if self._monitor_task and not self._monitor_task.done():
            return

        self._should_monitor = True
        self._monitor_task = asyncio.create_task(self._monitor_loop())

    async def stop_monitoring(self):
        """Stop heartbeat monitoring task"""
        self._should_monitor = False
        if self._monitor_task and not self._monitor_task.done():
            self._monitor_task.cancel()
            with suppress(asyncio.CancelledError):
                await self._monitor_task

    async def _monitor_loop(self):
        """Monitor loop that periodically yields control for heartbeat processing"""
        while self._should_monitor:
            # Yield control to allow heartbeat processing
            await asyncio.sleep(0.1)

            # Verify connection is still alive
            if self.client.connection and not self.client.connection.is_closed:
                # Check if connection is still responsive
                try:
                    # This is a lightweight check to ensure the connection is responsive
                    pass  # The heartbeat mechanism in aio_pika handles this internally
                except Exception as e:
                    logger.warning("Connection check failed", error=str(e))
                    self.client.connected = False
                    break
            else:
                logger.warning("Connection is closed, stopping monitor")
                break

def json_serializer(obj):
    """JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    elif isinstance(obj, uuid.UUID):
        return str(obj)
    elif hasattr(obj, '__class__') and obj.__class__.__name__ == 'Decimal':
        # Handle Decimal objects from SQLAlchemy without importing decimal
        return float(obj)
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")

class RabbitMQClient:
    """
    Universal RabbitMQ client for all microservices
    Handles all messaging patterns with proper fallbacks
    """

    def __init__(self, connection_url: str, service_name: str = "unknown"):
        self.connection_url = connection_url
        self.service_name = service_name
        self.connection = None
        self.channel = None
        self.connected = False
        self._reconnect_attempts = 0
        self._max_reconnect_attempts = 5
        self.heartbeat_monitor = HeartbeatMonitor(self)

    async def connect(self):
        """Connect to RabbitMQ with retry logic"""
        if not AIO_PIKA_AVAILABLE:
            logger.warning("aio-pika not available, messaging disabled", service=self.service_name)
            return False

        try:
            self.connection = await connect_robust(
                self.connection_url,
                heartbeat=600  # Increase heartbeat to 600 seconds (10 minutes) to prevent timeouts
            )
            self.channel = await self.connection.channel()
            await self.channel.set_qos(prefetch_count=100)  # Performance optimization

            self.connected = True
            self._reconnect_attempts = 0

            # Start heartbeat monitoring
            await self.heartbeat_monitor.start_monitoring()

            logger.info("Connected to RabbitMQ", service=self.service_name)
            return True

        except Exception as e:
            self.connected = False
            self._reconnect_attempts += 1
            logger.warning(
                "Failed to connect to RabbitMQ",
                service=self.service_name,
                error=str(e),
                attempt=self._reconnect_attempts
            )
            return False

    async def disconnect(self):
        """Disconnect from RabbitMQ with proper channel cleanup"""
        try:
            # Stop heartbeat monitoring first
            await self.heartbeat_monitor.stop_monitoring()

            # Close channel before connection to avoid "unexpected close" warnings
            if self.channel and not self.channel.is_closed:
                await self.channel.close()
                logger.debug("RabbitMQ channel closed", service=self.service_name)

            # Then close connection
            if self.connection and not self.connection.is_closed:
                await self.connection.close()
                logger.info("Disconnected from RabbitMQ", service=self.service_name)

            self.connected = False

        except Exception as e:
            logger.warning("Error during RabbitMQ disconnect",
                           service=self.service_name,
                           error=str(e))
            self.connected = False

    async def ensure_connected(self) -> bool:
        """Ensure connection is active, reconnect if needed"""
        if self.connected and self.connection and not self.connection.is_closed:
            return True

        if self._reconnect_attempts >= self._max_reconnect_attempts:
            logger.error("Max reconnection attempts reached", service=self.service_name)
            return False

        return await self.connect()

    async def publish_event(self, exchange_name: str, routing_key: str, event_data: Dict[str, Any],
                            persistent: bool = True) -> bool:
        """
        Universal event publisher with automatic fallback
        Returns True if published successfully, False otherwise
        """
        try:
            # Ensure we're connected
            if not await self.ensure_connected():
                logger.debug("Event not published - RabbitMQ unavailable",
                             service=self.service_name, routing_key=routing_key)
                return False

            # Declare exchange
            exchange = await self.channel.declare_exchange(
                exchange_name,
                ExchangeType.TOPIC,
                durable=True
            )

            # Prepare message with proper JSON serialization
            message_body = json.dumps(event_data, default=json_serializer)
            message = Message(
                message_body.encode(),
                delivery_mode=DeliveryMode.PERSISTENT if persistent else DeliveryMode.NOT_PERSISTENT,
                content_type="application/json",
                timestamp=datetime.now(),
                headers={
                    "source_service": self.service_name,
                    "event_id": event_data.get("event_id", str(uuid.uuid4()))
                }
            )

            # Publish message
            await exchange.publish(message, routing_key=routing_key)

            logger.debug("Event published successfully",
                         service=self.service_name,
                         exchange=exchange_name,
                         routing_key=routing_key,
                         size=len(message_body))
            return True

        except Exception as e:
            logger.error("Failed to publish event",
                         service=self.service_name,
                         exchange=exchange_name,
                         routing_key=routing_key,
                         error=str(e))
            self.connected = False  # Force reconnection on next attempt
            return False

    async def consume_events(self, exchange_name: str, queue_name: str,
                             routing_key: str, callback: Callable) -> bool:
        """Universal event consumer"""
        try:
            if not await self.ensure_connected():
                return False

            # Declare exchange
            exchange = await self.channel.declare_exchange(
                exchange_name,
                ExchangeType.TOPIC,
                durable=True
            )

            # Declare queue
            queue = await self.channel.declare_queue(
                queue_name,
                durable=True
            )

            # Bind queue to exchange
            await queue.bind(exchange, routing_key)

            # Set up consumer
            await queue.consume(callback)

            logger.info("Started consuming events",
                        service=self.service_name,
                        queue=queue_name,
                        routing_key=routing_key)
            return True

        except Exception as e:
            logger.error("Failed to start consuming events",
                         service=self.service_name,
                         error=str(e))
            return False

    # High-level convenience methods for common patterns
    async def publish_user_event(self, event_type: str, user_data: Dict[str, Any]) -> bool:
        """Publish user-related events"""
        return await self.publish_event("user.events", f"user.{event_type}", user_data)

    async def publish_training_event(self, event_type: str, training_data: Dict[str, Any]) -> bool:
        """Publish training-related events"""
        return await self.publish_event("training.events", f"training.{event_type}", training_data)

    async def publish_data_event(self, event_type: str, data: Dict[str, Any]) -> bool:
        """Publish data-related events"""
        return await self.publish_event("data.events", f"data.{event_type}", data)

    async def publish_forecast_event(self, event_type: str, forecast_data: Dict[str, Any]) -> bool:
        """Publish forecast-related events"""
        return await self.publish_event("forecast.events", f"forecast.{event_type}", forecast_data)
@@ -165,7 +165,7 @@ class EnrichedAlert(BaseModel):
    trend_context: Optional[TrendContext] = Field(None, description="Trend analysis (if trend warning)")

    # AI Reasoning
    ai_reasoning_summary: Optional[str] = Field(None, description="Plain language AI reasoning")
    ai_reasoning_i18n: Optional[Dict[str, Any]] = Field(None, description="i18n-ready AI reasoning with key and params")
    reasoning_data: Optional[Dict[str, Any]] = Field(None, description="Structured reasoning from orchestrator")
    confidence_score: Optional[float] = Field(None, description="AI confidence 0-1")

@@ -0,0 +1,146 @@
# shared/schemas/events.py
"""
|
||||
Minimal event schemas for services to emit events.
|
||||
|
||||
Services send minimal event data with only event_type and metadata.
|
||||
All enrichment, i18n generation, and priority calculation happens
|
||||
in the alert_processor service.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Dict, Any, Literal, Optional
|
||||
from datetime import datetime
|
||||
from uuid import UUID
|
||||
|
||||
|
||||
class MinimalEvent(BaseModel):
|
||||
"""
|
||||
Minimal event structure sent by services.
|
||||
|
||||
Services only need to provide:
|
||||
- tenant_id: Who this event belongs to
|
||||
- event_class: alert, notification, or recommendation
|
||||
- event_domain: Business domain (inventory, production, supply_chain, etc.)
|
||||
- event_type: Specific event identifier (critical_stock_shortage, production_delay, etc.)
|
||||
- service: Source service name
|
||||
- metadata: Dictionary with event-specific data
|
||||
|
||||
The alert_processor service enriches this with:
|
||||
- i18n keys and parameters
|
||||
- Priority score and level
|
||||
- Orchestrator context (AI actions)
|
||||
- Business impact analysis
|
||||
- Urgency assessment
|
||||
- User agency determination
|
||||
- Smart actions
|
||||
"""
|
||||
|
||||
tenant_id: str = Field(..., description="Tenant UUID as string")
|
||||
event_class: Literal["alert", "notification", "recommendation"] = Field(
|
||||
...,
|
||||
description="Event classification - alert requires action, notification is FYI, recommendation is suggestion"
|
||||
)
|
||||
event_domain: str = Field(
|
||||
...,
|
||||
description="Business domain: inventory, production, supply_chain, demand, operations, distribution"
|
||||
)
|
||||
event_type: str = Field(
|
||||
...,
|
||||
description="Specific event type identifier, e.g., critical_stock_shortage, production_delay, po_approval_needed"
|
||||
)
|
||||
service: str = Field(..., description="Source service name, e.g., inventory, production, procurement")
|
||||
metadata: Dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Event-specific data - structure varies by event_type"
|
||||
)
|
||||
timestamp: Optional[datetime] = Field(
|
||||
default=None,
|
||||
description="Event timestamp, set automatically if not provided"
|
||||
)
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
json_schema_extra = {
|
||||
"examples": [
|
||||
{
|
||||
"tenant_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"event_class": "alert",
|
||||
"event_domain": "inventory",
|
||||
"event_type": "critical_stock_shortage",
|
||||
"service": "inventory",
|
||||
"metadata": {
|
||||
"ingredient_id": "123e4567-e89b-12d3-a456-426614174000",
|
||||
"ingredient_name": "Flour",
|
||||
"current_stock": 5.2,
|
||||
"required_stock": 10.0,
|
||||
"shortage_amount": 4.8,
|
||||
"supplier_name": "Flour Supplier Co.",
|
||||
"lead_time_days": 3,
|
||||
"po_id": "PO-12345",
|
||||
"po_amount": 2500.00,
|
||||
"po_status": "pending_approval",
|
||||
"delivery_date": "2025-12-10"
|
||||
}
|
||||
},
|
||||
{
|
||||
"tenant_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"event_class": "alert",
|
||||
"event_domain": "production",
|
||||
"event_type": "production_delay",
|
||||
"service": "production",
|
||||
"metadata": {
|
||||
"batch_id": "987fbc97-4bed-5078-9f07-9141ba07c9f3",
|
||||
"product_name": "Croissant",
|
||||
"batch_number": "B-2025-001",
|
||||
"delay_minutes": 45,
|
||||
"affected_orders": 3,
|
||||
"customer_names": ["Customer A", "Customer B"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"tenant_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"event_class": "notification",
|
||||
"event_domain": "supply_chain",
|
||||
"event_type": "po_approved",
|
||||
"service": "procurement",
|
||||
"metadata": {
|
||||
"po_id": "PO-12345",
|
||||
"po_number": "PO-2025-001",
|
||||
"supplier_name": "Flour Supplier Co.",
|
||||
"total_amount": 2500.00,
|
||||
"currency": "EUR",
|
||||
"approved_at": "2025-12-05T10:30:00Z",
|
||||
"approved_by": "user@example.com"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
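# Usage sketch: constructing a MinimalEvent before publishing. Pydantic
# validates event_class against the Literal at construction time; field values
# here are placeholders.
def _example_minimal_event() -> MinimalEvent:
    event = MinimalEvent(
        tenant_id="550e8400-e29b-41d4-a716-446655440000",
        event_class="alert",
        event_domain="inventory",
        event_type="critical_stock_shortage",
        service="inventory",
        metadata={"ingredient_name": "Flour", "current_stock": 5.2},
    )
    assert event.metadata["ingredient_name"] == "Flour"
    return event
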
# Event Domain Constants
class EventDomain:
    """Standard event domains"""
    INVENTORY = "inventory"
    PRODUCTION = "production"
    SUPPLY_CHAIN = "supply_chain"
    DEMAND = "demand"
    OPERATIONS = "operations"
    DISTRIBUTION = "distribution"
    FINANCE = "finance"


# Event Class Constants
class EventClass:
    """Event classifications"""
    ALERT = "alert"  # Requires user decision/action
    NOTIFICATION = "notification"  # Informational, no action needed
    RECOMMENDATION = "recommendation"  # Optimization suggestion


# Severity Levels (for routing)
class Severity:
    """Alert severity levels for routing"""
    URGENT = "urgent"  # Immediate attention required
    HIGH = "high"  # Important, address soon
    MEDIUM = "medium"  # Standard priority
    LOW = "low"  # Minor, can wait
    INFO = "info"  # Informational only
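
# Usage sketch: composing a routing key from the constants above. This mirrors
# the "<class>.<domain>.<severity>" pattern the unified publisher uses for alerts.
def _example_routing_key() -> str:
    routing_key = f"{EventClass.ALERT}.{EventDomain.INVENTORY}.{Severity.HIGH}"
    assert routing_key == "alert.inventory.high"
    return routing_key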
@@ -10,8 +10,9 @@ from typing import Optional

# Base reference date for all demo seed data
# All seed scripts should use this as the "logical seed date"
# IMPORTANT: Must match the actual dates in seed data (production batches start Jan 8, 2025)
BASE_REFERENCE_DATE = datetime(2025, 1, 8, 6, 0, 0, tzinfo=timezone.utc)
# IMPORTANT: This should be set to approximately the current date to ensure demo data appears current
# Updated to December 1, 2025 to align with current date
BASE_REFERENCE_DATE = datetime(2025, 12, 1, 6, 0, 0, tzinfo=timezone.utc)


def adjust_date_for_demo(