Initial commit - production deployment
0
services/alert_processor/app/__init__.py
Normal file
0
services/alert_processor/app/api/__init__.py
Normal file
430
services/alert_processor/app/api/alerts.py
Normal file
@@ -0,0 +1,430 @@
|
||||
"""
|
||||
Alert API endpoints.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, Query, HTTPException
|
||||
from typing import List, Optional
|
||||
from uuid import UUID
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
import structlog
|
||||
|
||||
from app.core.database import get_db
|
||||
from app.repositories.event_repository import EventRepository
|
||||
from app.schemas.events import EventResponse, EventSummary
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/alerts", response_model=List[EventResponse])
|
||||
async def get_alerts(
|
||||
tenant_id: UUID,
|
||||
event_class: Optional[str] = Query(None, description="Filter by event class"),
|
||||
priority_level: Optional[List[str]] = Query(None, description="Filter by priority levels"),
|
||||
status: Optional[List[str]] = Query(None, description="Filter by status values"),
|
||||
event_domain: Optional[str] = Query(None, description="Filter by domain"),
|
||||
limit: int = Query(50, le=100, description="Max results"),
|
||||
offset: int = Query(0, description="Pagination offset"),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Get filtered list of events.
|
||||
|
||||
Query Parameters:
|
||||
- event_class: alert, notification, recommendation
|
||||
- priority_level: critical, important, standard, info
|
||||
- status: active, acknowledged, resolved, dismissed
|
||||
- event_domain: inventory, production, supply_chain, etc.
|
||||
- limit: Max 100 results
|
||||
- offset: For pagination
|
||||
"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
events = await repo.get_events(
|
||||
tenant_id=tenant_id,
|
||||
event_class=event_class,
|
||||
priority_level=priority_level,
|
||||
status=status,
|
||||
event_domain=event_domain,
|
||||
limit=limit,
|
||||
offset=offset
|
||||
)
|
||||
|
||||
# Convert to response models
|
||||
return [repo._event_to_response(event) for event in events]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("get_alerts_failed", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to retrieve alerts")
|
||||
|
||||
|
||||
@router.get("/alerts/summary", response_model=EventSummary)
|
||||
async def get_alerts_summary(
|
||||
tenant_id: UUID,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Get summary statistics for dashboard.
|
||||
|
||||
Returns counts by:
|
||||
- Status (active, acknowledged, resolved)
|
||||
- Priority level (critical, important, standard, info)
|
||||
- Domain (inventory, production, etc.)
|
||||
- Type class (action_needed, prevented_issue, etc.)
|
||||
"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
summary = await repo.get_summary(tenant_id)
|
||||
return summary
|
||||
|
||||
except Exception as e:
|
||||
logger.error("get_summary_failed", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to retrieve summary")
|
||||
|
||||
|
||||
@router.get("/alerts/{alert_id}", response_model=EventResponse)
|
||||
async def get_alert(
|
||||
tenant_id: UUID,
|
||||
alert_id: UUID,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Get single alert by ID"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
event = await repo.get_event_by_id(alert_id)
|
||||
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Alert not found")
|
||||
|
||||
# Verify tenant ownership
|
||||
if event.tenant_id != tenant_id:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
return repo._event_to_response(event)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("get_alert_failed", error=str(e), alert_id=str(alert_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to retrieve alert")
|
||||
|
||||
|
||||
@router.post("/alerts/{alert_id}/acknowledge", response_model=EventResponse)
|
||||
async def acknowledge_alert(
|
||||
tenant_id: UUID,
|
||||
alert_id: UUID,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Mark alert as acknowledged.
|
||||
|
||||
Sets status to 'acknowledged' and records timestamp.
|
||||
"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Verify ownership first
|
||||
event = await repo.get_event_by_id(alert_id)
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Alert not found")
|
||||
if event.tenant_id != tenant_id:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
# Acknowledge
|
||||
updated_event = await repo.acknowledge_event(alert_id)
|
||||
return repo._event_to_response(updated_event)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("acknowledge_alert_failed", error=str(e), alert_id=str(alert_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to acknowledge alert")
|
||||
|
||||
|
||||
@router.post("/alerts/{alert_id}/resolve", response_model=EventResponse)
|
||||
async def resolve_alert(
|
||||
tenant_id: UUID,
|
||||
alert_id: UUID,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Mark alert as resolved.
|
||||
|
||||
Sets status to 'resolved' and records timestamp.
|
||||
"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Verify ownership first
|
||||
event = await repo.get_event_by_id(alert_id)
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Alert not found")
|
||||
if event.tenant_id != tenant_id:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
# Resolve
|
||||
updated_event = await repo.resolve_event(alert_id)
|
||||
return repo._event_to_response(updated_event)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("resolve_alert_failed", error=str(e), alert_id=str(alert_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to resolve alert")
|
||||
|
||||
|
||||
@router.post("/alerts/{alert_id}/dismiss", response_model=EventResponse)
|
||||
async def dismiss_alert(
|
||||
tenant_id: UUID,
|
||||
alert_id: UUID,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Mark alert as dismissed.
|
||||
|
||||
Sets status to 'dismissed'.
|
||||
"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Verify ownership first
|
||||
event = await repo.get_event_by_id(alert_id)
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Alert not found")
|
||||
if event.tenant_id != tenant_id:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
# Dismiss
|
||||
updated_event = await repo.dismiss_event(alert_id)
|
||||
return repo._event_to_response(updated_event)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("dismiss_alert_failed", error=str(e), alert_id=str(alert_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to dismiss alert")
|
||||
|
||||
|
||||
@router.post("/alerts/{alert_id}/cancel-auto-action")
|
||||
async def cancel_auto_action(
|
||||
tenant_id: UUID,
|
||||
alert_id: UUID,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Cancel an alert's auto-action (escalation countdown).
|
||||
|
||||
Changes type_class from 'escalation' to 'action_needed' if auto-action was pending.
|
||||
"""
|
||||
try:
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Verify ownership first
|
||||
event = await repo.get_event_by_id(alert_id)
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Alert not found")
|
||||
if event.tenant_id != tenant_id:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
# Cancel auto-action (you'll need to implement this in repository)
|
||||
# For now, return success response
|
||||
return {
|
||||
"success": True,
|
||||
"event_id": str(alert_id),
|
||||
"message": "Auto-action cancelled successfully",
|
||||
"updated_type_class": "action_needed"
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("cancel_auto_action_failed", error=str(e), alert_id=str(alert_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to cancel auto-action")
|
||||
|
||||
|
||||
@router.post("/alerts/bulk-acknowledge")
|
||||
async def bulk_acknowledge_alerts(
|
||||
tenant_id: UUID,
|
||||
request_body: dict,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Acknowledge multiple alerts by metadata filter.
|
||||
|
||||
Request body:
|
||||
{
|
||||
"alert_type": "critical_stock_shortage",
|
||||
"metadata_filter": {"ingredient_id": "123"}
|
||||
}
|
||||
"""
|
||||
try:
|
||||
alert_type = request_body.get("alert_type")
|
||||
metadata_filter = request_body.get("metadata_filter", {})
|
||||
|
||||
if not alert_type:
|
||||
raise HTTPException(status_code=400, detail="alert_type is required")
|
||||
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Get matching alerts
|
||||
events = await repo.get_events(
|
||||
tenant_id=tenant_id,
|
||||
event_class="alert",
|
||||
status=["active"],
|
||||
limit=100
|
||||
)
|
||||
|
||||
# Filter by type and metadata
|
||||
matching_ids = []
|
||||
for event in events:
|
||||
if event.event_type == alert_type:
|
||||
# Check if metadata matches
|
||||
matches = all(
|
||||
event.event_metadata.get(key) == value
|
||||
for key, value in metadata_filter.items()
|
||||
)
|
||||
if matches:
|
||||
matching_ids.append(event.id)
|
||||
|
||||
# Acknowledge all matching
|
||||
acknowledged_count = 0
|
||||
for event_id in matching_ids:
|
||||
try:
|
||||
await repo.acknowledge_event(event_id)
|
||||
acknowledged_count += 1
|
||||
except Exception as item_error:
# Log and continue with the remaining alerts
logger.warning("bulk_acknowledge_item_failed", event_id=str(event_id), error=str(item_error))
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"acknowledged_count": acknowledged_count,
|
||||
"alert_ids": [str(id) for id in matching_ids]
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("bulk_acknowledge_failed", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to bulk acknowledge alerts")
|
||||
|
||||
|
||||
@router.post("/alerts/bulk-resolve")
|
||||
async def bulk_resolve_alerts(
|
||||
tenant_id: UUID,
|
||||
request_body: dict,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Resolve multiple alerts by metadata filter.
|
||||
|
||||
Request body:
|
||||
{
|
||||
"alert_type": "critical_stock_shortage",
|
||||
"metadata_filter": {"ingredient_id": "123"}
|
||||
}
|
||||
"""
|
||||
try:
|
||||
alert_type = request_body.get("alert_type")
|
||||
metadata_filter = request_body.get("metadata_filter", {})
|
||||
|
||||
if not alert_type:
|
||||
raise HTTPException(status_code=400, detail="alert_type is required")
|
||||
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Get matching alerts
|
||||
events = await repo.get_events(
|
||||
tenant_id=tenant_id,
|
||||
event_class="alert",
|
||||
status=["active", "acknowledged"],
|
||||
limit=100
|
||||
)
|
||||
|
||||
# Filter by type and metadata
|
||||
matching_ids = []
|
||||
for event in events:
|
||||
if event.event_type == alert_type:
|
||||
# Check if metadata matches
|
||||
matches = all(
|
||||
event.event_metadata.get(key) == value
|
||||
for key, value in metadata_filter.items()
|
||||
)
|
||||
if matches:
|
||||
matching_ids.append(event.id)
|
||||
|
||||
# Resolve all matching
|
||||
resolved_count = 0
|
||||
for event_id in matching_ids:
|
||||
try:
|
||||
await repo.resolve_event(event_id)
|
||||
resolved_count += 1
|
||||
except Exception as item_error:
# Log and continue with the remaining alerts
logger.warning("bulk_resolve_item_failed", event_id=str(event_id), error=str(item_error))
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"resolved_count": resolved_count,
|
||||
"alert_ids": [str(id) for id in matching_ids]
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("bulk_resolve_failed", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to bulk resolve alerts")
|
||||
|
||||
|
||||
@router.post("/events/{event_id}/interactions")
|
||||
async def record_interaction(
|
||||
tenant_id: UUID,
|
||||
event_id: UUID,
|
||||
request_body: dict,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Record user interaction with an event (for analytics).
|
||||
|
||||
Request body:
|
||||
{
|
||||
"interaction_type": "viewed" | "clicked" | "dismissed" | "acted_upon",
|
||||
"interaction_metadata": {...}
|
||||
}
|
||||
"""
|
||||
try:
|
||||
interaction_type = request_body.get("interaction_type")
|
||||
interaction_metadata = request_body.get("interaction_metadata", {})
|
||||
|
||||
if not interaction_type:
|
||||
raise HTTPException(status_code=400, detail="interaction_type is required")
|
||||
|
||||
repo = EventRepository(db)
|
||||
|
||||
# Verify event exists and belongs to tenant
|
||||
event = await repo.get_event_by_id(event_id)
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Event not found")
|
||||
if event.tenant_id != tenant_id:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
# For now, just return success
|
||||
# In the future, you could store interactions in a separate table
|
||||
logger.info(
|
||||
"interaction_recorded",
|
||||
event_id=str(event_id),
|
||||
interaction_type=interaction_type,
|
||||
metadata=interaction_metadata
|
||||
)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"interaction_id": str(event_id), # Would be a real ID in production
|
||||
"event_id": str(event_id),
|
||||
"interaction_type": interaction_type
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("record_interaction_failed", error=str(e), event_id=str(event_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to record interaction")
|
||||
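All endpoints above take `tenant_id` as a query parameter, and the list filters accept repeated values. A minimal client sketch follows; the host, the `/api/v1` mount point, and the `id` field on `EventResponse` are assumptions, not confirmed by this diff:

```python
import asyncio
from uuid import UUID

import httpx

BASE_URL = "http://alert-processor:8000/api/v1"  # assumed host and mount point


async def fetch_and_acknowledge(tenant_id: UUID) -> None:
    async with httpx.AsyncClient(base_url=BASE_URL, timeout=10.0) as client:
        # List-valued filters are sent as repeated query parameters
        resp = await client.get(
            "/alerts",
            params={
                "tenant_id": str(tenant_id),
                "priority_level": ["critical", "important"],
                "status": ["active"],
                "limit": 20,
            },
        )
        resp.raise_for_status()
        alerts = resp.json()

        if alerts:
            alert_id = alerts[0]["id"]  # assumes EventResponse exposes an "id" field
            ack = await client.post(
                f"/alerts/{alert_id}/acknowledge",
                params={"tenant_id": str(tenant_id)},
            )
            ack.raise_for_status()


asyncio.run(fetch_and_acknowledge(UUID("00000000-0000-0000-0000-000000000001")))
```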
70
services/alert_processor/app/api/sse.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""
|
||||
Server-Sent Events (SSE) API endpoint.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi.responses import StreamingResponse
|
||||
from uuid import UUID
|
||||
from redis.asyncio import Redis
|
||||
import structlog
|
||||
|
||||
from shared.redis_utils import get_redis_client
|
||||
from app.services.sse_service import SSEService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/sse/alerts/{tenant_id}")
|
||||
async def stream_alerts(tenant_id: UUID):
|
||||
"""
|
||||
Stream real-time alerts via Server-Sent Events (SSE).
|
||||
|
||||
Usage (frontend):
|
||||
```javascript
|
||||
const eventSource = new EventSource('/api/v1/sse/alerts/{tenant_id}');
|
||||
eventSource.onmessage = (event) => {
|
||||
const alert = JSON.parse(event.data);
|
||||
console.log('New alert:', alert);
|
||||
};
|
||||
```
|
||||
|
||||
Response format:
|
||||
```
|
||||
data: {"id": "...", "event_type": "...", ...}
|
||||
|
||||
data: {"id": "...", "event_type": "...", ...}
|
||||
|
||||
```
|
||||
"""
|
||||
# Get Redis client from shared utilities
|
||||
redis = await get_redis_client()
|
||||
try:
|
||||
sse_service = SSEService(redis)
|
||||
|
||||
async def event_generator():
|
||||
"""Generator for SSE stream"""
|
||||
try:
|
||||
async for message in sse_service.subscribe_to_tenant(str(tenant_id)):
|
||||
# Format as SSE message
|
||||
yield f"data: {message}\n\n"
|
||||
|
||||
except Exception as e:
|
||||
logger.error("sse_stream_error", error=str(e), tenant_id=str(tenant_id))
|
||||
# Send error message and close
|
||||
yield f"event: error\ndata: {str(e)}\n\n"
|
||||
|
||||
return StreamingResponse(
|
||||
event_generator(),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no" # Disable nginx buffering
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("sse_setup_failed", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to setup SSE stream")
|
||||
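The docstring above shows the browser-side `EventSource` usage; a backend consumer can read the same stream directly. A rough sketch with `httpx` (host and path prefix assumed, matching the JavaScript example):

```python
import asyncio
import json

import httpx


async def consume_alerts(tenant_id: str) -> None:
    # Host and prefix assumed to match the JavaScript example above
    url = f"http://alert-processor:8000/api/v1/sse/alerts/{tenant_id}"
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("GET", url) as response:
            async for line in response.aiter_lines():
                # SSE payload lines look like "data: {...}"; blank lines separate events
                if line.startswith("data: "):
                    alert = json.loads(line[len("data: "):])
                    print("new alert:", alert.get("event_type"))


asyncio.run(consume_alerts("00000000-0000-0000-0000-000000000001"))
```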
0
services/alert_processor/app/consumer/__init__.py
Normal file
295
services/alert_processor/app/consumer/event_consumer.py
Normal file
@@ -0,0 +1,295 @@
|
||||
"""
|
||||
RabbitMQ event consumer.
|
||||
|
||||
Consumes minimal events from services and processes them through
|
||||
the enrichment pipeline.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import datetime, timezone
|
||||
from aio_pika import connect_robust, IncomingMessage, Connection, Channel
|
||||
import structlog
|
||||
|
||||
from app.core.config import settings
|
||||
from app.core.database import AsyncSessionLocal
|
||||
from shared.messaging import MinimalEvent
|
||||
from app.services.enrichment_orchestrator import EnrichmentOrchestrator
|
||||
from app.repositories.event_repository import EventRepository
|
||||
from shared.clients.notification_client import create_notification_client
|
||||
from app.services.sse_service import SSEService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class EventConsumer:
|
||||
"""
|
||||
RabbitMQ consumer for processing events.
|
||||
|
||||
Workflow:
|
||||
1. Receive minimal event from service
|
||||
2. Enrich with context (AI, priority, impact, etc.)
|
||||
3. Store in database
|
||||
4. Send to notification service
|
||||
5. Publish to SSE stream
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.connection: Connection | None = None
self.channel: Channel | None = None
|
||||
self.enricher = EnrichmentOrchestrator()
|
||||
self.notification_client = create_notification_client(settings)
|
||||
self.sse_svc = SSEService()
|
||||
|
||||
async def start(self):
|
||||
"""Start consuming events from RabbitMQ"""
|
||||
try:
|
||||
# Connect to RabbitMQ
|
||||
self.connection = await connect_robust(
|
||||
settings.RABBITMQ_URL,
|
||||
client_properties={"connection_name": "alert-processor"}
|
||||
)
|
||||
|
||||
self.channel = await self.connection.channel()
|
||||
await self.channel.set_qos(prefetch_count=10)
|
||||
|
||||
# Declare queue
|
||||
queue = await self.channel.declare_queue(
|
||||
settings.RABBITMQ_QUEUE,
|
||||
durable=True
|
||||
)
|
||||
|
||||
# Bind to events exchange with routing patterns
|
||||
exchange = await self.channel.declare_exchange(
|
||||
settings.RABBITMQ_EXCHANGE,
|
||||
"topic",
|
||||
durable=True
|
||||
)
|
||||
|
||||
# Bind to alert, notification, and recommendation events
|
||||
await queue.bind(exchange, routing_key="alert.#")
|
||||
await queue.bind(exchange, routing_key="notification.#")
|
||||
await queue.bind(exchange, routing_key="recommendation.#")
|
||||
|
||||
# Start consuming
|
||||
await queue.consume(self.process_message)
|
||||
|
||||
logger.info(
|
||||
"event_consumer_started",
|
||||
queue=settings.RABBITMQ_QUEUE,
|
||||
exchange=settings.RABBITMQ_EXCHANGE
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("consumer_start_failed", error=str(e))
|
||||
raise
|
||||
|
||||
async def process_message(self, message: IncomingMessage):
|
||||
"""
|
||||
Process incoming event message.
|
||||
|
||||
Steps:
|
||||
1. Parse message
|
||||
2. Validate as MinimalEvent
|
||||
3. Enrich event
|
||||
4. Store in database
|
||||
5. Send notification
|
||||
6. Publish to SSE
|
||||
7. Acknowledge message
|
||||
"""
|
||||
async with message.process():
|
||||
try:
|
||||
# Parse message
|
||||
data = json.loads(message.body.decode())
|
||||
event = MinimalEvent(**data)
|
||||
|
||||
logger.info(
|
||||
"event_received",
|
||||
event_type=event.event_type,
|
||||
event_class=event.event_class,
|
||||
tenant_id=event.tenant_id
|
||||
)
|
||||
|
||||
# Enrich the event
|
||||
enriched_event = await self.enricher.enrich_event(event)
|
||||
|
||||
# Check for duplicate alerts before storing
|
||||
async with AsyncSessionLocal() as session:
|
||||
repo = EventRepository(session)
|
||||
|
||||
# Check for duplicate if it's an alert
|
||||
if event.event_class == "alert":
|
||||
from uuid import UUID
|
||||
duplicate_event = await repo.check_duplicate_alert(
|
||||
tenant_id=UUID(event.tenant_id),
|
||||
event_type=event.event_type,
|
||||
entity_links=enriched_event.entity_links,
|
||||
event_metadata=enriched_event.event_metadata,
|
||||
time_window_hours=24 # Check for duplicates in last 24 hours
|
||||
)
|
||||
|
||||
if duplicate_event:
|
||||
logger.info(
|
||||
"Duplicate alert detected, skipping",
|
||||
event_type=event.event_type,
|
||||
tenant_id=event.tenant_id,
|
||||
duplicate_event_id=str(duplicate_event.id)
|
||||
)
|
||||
# Update the existing event's metadata instead of creating a new one
|
||||
# This could include updating delay times, affected orders, etc.
|
||||
duplicate_event.event_metadata = enriched_event.event_metadata
|
||||
duplicate_event.updated_at = datetime.now(timezone.utc)
|
||||
duplicate_event.priority_score = enriched_event.priority_score
|
||||
duplicate_event.priority_level = enriched_event.priority_level
|
||||
|
||||
# Update other relevant fields that might have changed
|
||||
duplicate_event.urgency = enriched_event.urgency.dict() if enriched_event.urgency else None
|
||||
duplicate_event.business_impact = enriched_event.business_impact.dict() if enriched_event.business_impact else None
|
||||
|
||||
await session.commit()
|
||||
await session.refresh(duplicate_event)
|
||||
|
||||
# Send notification for updated event
|
||||
await self._send_notification(duplicate_event)
|
||||
|
||||
# Publish to SSE
|
||||
await self.sse_svc.publish_event(duplicate_event)
|
||||
|
||||
logger.info(
|
||||
"Duplicate alert updated",
|
||||
event_id=str(duplicate_event.id),
|
||||
event_type=event.event_type,
|
||||
priority_level=duplicate_event.priority_level,
|
||||
priority_score=duplicate_event.priority_score
|
||||
)
|
||||
return # Exit early since we handled the duplicate
|
||||
else:
|
||||
logger.info(
|
||||
"New unique alert, proceeding with creation",
|
||||
event_type=event.event_type,
|
||||
tenant_id=event.tenant_id
|
||||
)
|
||||
|
||||
# Store in database (if not a duplicate)
|
||||
stored_event = await repo.create_event(enriched_event)
|
||||
|
||||
# Send to notification service (if alert)
|
||||
if event.event_class == "alert":
|
||||
await self._send_notification(stored_event)
|
||||
|
||||
# Publish to SSE
|
||||
await self.sse_svc.publish_event(stored_event)
|
||||
|
||||
logger.info(
|
||||
"event_processed",
|
||||
event_id=stored_event.id,
|
||||
event_type=event.event_type,
|
||||
priority_level=stored_event.priority_level,
|
||||
priority_score=stored_event.priority_score
|
||||
)
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(
|
||||
"message_parse_failed",
|
||||
error=str(e),
|
||||
message_body=message.body[:200].decode(errors="replace")
|
||||
)
|
||||
# Don't requeue - bad message format
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"event_processing_failed",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
# Note: with message.process() defaults (requeue=False), an exception escaping here rejects the message rather than requeueing it
|
||||
|
||||
async def _send_notification(self, event):
|
||||
"""
|
||||
Send notification using the shared notification client.
|
||||
|
||||
Args:
|
||||
event: The event to send as a notification
|
||||
"""
|
||||
try:
|
||||
# Prepare notification message
|
||||
# Use i18n title and message from the event as the notification content
|
||||
title = event.i18n_title_key if event.i18n_title_key else f"Alert: {event.event_type}"
|
||||
message = event.i18n_message_key if event.i18n_message_key else f"New alert: {event.event_type}"
|
||||
|
||||
# Add parameters to make it more informative
|
||||
if event.i18n_title_params:
|
||||
title += f" - {event.i18n_title_params}"
|
||||
if event.i18n_message_params:
|
||||
message += f" - {event.i18n_message_params}"
|
||||
|
||||
# Prepare metadata from the event
|
||||
metadata = {
|
||||
"event_id": str(event.id),
|
||||
"event_type": event.event_type,
|
||||
"event_domain": event.event_domain,
|
||||
"priority_score": event.priority_score,
|
||||
"priority_level": event.priority_level,
|
||||
"status": event.status,
|
||||
"created_at": event.created_at.isoformat() if event.created_at else None,
|
||||
"type_class": event.type_class,
|
||||
"smart_actions": event.smart_actions,
|
||||
"entity_links": event.entity_links
|
||||
}
|
||||
|
||||
# Determine notification priority based on event priority
|
||||
priority_map = {
|
||||
"critical": "urgent",
|
||||
"important": "high",
|
||||
"standard": "normal",
|
||||
"info": "low"
|
||||
}
|
||||
priority = priority_map.get(event.priority_level, "normal")
|
||||
|
||||
# Send notification using shared client
|
||||
result = await self.notification_client.send_notification(
|
||||
tenant_id=str(event.tenant_id),
|
||||
notification_type="in_app", # Using in-app notification by default
|
||||
message=message,
|
||||
subject=title,
|
||||
priority=priority,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
if result:
|
||||
logger.info(
|
||||
"notification_sent_via_shared_client",
|
||||
event_id=str(event.id),
|
||||
tenant_id=str(event.tenant_id),
|
||||
priority_level=event.priority_level
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
"notification_failed_via_shared_client",
|
||||
event_id=str(event.id),
|
||||
tenant_id=str(event.tenant_id)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"notification_error_via_shared_client",
|
||||
error=str(e),
|
||||
event_id=str(event.id),
|
||||
tenant_id=str(event.tenant_id)
|
||||
)
|
||||
# Don't re-raise - we don't want to fail the entire event processing
|
||||
# if notification sending fails
|
||||
|
||||
async def stop(self):
|
||||
"""Stop consumer and close connections"""
|
||||
try:
|
||||
if self.channel:
|
||||
await self.channel.close()
|
||||
logger.info("rabbitmq_channel_closed")
|
||||
|
||||
if self.connection:
|
||||
await self.connection.close()
|
||||
logger.info("rabbitmq_connection_closed")
|
||||
|
||||
except Exception as e:
|
||||
logger.error("consumer_stop_failed", error=str(e))
|
||||
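`EventConsumer` is designed to run once per process: `start()` wires the queue bindings and begins consuming, `stop()` closes the channel and connection. The application wiring is not part of this excerpt, so the FastAPI lifespan sketch below is only an assumed illustration of the intended usage:

```python
from contextlib import asynccontextmanager

from fastapi import FastAPI

from app.consumer.event_consumer import EventConsumer


@asynccontextmanager
async def lifespan(app: FastAPI):
    consumer = EventConsumer()
    await consumer.start()   # declares the queue, binds routing keys, begins consuming
    try:
        yield
    finally:
        await consumer.stop()  # closes channel and connection


app = FastAPI(lifespan=lifespan)
```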
0
services/alert_processor/app/core/__init__.py
Normal file
51
services/alert_processor/app/core/config.py
Normal file
@@ -0,0 +1,51 @@
|
||||
"""
|
||||
Configuration settings for alert processor service.
|
||||
"""
|
||||
|
||||
import os
|
||||
from shared.config.base import BaseServiceSettings
|
||||
|
||||
|
||||
class Settings(BaseServiceSettings):
|
||||
"""Application settings"""
|
||||
|
||||
# Service info - override defaults
|
||||
SERVICE_NAME: str = "alert-processor"
|
||||
APP_NAME: str = "Alert Processor Service"
|
||||
DESCRIPTION: str = "Central alert and recommendation processor"
|
||||
VERSION: str = "2.0.0"
|
||||
|
||||
# Alert processor specific settings
|
||||
RABBITMQ_EXCHANGE: str = "events.exchange"
|
||||
RABBITMQ_QUEUE: str = "alert_processor.queue"
|
||||
REDIS_SSE_PREFIX: str = "alerts"
|
||||
ORCHESTRATOR_TIMEOUT: int = 10
|
||||
NOTIFICATION_TIMEOUT: int = 5
|
||||
CACHE_ENABLED: bool = True
|
||||
CACHE_TTL_SECONDS: int = 300
|
||||
|
||||
@property
|
||||
def NOTIFICATION_URL(self) -> str:
|
||||
"""Get notification service URL for backwards compatibility"""
|
||||
return self.NOTIFICATION_SERVICE_URL
|
||||
|
||||
# Database configuration (secure approach - build from components)
|
||||
@property
|
||||
def DATABASE_URL(self) -> str:
|
||||
"""Build database URL from secure components"""
|
||||
# Try complete URL first (for backward compatibility)
|
||||
complete_url = os.getenv("ALERT_PROCESSOR_DATABASE_URL")
|
||||
if complete_url:
|
||||
return complete_url
|
||||
|
||||
# Build from components (secure approach)
|
||||
user = os.getenv("ALERT_PROCESSOR_DB_USER", "alert_processor_user")
|
||||
password = os.getenv("ALERT_PROCESSOR_DB_PASSWORD", "alert_processor_pass123")
|
||||
host = os.getenv("ALERT_PROCESSOR_DB_HOST", "alert-processor-db-service")
|
||||
port = os.getenv("ALERT_PROCESSOR_DB_PORT", "5432")
|
||||
name = os.getenv("ALERT_PROCESSOR_DB_NAME", "alert_processor_db")
|
||||
|
||||
return f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{name}"
|
||||
|
||||
|
||||
settings = Settings()
|
||||
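`DATABASE_URL` prefers a complete `ALERT_PROCESSOR_DATABASE_URL` and otherwise assembles the DSN from the individual `ALERT_PROCESSOR_DB_*` variables. A small sketch of the resolution order (illustrative values; it assumes `BaseServiceSettings` supplies defaults for its remaining fields):

```python
import os

from app.core.config import Settings

# 1. Component-based configuration (the secure path)
os.environ.pop("ALERT_PROCESSOR_DATABASE_URL", None)
os.environ["ALERT_PROCESSOR_DB_USER"] = "alert_processor_user"
os.environ["ALERT_PROCESSOR_DB_HOST"] = "localhost"
print(Settings().DATABASE_URL)
# -> postgresql+asyncpg://alert_processor_user:...@localhost:5432/alert_processor_db

# 2. A complete URL, when set, takes precedence over the components
os.environ["ALERT_PROCESSOR_DATABASE_URL"] = "postgresql+asyncpg://user:pass@db:5432/alerts"
print(Settings().DATABASE_URL)
# -> postgresql+asyncpg://user:pass@db:5432/alerts
```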
48
services/alert_processor/app/core/database.py
Normal file
@@ -0,0 +1,48 @@
|
||||
"""
|
||||
Database connection and session management for Alert Processor Service
|
||||
"""
|
||||
|
||||
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
|
||||
from .config import settings
|
||||
|
||||
from shared.database.base import DatabaseManager
|
||||
|
||||
# Initialize database manager
|
||||
database_manager = DatabaseManager(
|
||||
database_url=settings.DATABASE_URL,
|
||||
service_name=settings.SERVICE_NAME,
|
||||
pool_size=settings.DB_POOL_SIZE,
|
||||
max_overflow=settings.DB_MAX_OVERFLOW,
|
||||
echo=settings.DEBUG
|
||||
)
|
||||
|
||||
# Create async session factory
|
||||
AsyncSessionLocal = async_sessionmaker(
|
||||
database_manager.async_engine,
|
||||
class_=AsyncSession,
|
||||
expire_on_commit=False,
|
||||
autocommit=False,
|
||||
autoflush=False,
|
||||
)
|
||||
|
||||
|
||||
async def get_db() -> AsyncSession:
|
||||
"""
|
||||
Dependency to get database session.
|
||||
Used in FastAPI endpoints via Depends(get_db).
|
||||
"""
|
||||
async with AsyncSessionLocal() as session:
|
||||
try:
|
||||
yield session
|
||||
finally:
|
||||
await session.close()
|
||||
|
||||
|
||||
async def init_db():
|
||||
"""Initialize database (create tables if needed)"""
|
||||
await database_manager.create_all()
|
||||
|
||||
|
||||
async def close_db():
|
||||
"""Close database connections"""
|
||||
await database_manager.close()
|
||||
1
services/alert_processor/app/enrichment/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Enrichment components for alert processing."""
|
||||
156
services/alert_processor/app/enrichment/business_impact.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""
|
||||
Business impact analyzer for alerts.
|
||||
|
||||
Calculates financial impact, affected orders, customer impact, and other
|
||||
business metrics from event metadata.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class BusinessImpactAnalyzer:
|
||||
"""Analyze business impact from event metadata"""
|
||||
|
||||
def analyze(self, event_type: str, metadata: Dict[str, Any]) -> dict:
|
||||
"""
|
||||
Analyze business impact for an event.
|
||||
|
||||
Returns dict with:
|
||||
- financial_impact_eur: Direct financial cost
|
||||
- affected_orders: Number of orders impacted
|
||||
- affected_customers: List of customer names
|
||||
- production_delay_hours: Hours of production delay
|
||||
- estimated_revenue_loss_eur: Potential revenue loss
|
||||
- customer_impact: high/medium/low
|
||||
- waste_risk_kg: Potential waste in kg
|
||||
"""
|
||||
|
||||
impact = {
|
||||
"financial_impact_eur": 0,
|
||||
"affected_orders": 0,
|
||||
"affected_customers": [],
|
||||
"production_delay_hours": 0,
|
||||
"estimated_revenue_loss_eur": 0,
|
||||
"customer_impact": "low",
|
||||
"waste_risk_kg": 0
|
||||
}
|
||||
|
||||
# Stock-related impacts
|
||||
if "stock" in event_type or "shortage" in event_type:
|
||||
impact.update(self._analyze_stock_impact(metadata))
|
||||
|
||||
# Production-related impacts
|
||||
elif "production" in event_type or "delay" in event_type or "equipment" in event_type:
|
||||
impact.update(self._analyze_production_impact(metadata))
|
||||
|
||||
# Procurement-related impacts
|
||||
elif "po_" in event_type or "delivery" in event_type:
|
||||
impact.update(self._analyze_procurement_impact(metadata))
|
||||
|
||||
# Quality-related impacts
|
||||
elif "quality" in event_type or "expired" in event_type:
|
||||
impact.update(self._analyze_quality_impact(metadata))
|
||||
|
||||
return impact
|
||||
|
||||
def _analyze_stock_impact(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze impact of stock-related alerts"""
|
||||
impact = {}
|
||||
|
||||
# Calculate financial impact
|
||||
shortage_amount = metadata.get("shortage_amount", 0)
|
||||
unit_cost = metadata.get("unit_cost", 5) # Default €5/kg
|
||||
impact["financial_impact_eur"] = float(shortage_amount) * unit_cost
|
||||
|
||||
# Affected orders from metadata
|
||||
impact["affected_orders"] = metadata.get("affected_orders", 0)
|
||||
|
||||
# Customer impact based on affected orders
|
||||
if impact["affected_orders"] > 5:
|
||||
impact["customer_impact"] = "high"
|
||||
elif impact["affected_orders"] > 2:
|
||||
impact["customer_impact"] = "medium"
|
||||
|
||||
# Revenue loss (estimated)
|
||||
avg_order_value = 50 # €50 per order
|
||||
impact["estimated_revenue_loss_eur"] = impact["affected_orders"] * avg_order_value
|
||||
|
||||
return impact
|
||||
|
||||
def _analyze_production_impact(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze impact of production-related alerts"""
|
||||
impact = {}
|
||||
|
||||
# Delay minutes to hours
|
||||
delay_minutes = metadata.get("delay_minutes", 0)
|
||||
impact["production_delay_hours"] = round(delay_minutes / 60, 1)
|
||||
|
||||
# Affected orders and customers
|
||||
impact["affected_orders"] = metadata.get("affected_orders", 0)
|
||||
|
||||
customer_names = metadata.get("customer_names", [])
|
||||
impact["affected_customers"] = customer_names
|
||||
|
||||
# Customer impact based on delay
|
||||
if delay_minutes > 120: # 2+ hours
|
||||
impact["customer_impact"] = "high"
|
||||
elif delay_minutes > 60: # 1+ hours
|
||||
impact["customer_impact"] = "medium"
|
||||
|
||||
# Financial impact: hourly production cost
|
||||
hourly_cost = 100 # €100/hour operational cost
|
||||
impact["financial_impact_eur"] = impact["production_delay_hours"] * hourly_cost
|
||||
|
||||
# Revenue loss
|
||||
if impact["affected_orders"] > 0:
|
||||
avg_order_value = 50
|
||||
impact["estimated_revenue_loss_eur"] = impact["affected_orders"] * avg_order_value
|
||||
|
||||
return impact
|
||||
|
||||
def _analyze_procurement_impact(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze impact of procurement-related alerts"""
|
||||
impact = {}
|
||||
|
||||
# Extract potential_loss_eur from reasoning_data.parameters
|
||||
reasoning_data = metadata.get("reasoning_data", {})
|
||||
parameters = reasoning_data.get("parameters", {})
|
||||
potential_loss_eur = parameters.get("potential_loss_eur")
|
||||
|
||||
# Use potential loss from reasoning as financial impact (what's at risk)
|
||||
# Fallback to PO amount only if reasoning data is not available
|
||||
if potential_loss_eur is not None:
|
||||
impact["financial_impact_eur"] = float(potential_loss_eur)
|
||||
else:
|
||||
po_amount = metadata.get("po_amount", metadata.get("total_amount", 0))
|
||||
impact["financial_impact_eur"] = float(po_amount)
|
||||
|
||||
# Days overdue affects customer impact
|
||||
days_overdue = metadata.get("days_overdue", 0)
|
||||
if days_overdue > 3:
|
||||
impact["customer_impact"] = "high"
|
||||
elif days_overdue > 1:
|
||||
impact["customer_impact"] = "medium"
|
||||
|
||||
return impact
|
||||
|
||||
def _analyze_quality_impact(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze impact of quality-related alerts"""
|
||||
impact = {}
|
||||
|
||||
# Expired products
|
||||
expired_count = metadata.get("expired_count", 0)
|
||||
total_value = metadata.get("total_value", 0)
|
||||
|
||||
impact["financial_impact_eur"] = float(total_value)
|
||||
impact["waste_risk_kg"] = metadata.get("total_quantity_kg", 0)
|
||||
|
||||
if expired_count > 5:
|
||||
impact["customer_impact"] = "high"
|
||||
elif expired_count > 2:
|
||||
impact["customer_impact"] = "medium"
|
||||
|
||||
return impact
|
||||
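A short illustration of the analyzer on a stock-shortage event; the metadata keys match the ones read in `_analyze_stock_impact`, and the figures are invented:

```python
from app.enrichment.business_impact import BusinessImpactAnalyzer

analyzer = BusinessImpactAnalyzer()
impact = analyzer.analyze(
    event_type="critical_stock_shortage",
    metadata={"shortage_amount": 12.5, "unit_cost": 4.0, "affected_orders": 6},
)

# financial_impact_eur        = 12.5 * 4.0 = 50.0
# customer_impact             = "high" (more than 5 affected orders)
# estimated_revenue_loss_eur  = 6 * 50 = 300
print(impact["financial_impact_eur"], impact["customer_impact"], impact["estimated_revenue_loss_eur"])
```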
244
services/alert_processor/app/enrichment/message_generator.py
Normal file
@@ -0,0 +1,244 @@
|
||||
"""
|
||||
Message generator for creating i18n message keys and parameters.
|
||||
|
||||
Converts minimal event metadata into structured i18n format for frontend translation.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
from datetime import datetime
|
||||
from app.utils.message_templates import ALERT_TEMPLATES, NOTIFICATION_TEMPLATES, RECOMMENDATION_TEMPLATES
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class MessageGenerator:
|
||||
"""Generates i18n message keys and parameters from event metadata"""
|
||||
|
||||
def generate_message(self, event_type: str, metadata: Dict[str, Any], event_class: str = "alert") -> dict:
|
||||
"""
|
||||
Generate i18n structure for frontend.
|
||||
|
||||
Args:
|
||||
event_type: Alert/notification/recommendation type
|
||||
metadata: Event metadata dictionary
|
||||
event_class: One of: alert, notification, recommendation
|
||||
|
||||
Returns:
|
||||
Dictionary with title_key, title_params, message_key, message_params
|
||||
"""
|
||||
|
||||
# Select appropriate template collection
|
||||
if event_class == "notification":
|
||||
templates = NOTIFICATION_TEMPLATES
|
||||
elif event_class == "recommendation":
|
||||
templates = RECOMMENDATION_TEMPLATES
|
||||
else:
|
||||
templates = ALERT_TEMPLATES
|
||||
|
||||
template = templates.get(event_type)
|
||||
|
||||
if not template:
|
||||
logger.warning("no_template_found", event_type=event_type, event_class=event_class)
|
||||
return self._generate_fallback(event_type, metadata)
|
||||
|
||||
# Build parameters from metadata
|
||||
title_params = self._build_params(template["title_params"], metadata)
|
||||
message_params = self._build_params(template["message_params"], metadata)
|
||||
|
||||
# Select message variant based on context
|
||||
message_key = self._select_message_variant(
|
||||
template["message_variants"],
|
||||
metadata
|
||||
)
|
||||
|
||||
return {
|
||||
"title_key": template["title_key"],
|
||||
"title_params": title_params,
|
||||
"message_key": message_key,
|
||||
"message_params": message_params
|
||||
}
|
||||
|
||||
def _generate_fallback(self, event_type: str, metadata: Dict[str, Any]) -> dict:
|
||||
"""Generate fallback message structure when template not found"""
|
||||
return {
|
||||
"title_key": "alerts.generic.title",
|
||||
"title_params": {},
|
||||
"message_key": "alerts.generic.message",
|
||||
"message_params": {
|
||||
"event_type": event_type,
|
||||
"metadata_summary": self._summarize_metadata(metadata)
|
||||
}
|
||||
}
|
||||
|
||||
def _summarize_metadata(self, metadata: Dict[str, Any]) -> str:
|
||||
"""Create human-readable summary of metadata"""
|
||||
# Take first 3 fields
|
||||
items = list(metadata.items())[:3]
|
||||
summary_parts = [f"{k}: {v}" for k, v in items]
|
||||
return ", ".join(summary_parts)
|
||||
|
||||
def _build_params(self, param_mapping: dict, metadata: dict) -> dict:
|
||||
"""
|
||||
Extract and transform parameters from metadata.
|
||||
|
||||
param_mapping format: {"display_param_name": "metadata_key"}
|
||||
"""
|
||||
params = {}
|
||||
|
||||
for param_key, metadata_key in param_mapping.items():
|
||||
if metadata_key in metadata:
|
||||
value = metadata[metadata_key]
|
||||
|
||||
# Apply transformations based on parameter suffix
|
||||
if param_key.endswith("_kg"):
|
||||
value = round(float(value), 1)
|
||||
elif param_key.endswith("_eur"):
|
||||
value = round(float(value), 2)
|
||||
elif param_key.endswith("_percentage"):
|
||||
value = round(float(value), 1)
|
||||
elif param_key.endswith("_date"):
|
||||
value = self._format_date(value)
|
||||
elif param_key.endswith("_day_name"):
|
||||
value = self._format_day_name(value)
|
||||
elif param_key.endswith("_datetime"):
|
||||
value = self._format_datetime(value)
|
||||
|
||||
params[param_key] = value
|
||||
|
||||
return params
|
||||
|
||||
def _select_message_variant(self, variants: dict, metadata: dict) -> str:
|
||||
"""
|
||||
Select appropriate message variant based on metadata context.
|
||||
|
||||
Checks for specific conditions in priority order.
|
||||
"""
|
||||
|
||||
# Check for PO-related variants
|
||||
if "po_id" in metadata:
|
||||
if metadata.get("po_status") == "pending_approval":
|
||||
variant = variants.get("with_po_pending")
|
||||
if variant:
|
||||
return variant
|
||||
else:
|
||||
variant = variants.get("with_po_created")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for time-based variants
|
||||
if "hours_until" in metadata:
|
||||
variant = variants.get("with_hours")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
if "production_date" in metadata or "planned_date" in metadata:
|
||||
variant = variants.get("with_date")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for customer-related variants
|
||||
if "customer_names" in metadata and metadata.get("customer_names"):
|
||||
variant = variants.get("with_customers")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for order-related variants
|
||||
if "affected_orders" in metadata and metadata.get("affected_orders", 0) > 0:
|
||||
variant = variants.get("with_orders")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for supplier contact variants
|
||||
if "supplier_contact" in metadata:
|
||||
variant = variants.get("with_supplier")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for batch-related variants
|
||||
if "affected_batches" in metadata and metadata.get("affected_batches", 0) > 0:
|
||||
variant = variants.get("with_batches")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for product names list variants
|
||||
if "product_names" in metadata and metadata.get("product_names"):
|
||||
variant = variants.get("with_names")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Check for time duration variants
|
||||
if "hours_overdue" in metadata:
|
||||
variant = variants.get("with_hours")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
if "days_overdue" in metadata:
|
||||
variant = variants.get("with_days")
|
||||
if variant:
|
||||
return variant
|
||||
|
||||
# Default to generic variant
|
||||
return variants.get("generic", next(iter(variants.values())))
|
||||
|
||||
def _format_date(self, date_value: Any) -> str:
|
||||
"""
|
||||
Format date for display.
|
||||
|
||||
Accepts:
|
||||
- ISO string: "2025-12-10"
|
||||
- datetime object
|
||||
- date object
|
||||
|
||||
Returns: ISO format "YYYY-MM-DD"
|
||||
"""
|
||||
if isinstance(date_value, str):
|
||||
# Already a string, might be ISO format
|
||||
try:
|
||||
dt = datetime.fromisoformat(date_value.replace('Z', '+00:00'))
|
||||
return dt.date().isoformat()
|
||||
except ValueError:
|
||||
return date_value
|
||||
|
||||
if isinstance(date_value, datetime):
|
||||
return date_value.date().isoformat()
|
||||
|
||||
if hasattr(date_value, 'isoformat'):
|
||||
return date_value.isoformat()
|
||||
|
||||
return str(date_value)
|
||||
|
||||
def _format_day_name(self, date_value: Any) -> str:
|
||||
"""
|
||||
Format day name with date.
|
||||
|
||||
Example: "miércoles 10 de diciembre"
|
||||
|
||||
Note: Frontend will handle localization.
|
||||
For now, return ISO date and let frontend format.
|
||||
"""
|
||||
iso_date = self._format_date(date_value)
|
||||
|
||||
# Frontend will use this date to format the day name in the user's language
return iso_date
|
||||
|
||||
def _format_datetime(self, datetime_value: Any) -> str:
|
||||
"""
|
||||
Format datetime for display.
|
||||
|
||||
Returns: ISO 8601 format with timezone
|
||||
"""
|
||||
if isinstance(datetime_value, str):
|
||||
return datetime_value
|
||||
|
||||
if isinstance(datetime_value, datetime):
|
||||
return datetime_value.isoformat()
|
||||
|
||||
if hasattr(datetime_value, 'isoformat'):
|
||||
return datetime_value.isoformat()
|
||||
|
||||
return str(datetime_value)
|
||||
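`MessageGenerator` expects each template to carry `title_key`, `title_params`, `message_params`, and `message_variants`. The real definitions live in `app.utils.message_templates`, which is not part of this excerpt, so the entry below is a hypothetical illustration of the shape only:

```python
# Hypothetical ALERT_TEMPLATES entry showing the structure MessageGenerator reads;
# the actual keys live in app/utils/message_templates.py, outside this excerpt.
ALERT_TEMPLATE_EXAMPLE = {
    "critical_stock_shortage": {
        "title_key": "alerts.stock_shortage.title",
        "title_params": {"ingredient_name": "ingredient_name"},  # display param -> metadata key
        "message_params": {
            "shortage_kg": "shortage_amount",     # "_kg" suffix triggers rounding to 1 decimal
            "affected_orders": "affected_orders",
        },
        "message_variants": {
            "generic": "alerts.stock_shortage.message",
            "with_orders": "alerts.stock_shortage.message_with_orders",
            "with_po_created": "alerts.stock_shortage.message_po_created",
        },
    }
}
```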
165
services/alert_processor/app/enrichment/orchestrator_client.py
Normal file
@@ -0,0 +1,165 @@
|
||||
"""
|
||||
Orchestrator client for querying AI action context.
|
||||
|
||||
Queries the orchestrator service to determine if AI has already
|
||||
addressed the issue and what actions were taken.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, Optional
|
||||
import httpx
|
||||
import structlog
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class OrchestratorClient:
|
||||
"""HTTP client for querying orchestrator service"""
|
||||
|
||||
def __init__(self, base_url: str = "http://orchestrator-service:8000"):
|
||||
"""
|
||||
Initialize orchestrator client.
|
||||
|
||||
Args:
|
||||
base_url: Base URL of orchestrator service
|
||||
"""
|
||||
self.base_url = base_url
|
||||
self.timeout = 10.0 # 10 second timeout
|
||||
|
||||
async def get_context(
|
||||
self,
|
||||
tenant_id: str,
|
||||
event_type: str,
|
||||
metadata: Dict[str, Any]
|
||||
) -> dict:
|
||||
"""
|
||||
Query orchestrator for AI action context.
|
||||
|
||||
Returns dict with:
|
||||
- already_addressed: Boolean - did AI handle this?
|
||||
- action_type: Type of action taken
|
||||
- action_id: ID of the action
|
||||
- action_summary: Human-readable summary
|
||||
- reasoning: AI reasoning for the action
|
||||
- confidence: Confidence score (0-1)
|
||||
- estimated_savings_eur: Estimated savings
|
||||
- prevented_issue: What issue was prevented
|
||||
- created_at: When action was created
|
||||
"""
|
||||
|
||||
context = {
|
||||
"already_addressed": False,
|
||||
"confidence": 0.8 # Default confidence
|
||||
}
|
||||
|
||||
try:
|
||||
# Build query based on event type and metadata
|
||||
query_params = self._build_query_params(event_type, metadata)
|
||||
|
||||
async with httpx.AsyncClient(timeout=self.timeout) as client:
|
||||
response = await client.get(
|
||||
f"{self.base_url}/api/internal/recent-actions",
|
||||
params={
|
||||
"tenant_id": tenant_id,
|
||||
**query_params
|
||||
},
|
||||
headers={
|
||||
"x-internal-service": "alert-intelligence"
|
||||
}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
context.update(self._parse_response(data, event_type, metadata))
|
||||
|
||||
elif response.status_code == 404:
|
||||
# No recent actions found - that's okay
|
||||
logger.debug("no_orchestrator_actions", tenant_id=tenant_id, event_type=event_type)
|
||||
|
||||
else:
|
||||
logger.warning(
|
||||
"orchestrator_query_failed",
|
||||
status_code=response.status_code,
|
||||
tenant_id=tenant_id
|
||||
)
|
||||
|
||||
except httpx.TimeoutException:
|
||||
logger.warning("orchestrator_timeout", tenant_id=tenant_id, event_type=event_type)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("orchestrator_query_error", error=str(e), tenant_id=tenant_id)
|
||||
|
||||
return context
|
||||
|
||||
def _build_query_params(self, event_type: str, metadata: Dict[str, Any]) -> dict:
|
||||
"""Build query parameters based on event type"""
|
||||
params = {}
|
||||
|
||||
# For stock-related alerts, query for PO actions
|
||||
if "stock" in event_type or "shortage" in event_type:
|
||||
if metadata.get("ingredient_id"):
|
||||
params["related_entity_type"] = "ingredient"
|
||||
params["related_entity_id"] = metadata["ingredient_id"]
|
||||
params["action_types"] = "purchase_order_created,purchase_order_approved"
|
||||
|
||||
# For production delays, query for batch adjustments
|
||||
elif "production" in event_type or "delay" in event_type:
|
||||
if metadata.get("batch_id"):
|
||||
params["related_entity_type"] = "production_batch"
|
||||
params["related_entity_id"] = metadata["batch_id"]
|
||||
params["action_types"] = "production_adjusted,batch_rescheduled"
|
||||
|
||||
# For PO approval, check if already approved
|
||||
elif "po_approval" in event_type:
|
||||
if metadata.get("po_id"):
|
||||
params["related_entity_type"] = "purchase_order"
|
||||
params["related_entity_id"] = metadata["po_id"]
|
||||
params["action_types"] = "purchase_order_approved,purchase_order_rejected"
|
||||
|
||||
# Look for recent actions (last 24 hours)
|
||||
params["since_hours"] = 24
|
||||
|
||||
return params
|
||||
|
||||
def _parse_response(
|
||||
self,
|
||||
data: dict,
|
||||
event_type: str,
|
||||
metadata: Dict[str, Any]
|
||||
) -> dict:
|
||||
"""Parse orchestrator response into context"""
|
||||
|
||||
if not data or not data.get("actions"):
|
||||
return {"already_addressed": False}
|
||||
|
||||
# Get most recent action
|
||||
actions = data.get("actions", [])
|
||||
if not actions:
|
||||
return {"already_addressed": False}
|
||||
|
||||
most_recent = actions[0]
|
||||
|
||||
context = {
|
||||
"already_addressed": True,
|
||||
"action_type": most_recent.get("action_type"),
|
||||
"action_id": most_recent.get("id"),
|
||||
"action_summary": most_recent.get("summary", ""),
|
||||
"reasoning": most_recent.get("reasoning", {}),
|
||||
"confidence": most_recent.get("confidence", 0.8),
|
||||
"created_at": most_recent.get("created_at"),
|
||||
"action_status": most_recent.get("status", "completed")
|
||||
}
|
||||
|
||||
# Extract specific fields based on action type
|
||||
if most_recent.get("action_type") == "purchase_order_created":
|
||||
context["estimated_savings_eur"] = most_recent.get("estimated_savings_eur", 0)
|
||||
context["prevented_issue"] = "stockout"
|
||||
|
||||
if most_recent.get("delivery_date"):
|
||||
context["delivery_date"] = most_recent["delivery_date"]
|
||||
|
||||
elif most_recent.get("action_type") == "production_adjusted":
|
||||
context["prevented_issue"] = "production_delay"
|
||||
context["adjustment_type"] = most_recent.get("adjustment_type")
|
||||
|
||||
return context
|
||||
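A sketch of how the enrichment step might consult the client for a stock alert; the endpoint and internal header come from the code above, while the tenant and ingredient IDs are placeholders:

```python
import asyncio

from app.enrichment.orchestrator_client import OrchestratorClient


async def main() -> None:
    client = OrchestratorClient()  # defaults to http://orchestrator-service:8000
    context = await client.get_context(
        tenant_id="00000000-0000-0000-0000-000000000001",   # placeholder tenant
        event_type="critical_stock_shortage",
        metadata={"ingredient_id": "ing-123"},               # placeholder ingredient
    )
    if context["already_addressed"]:
        print("AI already acted:", context.get("action_summary"))
    else:
        print("No recent orchestrator action; alert remains action_needed")


asyncio.run(main())
```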
256
services/alert_processor/app/enrichment/priority_scorer.py
Normal file
@@ -0,0 +1,256 @@
|
||||
"""
|
||||
Multi-factor priority scoring for alerts.
|
||||
|
||||
Calculates priority score (0-100) based on:
|
||||
- Business impact (40%): Financial impact, affected orders, customer impact
|
||||
- Urgency (30%): Time until consequence, deadlines
|
||||
- User agency (20%): Can user fix it? External dependencies?
|
||||
- Confidence (10%): AI confidence in assessment
|
||||
|
||||
Also applies escalation boosts for age and deadline proximity.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class PriorityScorer:
|
||||
"""Calculate multi-factor priority score (0-100)"""
|
||||
|
||||
# Weights for priority calculation
|
||||
BUSINESS_IMPACT_WEIGHT = 0.4
|
||||
URGENCY_WEIGHT = 0.3
|
||||
USER_AGENCY_WEIGHT = 0.2
|
||||
CONFIDENCE_WEIGHT = 0.1
|
||||
|
||||
# Priority thresholds
|
||||
CRITICAL_THRESHOLD = 90
|
||||
IMPORTANT_THRESHOLD = 70
|
||||
STANDARD_THRESHOLD = 50
|
||||
|
||||
def calculate_priority(
|
||||
self,
|
||||
business_impact: dict,
|
||||
urgency: dict,
|
||||
user_agency: dict,
|
||||
orchestrator_context: dict
|
||||
) -> int:
|
||||
"""
|
||||
Calculate weighted priority score.
|
||||
|
||||
Args:
|
||||
business_impact: Business impact context
|
||||
urgency: Urgency context
|
||||
user_agency: User agency context
|
||||
orchestrator_context: AI orchestrator context
|
||||
|
||||
Returns:
|
||||
Priority score (0-100)
|
||||
"""
|
||||
|
||||
# Score each dimension (0-100)
|
||||
impact_score = self._score_business_impact(business_impact)
|
||||
urgency_score = self._score_urgency(urgency)
|
||||
agency_score = self._score_user_agency(user_agency)
|
||||
confidence_score = orchestrator_context.get("confidence", 0.8) * 100
|
||||
|
||||
# Weighted average
|
||||
total_score = (
|
||||
impact_score * self.BUSINESS_IMPACT_WEIGHT +
|
||||
urgency_score * self.URGENCY_WEIGHT +
|
||||
agency_score * self.USER_AGENCY_WEIGHT +
|
||||
confidence_score * self.CONFIDENCE_WEIGHT
|
||||
)
|
||||
|
||||
# Apply escalation boost if needed
|
||||
escalation_boost = self._calculate_escalation_boost(urgency)
|
||||
total_score = min(100, total_score + escalation_boost)
|
||||
|
||||
score = int(total_score)
|
||||
|
||||
logger.debug(
|
||||
"priority_calculated",
|
||||
score=score,
|
||||
impact_score=impact_score,
|
||||
urgency_score=urgency_score,
|
||||
agency_score=agency_score,
|
||||
confidence_score=confidence_score,
|
||||
escalation_boost=escalation_boost
|
||||
)
|
||||
|
||||
return score
|
||||
|
||||
def _score_business_impact(self, impact: dict) -> int:
|
||||
"""
|
||||
Score business impact (0-100).
|
||||
|
||||
Considers:
|
||||
- Financial impact in EUR
|
||||
- Number of affected orders
|
||||
- Customer impact level
|
||||
- Production delays
|
||||
- Revenue at risk
|
||||
"""
|
||||
score = 50 # Base score
|
||||
|
||||
# Financial impact
|
||||
financial_impact = impact.get("financial_impact_eur", 0)
|
||||
if financial_impact > 1000:
|
||||
score += 30
|
||||
elif financial_impact > 500:
|
||||
score += 20
|
||||
elif financial_impact > 100:
|
||||
score += 10
|
||||
|
||||
# Affected orders
|
||||
affected_orders = impact.get("affected_orders", 0)
|
||||
if affected_orders > 10:
|
||||
score += 15
|
||||
elif affected_orders > 5:
|
||||
score += 10
|
||||
elif affected_orders > 0:
|
||||
score += 5
|
||||
|
||||
# Customer impact
|
||||
customer_impact = impact.get("customer_impact", "low")
|
||||
if customer_impact == "high":
|
||||
score += 15
|
||||
elif customer_impact == "medium":
|
||||
score += 5
|
||||
|
||||
# Production delay hours
|
||||
production_delay_hours = impact.get("production_delay_hours", 0)
|
||||
if production_delay_hours > 4:
|
||||
score += 10
|
||||
elif production_delay_hours > 2:
|
||||
score += 5
|
||||
|
||||
# Revenue loss
|
||||
revenue_loss = impact.get("estimated_revenue_loss_eur", 0)
|
||||
if revenue_loss > 500:
|
||||
score += 10
|
||||
elif revenue_loss > 200:
|
||||
score += 5
|
||||
|
||||
return min(100, score)
|
||||
|
||||
def _score_urgency(self, urgency: dict) -> int:
|
||||
"""
|
||||
Score urgency (0-100).
|
||||
|
||||
Considers:
|
||||
- Time until consequence
|
||||
- Can it wait until tomorrow?
|
||||
- Deadline proximity
|
||||
- Peak hour relevance
|
||||
"""
|
||||
score = 50 # Base score
|
||||
|
||||
# Time until consequence
|
||||
hours_until = urgency.get("hours_until_consequence", 24)
|
||||
if hours_until < 2:
|
||||
score += 40
|
||||
elif hours_until < 6:
|
||||
score += 30
|
||||
elif hours_until < 12:
|
||||
score += 20
|
||||
elif hours_until < 24:
|
||||
score += 10
|
||||
|
||||
# Can it wait?
|
||||
if not urgency.get("can_wait_until_tomorrow", True):
|
||||
score += 10
|
||||
|
||||
# Deadline present
|
||||
if urgency.get("deadline_utc"):
|
||||
score += 5
|
||||
|
||||
# Peak hour relevant (production/demand related)
|
||||
if urgency.get("peak_hour_relevant", False):
|
||||
score += 5
|
||||
|
||||
return min(100, score)
|
||||
|
||||
def _score_user_agency(self, agency: dict) -> int:
|
||||
"""
|
||||
Score user agency (0-100).
|
||||
|
||||
Higher score when user CAN fix the issue.
|
||||
Lower score when blocked or requires external parties.
|
||||
|
||||
Considers:
|
||||
- Can user fix it?
|
||||
- Requires external party?
|
||||
- Has blockers?
|
||||
- Suggested workarounds available?
|
||||
"""
|
||||
score = 50 # Base score
|
||||
|
||||
# Can user fix?
|
||||
if agency.get("can_user_fix", False):
|
||||
score += 30
|
||||
else:
|
||||
score -= 20
|
||||
|
||||
# Requires external party?
|
||||
if agency.get("requires_external_party", False):
|
||||
score -= 10
|
||||
|
||||
# Has blockers?
|
||||
blockers = agency.get("blockers", [])
|
||||
score -= len(blockers) * 5
|
||||
|
||||
# Has suggested workaround?
|
||||
if agency.get("suggested_workaround"):
|
||||
score += 5
|
||||
|
||||
return max(0, min(100, score))
|
||||
|
||||
def _calculate_escalation_boost(self, urgency: dict) -> int:
|
||||
"""
|
||||
Calculate escalation boost for pending alerts.
|
||||
|
||||
Boosts priority for:
|
||||
- Age-based escalation (pending >48h, >72h)
|
||||
- Deadline proximity (<6h, <24h)
|
||||
|
||||
Maximum boost: +30 points
|
||||
"""
|
||||
boost = 0
|
||||
|
||||
# Age-based escalation
|
||||
hours_pending = urgency.get("hours_pending", 0)
|
||||
if hours_pending > 72:
|
||||
boost += 20
|
||||
elif hours_pending > 48:
|
||||
boost += 10
|
||||
|
||||
# Deadline proximity
|
||||
hours_until = urgency.get("hours_until_consequence", 24)
|
||||
if hours_until < 6:
|
||||
boost += 30
|
||||
elif hours_until < 24:
|
||||
boost += 15
|
||||
|
||||
# Cap at +30
|
||||
return min(30, boost)
|
||||
|
||||
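# Illustrative example (not from the original source; "scorer" is an assumed PriorityScorer
# instance and the values are hypothetical): how the +30 cap behaves for an alert that is
# both old and close to its deadline.
#
#   boost = scorer._calculate_escalation_boost(
#       {"hours_pending": 80, "hours_until_consequence": 5}
#   )
#   # +20 (pending > 72h) +30 (deadline < 6h) = 50, capped to 30
#   assert boost == 30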
def get_priority_level(self, score: int) -> str:
|
||||
"""
|
||||
Convert numeric score to priority level.
|
||||
|
||||
- 90-100: critical
|
||||
- 70-89: important
|
||||
- 50-69: standard
|
||||
- 0-49: info
|
||||
"""
|
||||
if score >= self.CRITICAL_THRESHOLD:
|
||||
return "critical"
|
||||
elif score >= self.IMPORTANT_THRESHOLD:
|
||||
return "important"
|
||||
elif score >= self.STANDARD_THRESHOLD:
|
||||
return "standard"
|
||||
else:
|
||||
return "info"
|
||||
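# Illustrative mapping (assumes the documented thresholds of 90/70/50 behind
# CRITICAL_THRESHOLD, IMPORTANT_THRESHOLD and STANDARD_THRESHOLD):
#
#   scorer = PriorityScorer()
#   scorer.get_priority_level(95)  # -> "critical"
#   scorer.get_priority_level(72)  # -> "important"
#   scorer.get_priority_level(55)  # -> "standard"
#   scorer.get_priority_level(10)  # -> "info"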
304
services/alert_processor/app/enrichment/smart_actions.py
Normal file
@@ -0,0 +1,304 @@
|
||||
"""
|
||||
Smart action generator for alerts.
|
||||
|
||||
Generates actionable buttons with deep links, phone numbers,
|
||||
and other interactive elements based on alert type and metadata.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class SmartActionGenerator:
|
||||
"""Generate smart action buttons for alerts"""
|
||||
|
||||
def generate_actions(
|
||||
self,
|
||||
event_type: str,
|
||||
metadata: Dict[str, Any],
|
||||
orchestrator_context: dict
|
||||
) -> List[dict]:
|
||||
"""
|
||||
Generate smart actions for an event.
|
||||
|
||||
Each action has:
|
||||
- action_type: Identifier for frontend handling
|
||||
- label_key: i18n key for button label
|
||||
- label_params: Parameters for label translation
|
||||
- variant: primary/secondary/danger/ghost
|
||||
- disabled: Boolean
|
||||
- disabled_reason_key: i18n key if disabled
|
||||
- consequence_key: i18n key for confirmation dialog
|
||||
- url: Deep link or tel: or mailto:
|
||||
- metadata: Additional data for action
|
||||
"""
|
||||
|
||||
actions = []
|
||||
|
||||
# If AI already addressed, show "View Action" button
|
||||
if orchestrator_context and orchestrator_context.get("already_addressed"):
|
||||
actions.append(self._create_view_action(orchestrator_context))
|
||||
return actions
|
||||
|
||||
# Generate actions based on event type
|
||||
if "po_approval" in event_type:
|
||||
actions.extend(self._create_po_approval_actions(metadata))
|
||||
|
||||
elif "stock" in event_type or "shortage" in event_type:
|
||||
actions.extend(self._create_stock_actions(metadata))
|
||||
|
||||
elif "production" in event_type or "delay" in event_type:
|
||||
actions.extend(self._create_production_actions(metadata))
|
||||
|
||||
elif "equipment" in event_type:
|
||||
actions.extend(self._create_equipment_actions(metadata))
|
||||
|
||||
elif "delivery" in event_type or "overdue" in event_type:
|
||||
actions.extend(self._create_delivery_actions(metadata))
|
||||
|
||||
elif "temperature" in event_type:
|
||||
actions.extend(self._create_temperature_actions(metadata))
|
||||
|
||||
# Always add common actions
|
||||
actions.extend(self._create_common_actions())
|
||||
|
||||
return actions
|
||||
|
||||
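# Illustrative usage (hypothetical metadata values): a stock-shortage event with a known
# supplier and no PO yet yields a call button, a create-PO button, plus the common
# snooze/dismiss actions appended at the end of generate_actions().
#
#   gen = SmartActionGenerator()
#   actions = gen.generate_actions(
#       event_type="critical_stock_shortage",
#       metadata={
#           "supplier_name": "Example Supplier",      # hypothetical
#           "supplier_contact": "+39 000 000 000",    # hypothetical
#           "ingredient_id": "ing-123",               # hypothetical
#       },
#       orchestrator_context={},
#   )
#   # [a.get("action_type") for a in actions]
#   # -> ["call_supplier", "create_po", "snooze", "dismiss"]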
def _create_view_action(self, orchestrator_context: dict) -> dict:
|
||||
"""Create action to view what AI did"""
|
||||
return {
|
||||
"action_type": "open_reasoning",
|
||||
"label_key": "actions.view_ai_action",
|
||||
"label_params": {},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"metadata": {
|
||||
"action_id": orchestrator_context.get("action_id"),
|
||||
"action_type": orchestrator_context.get("action_type")
|
||||
}
|
||||
}
|
||||
|
||||
def _create_po_approval_actions(self, metadata: Dict[str, Any]) -> List[dict]:
|
||||
"""Create actions for PO approval alerts"""
|
||||
po_id = metadata.get("po_id")
|
||||
po_amount = metadata.get("total_amount", metadata.get("po_amount", 0))
|
||||
|
||||
return [
|
||||
{
|
||||
"action_type": "approve_po",
|
||||
"label_key": "actions.approve_po",
|
||||
"label_params": {"amount": po_amount},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"consequence_key": "actions.approve_po_consequence",
|
||||
"url": f"/app/procurement/purchase-orders/{po_id}",
|
||||
"metadata": {"po_id": po_id, "amount": po_amount}
|
||||
},
|
||||
{
|
||||
"action_type": "reject_po",
|
||||
"label_key": "actions.reject_po",
|
||||
"label_params": {},
|
||||
"variant": "danger",
|
||||
"disabled": False,
|
||||
"consequence_key": "actions.reject_po_consequence",
|
||||
"url": f"/app/procurement/purchase-orders/{po_id}",
|
||||
"metadata": {"po_id": po_id}
|
||||
},
|
||||
{
|
||||
"action_type": "modify_po",
|
||||
"label_key": "actions.modify_po",
|
||||
"label_params": {},
|
||||
"variant": "secondary",
|
||||
"disabled": False,
|
||||
"url": f"/app/procurement/purchase-orders/{po_id}/edit",
|
||||
"metadata": {"po_id": po_id}
|
||||
}
|
||||
]
|
||||
|
||||
def _create_stock_actions(self, metadata: Dict[str, Any]) -> List[dict]:
|
||||
"""Create actions for stock-related alerts"""
|
||||
actions = []
|
||||
|
||||
# If supplier info available, add call button
|
||||
if metadata.get("supplier_contact"):
|
||||
actions.append({
|
||||
"action_type": "call_supplier",
|
||||
"label_key": "actions.call_supplier",
|
||||
"label_params": {
|
||||
"supplier": metadata.get("supplier_name", "Supplier"),
|
||||
"phone": metadata.get("supplier_contact")
|
||||
},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"tel:{metadata['supplier_contact']}",
|
||||
"metadata": {
|
||||
"supplier_name": metadata.get("supplier_name"),
|
||||
"phone": metadata.get("supplier_contact")
|
||||
}
|
||||
})
|
||||
|
||||
# If PO exists, add view PO button
|
||||
if metadata.get("po_id"):
|
||||
if metadata.get("po_status") == "pending_approval":
|
||||
actions.append({
|
||||
"action_type": "approve_po",
|
||||
"label_key": "actions.approve_po",
|
||||
"label_params": {"amount": metadata.get("po_amount", 0)},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"/app/procurement/purchase-orders/{metadata['po_id']}",
|
||||
"metadata": {"po_id": metadata["po_id"]}
|
||||
})
|
||||
else:
|
||||
actions.append({
|
||||
"action_type": "view_po",
|
||||
"label_key": "actions.view_po",
|
||||
"label_params": {"po_number": metadata.get("po_number", metadata["po_id"])},
|
||||
"variant": "secondary",
|
||||
"disabled": False,
|
||||
"url": f"/app/procurement/purchase-orders/{metadata['po_id']}",
|
||||
"metadata": {"po_id": metadata["po_id"]}
|
||||
})
|
||||
|
||||
# Add create PO button if no PO exists
|
||||
else:
|
||||
actions.append({
|
||||
"action_type": "create_po",
|
||||
"label_key": "actions.create_po",
|
||||
"label_params": {},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"/app/procurement/purchase-orders/new?ingredient_id={metadata.get('ingredient_id')}",
|
||||
"metadata": {"ingredient_id": metadata.get("ingredient_id")}
|
||||
})
|
||||
|
||||
return actions
|
||||
|
||||
def _create_production_actions(self, metadata: Dict[str, Any]) -> List[dict]:
|
||||
"""Create actions for production-related alerts"""
|
||||
actions = []
|
||||
|
||||
if metadata.get("batch_id"):
|
||||
actions.append({
|
||||
"action_type": "view_batch",
|
||||
"label_key": "actions.view_batch",
|
||||
"label_params": {"batch_number": metadata.get("batch_number", "")},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"/app/production/batches/{metadata['batch_id']}",
|
||||
"metadata": {"batch_id": metadata["batch_id"]}
|
||||
})
|
||||
|
||||
actions.append({
|
||||
"action_type": "adjust_production",
|
||||
"label_key": "actions.adjust_production",
|
||||
"label_params": {},
|
||||
"variant": "secondary",
|
||||
"disabled": False,
|
||||
"url": f"/app/production/batches/{metadata.get('batch_id')}/adjust",
|
||||
"metadata": {"batch_id": metadata.get("batch_id")}
|
||||
})
|
||||
|
||||
return actions
|
||||
|
||||
def _create_equipment_actions(self, metadata: Dict[str, Any]) -> List[dict]:
|
||||
"""Create actions for equipment-related alerts"""
|
||||
return [
|
||||
{
|
||||
"action_type": "view_equipment",
|
||||
"label_key": "actions.view_equipment",
|
||||
"label_params": {"equipment_name": metadata.get("equipment_name", "")},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"/app/production/equipment/{metadata.get('equipment_id')}",
|
||||
"metadata": {"equipment_id": metadata.get("equipment_id")}
|
||||
},
|
||||
{
|
||||
"action_type": "schedule_maintenance",
|
||||
"label_key": "actions.schedule_maintenance",
|
||||
"label_params": {},
|
||||
"variant": "secondary",
|
||||
"disabled": False,
|
||||
"url": f"/app/production/equipment/{metadata.get('equipment_id')}/maintenance",
|
||||
"metadata": {"equipment_id": metadata.get("equipment_id")}
|
||||
}
|
||||
]
|
||||
|
||||
def _create_delivery_actions(self, metadata: Dict[str, Any]) -> List[dict]:
|
||||
"""Create actions for delivery-related alerts"""
|
||||
actions = []
|
||||
|
||||
if metadata.get("supplier_contact"):
|
||||
actions.append({
|
||||
"action_type": "call_supplier",
|
||||
"label_key": "actions.call_supplier",
|
||||
"label_params": {
|
||||
"supplier": metadata.get("supplier_name", "Supplier"),
|
||||
"phone": metadata.get("supplier_contact")
|
||||
},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"tel:{metadata['supplier_contact']}",
|
||||
"metadata": {
|
||||
"supplier_name": metadata.get("supplier_name"),
|
||||
"phone": metadata.get("supplier_contact")
|
||||
}
|
||||
})
|
||||
|
||||
if metadata.get("po_id"):
|
||||
actions.append({
|
||||
"action_type": "view_po",
|
||||
"label_key": "actions.view_po",
|
||||
"label_params": {"po_number": metadata.get("po_number", "")},
|
||||
"variant": "secondary",
|
||||
"disabled": False,
|
||||
"url": f"/app/procurement/purchase-orders/{metadata['po_id']}",
|
||||
"metadata": {"po_id": metadata["po_id"]}
|
||||
})
|
||||
|
||||
return actions
|
||||
|
||||
def _create_temperature_actions(self, metadata: Dict[str, Any]) -> List[dict]:
|
||||
"""Create actions for temperature breach alerts"""
|
||||
return [
|
||||
{
|
||||
"action_type": "view_sensor",
|
||||
"label_key": "actions.view_sensor",
|
||||
"label_params": {"location": metadata.get("location", "")},
|
||||
"variant": "primary",
|
||||
"disabled": False,
|
||||
"url": f"/app/inventory/sensors/{metadata.get('sensor_id')}",
|
||||
"metadata": {"sensor_id": metadata.get("sensor_id")}
|
||||
},
|
||||
{
|
||||
"action_type": "acknowledge_breach",
|
||||
"label_key": "actions.acknowledge_breach",
|
||||
"label_params": {},
|
||||
"variant": "secondary",
|
||||
"disabled": False,
|
||||
"metadata": {"sensor_id": metadata.get("sensor_id")}
|
||||
}
|
||||
]
|
||||
|
||||
def _create_common_actions(self) -> List[dict]:
|
||||
"""Create common actions available for all alerts"""
|
||||
return [
|
||||
{
|
||||
"action_type": "snooze",
|
||||
"label_key": "actions.snooze",
|
||||
"label_params": {"hours": 4},
|
||||
"variant": "ghost",
|
||||
"disabled": False,
|
||||
"metadata": {"snooze_hours": 4}
|
||||
},
|
||||
{
|
||||
"action_type": "dismiss",
|
||||
"label_key": "actions.dismiss",
|
||||
"label_params": {},
|
||||
"variant": "ghost",
|
||||
"disabled": False,
|
||||
"metadata": {}
|
||||
}
|
||||
]
|
||||
173
services/alert_processor/app/enrichment/urgency_analyzer.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""
|
||||
Urgency analyzer for alerts.
|
||||
|
||||
Assesses time sensitivity, deadlines, and determines if action can wait.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
from datetime import datetime, timedelta, timezone
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class UrgencyAnalyzer:
|
||||
"""Analyze urgency from event metadata"""
|
||||
|
||||
def analyze(self, event_type: str, metadata: Dict[str, Any]) -> dict:
|
||||
"""
|
||||
Analyze urgency for an event.
|
||||
|
||||
Returns dict with:
|
||||
- hours_until_consequence: Time until impact occurs
|
||||
- can_wait_until_tomorrow: Boolean
|
||||
- deadline_utc: ISO datetime if deadline exists
|
||||
- peak_hour_relevant: Boolean
|
||||
- hours_pending: Age of alert
|
||||
"""
|
||||
|
||||
urgency = {
|
||||
"hours_until_consequence": 24, # Default: 24 hours
|
||||
"can_wait_until_tomorrow": True,
|
||||
"deadline_utc": None,
|
||||
"peak_hour_relevant": False,
|
||||
"hours_pending": 0
|
||||
}
|
||||
|
||||
# Calculate based on event type
|
||||
if "critical" in event_type or "urgent" in event_type:
|
||||
urgency["hours_until_consequence"] = 2
|
||||
urgency["can_wait_until_tomorrow"] = False
|
||||
|
||||
elif "production" in event_type:
|
||||
urgency.update(self._analyze_production_urgency(metadata))
|
||||
|
||||
elif "stock" in event_type or "shortage" in event_type:
|
||||
urgency.update(self._analyze_stock_urgency(metadata))
|
||||
|
||||
elif "delivery" in event_type or "overdue" in event_type:
|
||||
urgency.update(self._analyze_delivery_urgency(metadata))
|
||||
|
||||
elif "po_approval" in event_type:
|
||||
urgency.update(self._analyze_po_approval_urgency(metadata))
|
||||
|
||||
# Check for explicit deadlines
|
||||
if "required_delivery_date" in metadata:
|
||||
urgency.update(self._calculate_deadline_urgency(metadata["required_delivery_date"]))
|
||||
|
||||
if "production_date" in metadata:
|
||||
urgency.update(self._calculate_deadline_urgency(metadata["production_date"]))
|
||||
|
||||
if "expected_date" in metadata:
|
||||
urgency.update(self._calculate_deadline_urgency(metadata["expected_date"]))
|
||||
|
||||
return urgency
|
||||
|
||||
def _analyze_production_urgency(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze urgency for production alerts"""
|
||||
urgency = {}
|
||||
|
||||
delay_minutes = metadata.get("delay_minutes", 0)
|
||||
|
||||
if delay_minutes > 120:
|
||||
urgency["hours_until_consequence"] = 1
|
||||
urgency["can_wait_until_tomorrow"] = False
|
||||
elif delay_minutes > 60:
|
||||
urgency["hours_until_consequence"] = 4
|
||||
urgency["can_wait_until_tomorrow"] = False
|
||||
else:
|
||||
urgency["hours_until_consequence"] = 8
|
||||
|
||||
# Production is peak-hour sensitive
|
||||
urgency["peak_hour_relevant"] = True
|
||||
|
||||
return urgency
|
||||
|
||||
def _analyze_stock_urgency(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze urgency for stock alerts"""
|
||||
urgency = {}
|
||||
|
||||
# Hours until needed
|
||||
if "hours_until" in metadata:
|
||||
urgency["hours_until_consequence"] = metadata["hours_until"]
|
||||
urgency["can_wait_until_tomorrow"] = urgency["hours_until_consequence"] > 24
|
||||
|
||||
# Days until expiry
|
||||
elif "days_until_expiry" in metadata:
|
||||
days = metadata["days_until_expiry"]
|
||||
if days <= 1:
|
||||
urgency["hours_until_consequence"] = days * 24
|
||||
urgency["can_wait_until_tomorrow"] = False
|
||||
else:
|
||||
urgency["hours_until_consequence"] = days * 24
|
||||
|
||||
return urgency
|
||||
|
||||
def _analyze_delivery_urgency(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze urgency for delivery alerts"""
|
||||
urgency = {}
|
||||
|
||||
days_overdue = metadata.get("days_overdue", 0)
|
||||
|
||||
if days_overdue > 3:
|
||||
urgency["hours_until_consequence"] = 2
|
||||
urgency["can_wait_until_tomorrow"] = False
|
||||
elif days_overdue > 1:
|
||||
urgency["hours_until_consequence"] = 8
|
||||
urgency["can_wait_until_tomorrow"] = False
|
||||
|
||||
return urgency
|
||||
|
||||
def _analyze_po_approval_urgency(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""
|
||||
Analyze urgency for PO approval alerts.
|
||||
|
||||
Uses stockout time (when you run out of stock) instead of delivery date
|
||||
to determine true urgency.
|
||||
"""
|
||||
urgency = {}
|
||||
|
||||
# Extract min_depletion_hours from reasoning_data.parameters
|
||||
reasoning_data = metadata.get("reasoning_data", {})
|
||||
parameters = reasoning_data.get("parameters", {})
|
||||
min_depletion_hours = parameters.get("min_depletion_hours")
|
||||
|
||||
if min_depletion_hours is not None:
|
||||
urgency["hours_until_consequence"] = max(0, round(min_depletion_hours, 1))
|
||||
urgency["can_wait_until_tomorrow"] = min_depletion_hours > 24
|
||||
|
||||
# Set deadline_utc to when stock runs out
|
||||
now = datetime.now(timezone.utc)
|
||||
stockout_time = now + timedelta(hours=min_depletion_hours)
|
||||
urgency["deadline_utc"] = stockout_time.isoformat()
|
||||
|
||||
logger.info(
|
||||
"po_approval_urgency_calculated",
|
||||
min_depletion_hours=min_depletion_hours,
|
||||
stockout_deadline=urgency["deadline_utc"],
|
||||
can_wait=urgency["can_wait_until_tomorrow"]
|
||||
)
|
||||
|
||||
return urgency
|
||||
|
||||
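# Illustrative example (hypothetical reasoning_data): a PO approval alert whose stock is
# projected to deplete in 10 hours cannot wait until tomorrow, and its deadline becomes
# the projected stockout time.
#
#   analyzer = UrgencyAnalyzer()
#   urgency = analyzer._analyze_po_approval_urgency(
#       {"reasoning_data": {"parameters": {"min_depletion_hours": 10}}}
#   )
#   # urgency["hours_until_consequence"] == 10
#   # urgency["can_wait_until_tomorrow"] is False
#   # urgency["deadline_utc"] is roughly now + 10 hours, ISO 8601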
def _calculate_deadline_urgency(self, deadline_str: str) -> dict:
|
||||
"""Calculate urgency based on deadline"""
|
||||
try:
|
||||
if isinstance(deadline_str, str):
|
||||
deadline = datetime.fromisoformat(deadline_str.replace('Z', '+00:00'))
|
||||
else:
|
||||
deadline = deadline_str
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
time_until = deadline - now
|
||||
|
||||
hours_until = time_until.total_seconds() / 3600
|
||||
|
||||
return {
|
||||
"deadline_utc": deadline.isoformat(),
|
||||
"hours_until_consequence": max(0, round(hours_until, 1)),
|
||||
"can_wait_until_tomorrow": hours_until > 24
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("deadline_parse_failed", deadline=deadline_str, error=str(e))
|
||||
return {}
|
||||
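# Illustrative example: a trailing "Z" is normalized to "+00:00" before parsing, and the
# remaining hours are floored at zero. The timestamp below is hypothetical.
#
#   analyzer._calculate_deadline_urgency("2025-01-01T06:00:00Z")
#   # -> {"deadline_utc": "2025-01-01T06:00:00+00:00",
#   #     "hours_until_consequence": <hours from now, never negative>,
#   #     "can_wait_until_tomorrow": <True only if more than 24h away>}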
116
services/alert_processor/app/enrichment/user_agency.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""
|
||||
User agency analyzer for alerts.
|
||||
|
||||
Determines whether user can fix the issue, what blockers exist,
|
||||
and if external parties are required.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class UserAgencyAnalyzer:
|
||||
"""Analyze user's ability to act on alerts"""
|
||||
|
||||
def analyze(
|
||||
self,
|
||||
event_type: str,
|
||||
metadata: Dict[str, Any],
|
||||
orchestrator_context: dict
|
||||
) -> dict:
|
||||
"""
|
||||
Analyze user agency for an event.
|
||||
|
||||
Returns dict with:
|
||||
- can_user_fix: Boolean - can user resolve this?
|
||||
- requires_external_party: Boolean
|
||||
- external_party_name: Name of required party
|
||||
- external_party_contact: Contact info
|
||||
- blockers: List of blocking factors
|
||||
- suggested_workaround: Optional workaround suggestion
|
||||
"""
|
||||
|
||||
agency = {
|
||||
"can_user_fix": True,
|
||||
"requires_external_party": False,
|
||||
"external_party_name": None,
|
||||
"external_party_contact": None,
|
||||
"blockers": [],
|
||||
"suggested_workaround": None
|
||||
}
|
||||
|
||||
# If orchestrator already addressed it, user agency is low
|
||||
if orchestrator_context and orchestrator_context.get("already_addressed"):
|
||||
agency["can_user_fix"] = False
|
||||
agency["blockers"].append("ai_already_handled")
|
||||
return agency
|
||||
|
||||
# Analyze based on event type
|
||||
if "po_approval" in event_type:
|
||||
agency["can_user_fix"] = True
|
||||
|
||||
elif "delivery" in event_type or "supplier" in event_type:
|
||||
agency.update(self._analyze_supplier_agency(metadata))
|
||||
|
||||
elif "equipment" in event_type:
|
||||
agency.update(self._analyze_equipment_agency(metadata))
|
||||
|
||||
elif "stock" in event_type:
|
||||
agency.update(self._analyze_stock_agency(metadata, orchestrator_context))
|
||||
|
||||
return agency
|
||||
|
||||
def _analyze_supplier_agency(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze agency for supplier-related alerts"""
|
||||
agency = {
|
||||
"requires_external_party": True,
|
||||
"external_party_name": metadata.get("supplier_name"),
|
||||
"external_party_contact": metadata.get("supplier_contact")
|
||||
}
|
||||
|
||||
# User can contact supplier but can't directly fix
|
||||
if not metadata.get("supplier_contact"):
|
||||
agency["blockers"] = ["no_supplier_contact"]
|
||||
|
||||
return agency
|
||||
|
||||
def _analyze_equipment_agency(self, metadata: Dict[str, Any]) -> dict:
|
||||
"""Analyze agency for equipment-related alerts"""
|
||||
agency = {}
|
||||
|
||||
equipment_type = metadata.get("equipment_type", "")
|
||||
|
||||
if "oven" in equipment_type.lower() or "mixer" in equipment_type.lower():
|
||||
agency["requires_external_party"] = True
|
||||
agency["external_party_name"] = "Maintenance Team"
|
||||
agency["blockers"] = ["requires_technician"]
|
||||
|
||||
return agency
|
||||
|
||||
def _analyze_stock_agency(
|
||||
self,
|
||||
metadata: Dict[str, Any],
|
||||
orchestrator_context: dict
|
||||
) -> dict:
|
||||
"""Analyze agency for stock-related alerts"""
|
||||
agency = {}
|
||||
|
||||
# If PO exists, user just needs to approve
|
||||
if metadata.get("po_id"):
|
||||
if metadata.get("po_status") == "pending_approval":
|
||||
agency["can_user_fix"] = True
|
||||
agency["suggested_workaround"] = "Approve pending PO"
|
||||
else:
|
||||
agency["blockers"] = ["waiting_for_delivery"]
|
||||
agency["requires_external_party"] = True
|
||||
agency["external_party_name"] = metadata.get("supplier_name")
|
||||
|
||||
# If no PO, user needs to create one
|
||||
elif metadata.get("supplier_name"):
|
||||
agency["can_user_fix"] = True
|
||||
agency["requires_external_party"] = True
|
||||
agency["external_party_name"] = metadata.get("supplier_name")
|
||||
|
||||
return agency
|
||||
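# Illustrative examples (hypothetical PO/supplier values): a stock alert whose PO is
# pending approval keeps the user in control, while one waiting on a delivery shifts
# agency to the supplier.
#
#   analyzer = UserAgencyAnalyzer()
#   analyzer.analyze("critical_stock_shortage",
#                    {"po_id": "po-1", "po_status": "pending_approval"},
#                    orchestrator_context={})
#   # -> can_user_fix=True, suggested_workaround="Approve pending PO"
#
#   analyzer.analyze("critical_stock_shortage",
#                    {"po_id": "po-2", "po_status": "sent", "supplier_name": "Example Supplier"},
#                    orchestrator_context={})
#   # -> blockers=["waiting_for_delivery"], requires_external_party=True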
100
services/alert_processor/app/main.py
Normal file
@@ -0,0 +1,100 @@
|
||||
"""
|
||||
Alert Processor Service v2.0
|
||||
|
||||
Main FastAPI application with RabbitMQ consumer lifecycle management.
|
||||
"""
|
||||
|
||||
import structlog
|
||||
|
||||
from app.core.config import settings
|
||||
from app.consumer.event_consumer import EventConsumer
|
||||
from app.api import alerts, sse
|
||||
from shared.redis_utils import initialize_redis, close_redis
|
||||
from shared.service_base import StandardFastAPIService
|
||||
|
||||
# Initialize logger
|
||||
logger = structlog.get_logger()
|
||||
|
||||
# Global consumer instance
|
||||
consumer: EventConsumer = None
|
||||
|
||||
|
||||
class AlertProcessorService(StandardFastAPIService):
|
||||
"""Alert Processor Service with standardized monitoring setup and RabbitMQ consumer"""
|
||||
|
||||
async def on_startup(self, app):
|
||||
"""Custom startup logic for Alert Processor"""
|
||||
global consumer
|
||||
|
||||
# Initialize Redis connection
|
||||
await initialize_redis(
|
||||
settings.REDIS_URL,
|
||||
db=settings.REDIS_DB,
|
||||
max_connections=settings.REDIS_MAX_CONNECTIONS
|
||||
)
|
||||
logger.info("redis_initialized")
|
||||
|
||||
# Start RabbitMQ consumer
|
||||
consumer = EventConsumer()
|
||||
await consumer.start()
|
||||
logger.info("rabbitmq_consumer_started")
|
||||
|
||||
await super().on_startup(app)
|
||||
|
||||
async def on_shutdown(self, app):
|
||||
"""Custom shutdown logic for Alert Processor"""
|
||||
global consumer
|
||||
|
||||
await super().on_shutdown(app)
|
||||
|
||||
# Stop RabbitMQ consumer
|
||||
if consumer:
|
||||
await consumer.stop()
|
||||
logger.info("rabbitmq_consumer_stopped")
|
||||
|
||||
# Close Redis
|
||||
await close_redis()
|
||||
logger.info("redis_closed")
|
||||
|
||||
|
||||
# Create service instance
|
||||
service = AlertProcessorService(
|
||||
service_name="alert-processor",
|
||||
app_name="Alert Processor Service",
|
||||
description="Event processing, enrichment, and alert management system",
|
||||
version=settings.VERSION,
|
||||
log_level=getattr(settings, 'LOG_LEVEL', 'INFO'),
|
||||
cors_origins=["*"], # Configure appropriately for production
|
||||
api_prefix="/api/v1",
|
||||
enable_metrics=True,
|
||||
enable_health_checks=True,
|
||||
enable_tracing=True,
|
||||
enable_cors=True
|
||||
)
|
||||
|
||||
# Create FastAPI app
|
||||
app = service.create_app(debug=settings.DEBUG)
|
||||
|
||||
# Add service-specific routers
|
||||
app.include_router(
|
||||
alerts.router,
|
||||
prefix="/api/v1/tenants/{tenant_id}",
|
||||
tags=["alerts"]
|
||||
)
|
||||
|
||||
app.include_router(
|
||||
sse.router,
|
||||
prefix="/api/v1",
|
||||
tags=["sse"]
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(
|
||||
"app.main:app",
|
||||
host="0.0.0.0",
|
||||
port=8000,
|
||||
reload=settings.DEBUG
|
||||
)
|
||||
0
services/alert_processor/app/models/__init__.py
Normal file
84
services/alert_processor/app/models/events.py
Normal file
@@ -0,0 +1,84 @@
|
||||
"""
|
||||
SQLAlchemy models for events table.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, String, Integer, DateTime, Float, Index
|
||||
from sqlalchemy.dialects.postgresql import UUID, JSONB
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from datetime import datetime, timezone
|
||||
import uuid
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
class Event(Base):
|
||||
"""Unified event table for alerts, notifications, recommendations"""
|
||||
__tablename__ = "events"
|
||||
|
||||
# Core fields
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
created_at = Column(
|
||||
DateTime(timezone=True),
|
||||
default=lambda: datetime.now(timezone.utc),
|
||||
nullable=False
|
||||
)
|
||||
updated_at = Column(
|
||||
DateTime(timezone=True),
|
||||
default=lambda: datetime.now(timezone.utc),
|
||||
onupdate=lambda: datetime.now(timezone.utc),
|
||||
nullable=False
|
||||
)
|
||||
|
||||
# Classification
|
||||
event_class = Column(String(50), nullable=False)
|
||||
event_domain = Column(String(50), nullable=False, index=True)
|
||||
event_type = Column(String(100), nullable=False, index=True)
|
||||
service = Column(String(50), nullable=False)
|
||||
|
||||
# i18n content (NO hardcoded title/message)
|
||||
i18n_title_key = Column(String(200), nullable=False)
|
||||
i18n_title_params = Column(JSONB, nullable=False, default=dict)
|
||||
i18n_message_key = Column(String(200), nullable=False)
|
||||
i18n_message_params = Column(JSONB, nullable=False, default=dict)
|
||||
|
||||
# Priority
|
||||
priority_score = Column(Integer, nullable=False, default=50, index=True)
|
||||
priority_level = Column(String(20), nullable=False, index=True)
|
||||
type_class = Column(String(50), nullable=False, index=True)
|
||||
|
||||
# Enrichment contexts (JSONB)
|
||||
orchestrator_context = Column(JSONB, nullable=True)
|
||||
business_impact = Column(JSONB, nullable=True)
|
||||
urgency = Column(JSONB, nullable=True)
|
||||
user_agency = Column(JSONB, nullable=True)
|
||||
trend_context = Column(JSONB, nullable=True)
|
||||
|
||||
# Smart actions
|
||||
smart_actions = Column(JSONB, nullable=False, default=list)
|
||||
|
||||
# AI reasoning
|
||||
ai_reasoning_summary_key = Column(String(200), nullable=True)
|
||||
ai_reasoning_summary_params = Column(JSONB, nullable=True)
|
||||
ai_reasoning_details = Column(JSONB, nullable=True)
|
||||
confidence_score = Column(Float, nullable=True)
|
||||
|
||||
# Entity references
|
||||
entity_links = Column(JSONB, nullable=False, default=dict)
|
||||
|
||||
# Status
|
||||
status = Column(String(20), nullable=False, default="active", index=True)
|
||||
resolved_at = Column(DateTime(timezone=True), nullable=True)
|
||||
acknowledged_at = Column(DateTime(timezone=True), nullable=True)
|
||||
|
||||
# Metadata
|
||||
event_metadata = Column(JSONB, nullable=False, default=dict)
|
||||
|
||||
# Indexes for dashboard queries
|
||||
__table_args__ = (
|
||||
Index('idx_events_tenant_status', 'tenant_id', 'status'),
|
||||
Index('idx_events_tenant_priority', 'tenant_id', 'priority_score'),
|
||||
Index('idx_events_tenant_class', 'tenant_id', 'event_class'),
|
||||
Index('idx_events_tenant_created', 'tenant_id', 'created_at'),
|
||||
Index('idx_events_type_class_status', 'type_class', 'status'),
|
||||
)
|
||||
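# Illustrative query sketch (tenant_id below is a placeholder variable): the composite
# (tenant_id, status) and (tenant_id, priority_score) indexes target dashboard reads
# shaped like this one.
#
#   from sqlalchemy import select
#
#   stmt = (
#       select(Event)
#       .where(Event.tenant_id == tenant_id, Event.status == "active")
#       .order_by(Event.priority_score.desc())
#       .limit(50)
#   )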
407
services/alert_processor/app/repositories/event_repository.py
Normal file
@@ -0,0 +1,407 @@
|
||||
"""
|
||||
Event repository for database operations.
|
||||
"""
|
||||
|
||||
from typing import List, Optional, Dict, Any
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timezone
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, func, and_, desc
|
||||
from sqlalchemy.dialects.postgresql import insert
|
||||
import structlog
|
||||
|
||||
from app.models.events import Event
|
||||
from app.schemas.events import EnrichedEvent, EventSummary, EventResponse, I18nContent, SmartAction
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class EventRepository:
|
||||
"""Repository for event database operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession):
|
||||
self.session = session
|
||||
|
||||
async def create_event(self, enriched_event: EnrichedEvent) -> Event:
|
||||
"""
|
||||
Store enriched event in database.
|
||||
|
||||
Args:
|
||||
enriched_event: Enriched event with all context
|
||||
|
||||
Returns:
|
||||
Stored Event model
|
||||
"""
|
||||
|
||||
# Convert enriched event to database model
|
||||
event = Event(
|
||||
id=enriched_event.id,
|
||||
tenant_id=UUID(enriched_event.tenant_id),
|
||||
event_class=enriched_event.event_class,
|
||||
event_domain=enriched_event.event_domain,
|
||||
event_type=enriched_event.event_type,
|
||||
service=enriched_event.service,
|
||||
|
||||
# i18n content
|
||||
i18n_title_key=enriched_event.i18n.title_key,
|
||||
i18n_title_params=enriched_event.i18n.title_params,
|
||||
i18n_message_key=enriched_event.i18n.message_key,
|
||||
i18n_message_params=enriched_event.i18n.message_params,
|
||||
|
||||
# Priority
|
||||
priority_score=enriched_event.priority_score,
|
||||
priority_level=enriched_event.priority_level,
|
||||
type_class=enriched_event.type_class,
|
||||
|
||||
# Enrichment contexts
|
||||
orchestrator_context=enriched_event.orchestrator_context.dict() if enriched_event.orchestrator_context else None,
|
||||
business_impact=enriched_event.business_impact.dict() if enriched_event.business_impact else None,
|
||||
urgency=enriched_event.urgency.dict() if enriched_event.urgency else None,
|
||||
user_agency=enriched_event.user_agency.dict() if enriched_event.user_agency else None,
|
||||
trend_context=enriched_event.trend_context,
|
||||
|
||||
# Smart actions
|
||||
smart_actions=[action.dict() for action in enriched_event.smart_actions],
|
||||
|
||||
# AI reasoning
|
||||
ai_reasoning_summary_key=enriched_event.ai_reasoning_summary_key,
|
||||
ai_reasoning_summary_params=enriched_event.ai_reasoning_summary_params,
|
||||
ai_reasoning_details=enriched_event.ai_reasoning_details,
|
||||
confidence_score=enriched_event.confidence_score,
|
||||
|
||||
# Entity links
|
||||
entity_links=enriched_event.entity_links,
|
||||
|
||||
# Status
|
||||
status=enriched_event.status,
|
||||
|
||||
# Metadata
|
||||
event_metadata=enriched_event.event_metadata
|
||||
)
|
||||
|
||||
self.session.add(event)
|
||||
await self.session.commit()
|
||||
await self.session.refresh(event)
|
||||
|
||||
logger.info("event_stored", event_id=event.id, event_type=event.event_type)
|
||||
|
||||
return event
|
||||
|
||||
async def get_events(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
event_class: Optional[str] = None,
|
||||
priority_level: Optional[List[str]] = None,
|
||||
status: Optional[List[str]] = None,
|
||||
event_domain: Optional[str] = None,
|
||||
limit: int = 50,
|
||||
offset: int = 0
|
||||
) -> List[Event]:
|
||||
"""
|
||||
Get filtered list of events.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant UUID
|
||||
event_class: Filter by event class (alert, notification, recommendation)
|
||||
priority_level: Filter by priority levels
|
||||
status: Filter by status values
|
||||
event_domain: Filter by domain
|
||||
limit: Max results
|
||||
offset: Pagination offset
|
||||
|
||||
Returns:
|
||||
List of Event models
|
||||
"""
|
||||
|
||||
query = select(Event).where(Event.tenant_id == tenant_id)
|
||||
|
||||
# Apply filters
|
||||
if event_class:
|
||||
query = query.where(Event.event_class == event_class)
|
||||
|
||||
if priority_level:
|
||||
query = query.where(Event.priority_level.in_(priority_level))
|
||||
|
||||
if status:
|
||||
query = query.where(Event.status.in_(status))
|
||||
|
||||
if event_domain:
|
||||
query = query.where(Event.event_domain == event_domain)
|
||||
|
||||
# Order by priority and creation time
|
||||
query = query.order_by(
|
||||
desc(Event.priority_score),
|
||||
desc(Event.created_at)
|
||||
)
|
||||
|
||||
# Pagination
|
||||
query = query.limit(limit).offset(offset)
|
||||
|
||||
result = await self.session.execute(query)
|
||||
events = result.scalars().all()
|
||||
|
||||
return list(events)
|
||||
|
||||
async def get_event_by_id(self, event_id: UUID) -> Optional[Event]:
|
||||
"""Get single event by ID"""
|
||||
query = select(Event).where(Event.id == event_id)
|
||||
result = await self.session.execute(query)
|
||||
return result.scalar_one_or_none()
|
||||
|
||||
async def check_duplicate_alert(self, tenant_id: UUID, event_type: str, entity_links: Dict, event_metadata: Dict, time_window_hours: int = 24) -> Optional[Event]:
|
||||
"""
|
||||
Check if a similar alert already exists within the time window.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant UUID
|
||||
event_type: Type of event (e.g., 'production_delay', 'critical_stock_shortage')
|
||||
entity_links: Entity references (e.g., batch_id, po_id, ingredient_id)
|
||||
event_metadata: Event metadata for comparison
|
||||
time_window_hours: Time window in hours to check for duplicates
|
||||
|
||||
Returns:
|
||||
Existing event if duplicate found, None otherwise
|
||||
"""
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
# Calculate time threshold
|
||||
time_threshold = datetime.now(timezone.utc) - timedelta(hours=time_window_hours)
|
||||
|
||||
# Build query to find potential duplicates
|
||||
query = select(Event).where(
|
||||
and_(
|
||||
Event.tenant_id == tenant_id,
|
||||
Event.event_type == event_type,
|
||||
Event.status == "active", # Only check active alerts
|
||||
Event.created_at >= time_threshold
|
||||
)
|
||||
)
|
||||
|
||||
result = await self.session.execute(query)
|
||||
potential_duplicates = result.scalars().all()
|
||||
|
||||
# Compare each potential duplicate for semantic similarity
|
||||
for event in potential_duplicates:
|
||||
# Check if entity links match (same batch, PO, ingredient, etc.)
|
||||
if self._entities_match(event.entity_links, entity_links):
|
||||
# For production delays, check if it's the same batch with similar delay
|
||||
if event_type == "production_delay":
|
||||
if self._production_delay_match(event.event_metadata, event_metadata):
|
||||
return event
|
||||
|
||||
# For critical stock shortages, check if it's the same ingredient
|
||||
elif event_type == "critical_stock_shortage":
|
||||
if self._stock_shortage_match(event.event_metadata, event_metadata):
|
||||
return event
|
||||
|
||||
# For delivery overdue alerts, check if it's the same PO
|
||||
elif event_type == "delivery_overdue":
|
||||
if self._delivery_overdue_match(event.event_metadata, event_metadata):
|
||||
return event
|
||||
|
||||
# For general matching based on metadata
|
||||
else:
|
||||
if self._metadata_match(event.event_metadata, event_metadata):
|
||||
return event
|
||||
|
||||
return None
|
||||
|
||||
def _entities_match(self, existing_links: Dict, new_links: Dict) -> bool:
|
||||
"""Check if entity links match between two events."""
|
||||
if not existing_links or not new_links:
|
||||
return False
|
||||
|
||||
# Check for common entity types
|
||||
common_entities = ['production_batch', 'purchase_order', 'ingredient', 'supplier', 'equipment']
|
||||
|
||||
for entity in common_entities:
|
||||
if entity in existing_links and entity in new_links:
|
||||
if existing_links[entity] == new_links[entity]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _production_delay_match(self, existing_meta: Dict, new_meta: Dict) -> bool:
|
||||
"""Check if production delay alerts match."""
|
||||
# Same batch_id indicates same production issue
|
||||
return (existing_meta.get('batch_id') == new_meta.get('batch_id') and
|
||||
existing_meta.get('product_name') == new_meta.get('product_name'))
|
||||
|
||||
def _stock_shortage_match(self, existing_meta: Dict, new_meta: Dict) -> bool:
|
||||
"""Check if stock shortage alerts match."""
|
||||
# Same ingredient_id indicates same shortage issue
|
||||
return existing_meta.get('ingredient_id') == new_meta.get('ingredient_id')
|
||||
|
||||
def _delivery_overdue_match(self, existing_meta: Dict, new_meta: Dict) -> bool:
|
||||
"""Check if delivery overdue alerts match."""
|
||||
# Same PO indicates same delivery issue
|
||||
return existing_meta.get('po_id') == new_meta.get('po_id')
|
||||
|
||||
def _metadata_match(self, existing_meta: Dict, new_meta: Dict) -> bool:
|
||||
"""Generic metadata matching for other alert types."""
|
||||
# Check for common identifying fields
|
||||
common_fields = ['batch_id', 'po_id', 'ingredient_id', 'supplier_id', 'equipment_id']
|
||||
|
||||
for field in common_fields:
|
||||
if field in existing_meta and field in new_meta:
|
||||
if existing_meta[field] == new_meta[field]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
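# Illustrative usage (hypothetical ids, run inside an async context): two active
# "critical_stock_shortage" events for the same ingredient raised within the 24h
# window are treated as duplicates.
#
#   repo = EventRepository(session)
#   existing = await repo.check_duplicate_alert(
#       tenant_id=tenant_id,
#       event_type="critical_stock_shortage",
#       entity_links={"ingredient": "ing-123"},
#       event_metadata={"ingredient_id": "ing-123"},
#   )
#   # Returns the earlier Event when entity_links and ingredient_id match, else None.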
async def get_summary(self, tenant_id: UUID) -> EventSummary:
|
||||
"""
|
||||
Get summary statistics for dashboard.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant UUID
|
||||
|
||||
Returns:
|
||||
EventSummary with counts and statistics
|
||||
"""
|
||||
|
||||
# Count by status
|
||||
status_query = select(
|
||||
Event.status,
|
||||
func.count(Event.id).label('count')
|
||||
).where(
|
||||
Event.tenant_id == tenant_id
|
||||
).group_by(Event.status)
|
||||
|
||||
status_result = await self.session.execute(status_query)
|
||||
status_counts = {row.status: row.count for row in status_result}
|
||||
|
||||
# Count by priority
|
||||
priority_query = select(
|
||||
Event.priority_level,
|
||||
func.count(Event.id).label('count')
|
||||
).where(
|
||||
and_(
|
||||
Event.tenant_id == tenant_id,
|
||||
Event.status == "active"
|
||||
)
|
||||
).group_by(Event.priority_level)
|
||||
|
||||
priority_result = await self.session.execute(priority_query)
|
||||
priority_counts = {row.priority_level: row.count for row in priority_result}
|
||||
|
||||
# Count by domain
|
||||
domain_query = select(
|
||||
Event.event_domain,
|
||||
func.count(Event.id).label('count')
|
||||
).where(
|
||||
and_(
|
||||
Event.tenant_id == tenant_id,
|
||||
Event.status == "active"
|
||||
)
|
||||
).group_by(Event.event_domain)
|
||||
|
||||
domain_result = await self.session.execute(domain_query)
|
||||
domain_counts = {row.event_domain: row.count for row in domain_result}
|
||||
|
||||
# Count by type class
|
||||
type_class_query = select(
|
||||
Event.type_class,
|
||||
func.count(Event.id).label('count')
|
||||
).where(
|
||||
and_(
|
||||
Event.tenant_id == tenant_id,
|
||||
Event.status == "active"
|
||||
)
|
||||
).group_by(Event.type_class)
|
||||
|
||||
type_class_result = await self.session.execute(type_class_query)
|
||||
type_class_counts = {row.type_class: row.count for row in type_class_result}
|
||||
|
||||
return EventSummary(
|
||||
total_active=status_counts.get("active", 0),
|
||||
total_acknowledged=status_counts.get("acknowledged", 0),
|
||||
total_resolved=status_counts.get("resolved", 0),
|
||||
by_priority=priority_counts,
|
||||
by_domain=domain_counts,
|
||||
by_type_class=type_class_counts,
|
||||
critical_alerts=priority_counts.get("critical", 0),
|
||||
important_alerts=priority_counts.get("important", 0)
|
||||
)
|
||||
|
||||
async def acknowledge_event(self, event_id: UUID) -> Event:
|
||||
"""Mark event as acknowledged"""
|
||||
event = await self.get_event_by_id(event_id)
|
||||
|
||||
if not event:
|
||||
raise ValueError(f"Event {event_id} not found")
|
||||
|
||||
event.status = "acknowledged"
|
||||
event.acknowledged_at = datetime.now(timezone.utc)
|
||||
|
||||
await self.session.commit()
|
||||
await self.session.refresh(event)
|
||||
|
||||
logger.info("event_acknowledged", event_id=event_id)
|
||||
|
||||
return event
|
||||
|
||||
async def resolve_event(self, event_id: UUID) -> Event:
|
||||
"""Mark event as resolved"""
|
||||
event = await self.get_event_by_id(event_id)
|
||||
|
||||
if not event:
|
||||
raise ValueError(f"Event {event_id} not found")
|
||||
|
||||
event.status = "resolved"
|
||||
event.resolved_at = datetime.now(timezone.utc)
|
||||
|
||||
await self.session.commit()
|
||||
await self.session.refresh(event)
|
||||
|
||||
logger.info("event_resolved", event_id=event_id)
|
||||
|
||||
return event
|
||||
|
||||
async def dismiss_event(self, event_id: UUID) -> Event:
|
||||
"""Mark event as dismissed"""
|
||||
event = await self.get_event_by_id(event_id)
|
||||
|
||||
if not event:
|
||||
raise ValueError(f"Event {event_id} not found")
|
||||
|
||||
event.status = "dismissed"
|
||||
|
||||
await self.session.commit()
|
||||
await self.session.refresh(event)
|
||||
|
||||
logger.info("event_dismissed", event_id=event_id)
|
||||
|
||||
return event
|
||||
|
||||
def _event_to_response(self, event: Event) -> EventResponse:
|
||||
"""Convert Event model to EventResponse"""
|
||||
return EventResponse(
|
||||
id=event.id,
|
||||
tenant_id=event.tenant_id,
|
||||
created_at=event.created_at,
|
||||
event_class=event.event_class,
|
||||
event_domain=event.event_domain,
|
||||
event_type=event.event_type,
|
||||
i18n=I18nContent(
|
||||
title_key=event.i18n_title_key,
|
||||
title_params=event.i18n_title_params,
|
||||
message_key=event.i18n_message_key,
|
||||
message_params=event.i18n_message_params
|
||||
),
|
||||
priority_score=event.priority_score,
|
||||
priority_level=event.priority_level,
|
||||
type_class=event.type_class,
|
||||
smart_actions=[SmartAction(**action) for action in event.smart_actions],
|
||||
status=event.status,
|
||||
orchestrator_context=event.orchestrator_context,
|
||||
business_impact=event.business_impact,
|
||||
urgency=event.urgency,
|
||||
user_agency=event.user_agency,
|
||||
ai_reasoning_summary_key=event.ai_reasoning_summary_key,
|
||||
ai_reasoning_summary_params=event.ai_reasoning_summary_params,
|
||||
ai_reasoning_details=event.ai_reasoning_details,
|
||||
confidence_score=event.confidence_score,
|
||||
entity_links=event.entity_links,
|
||||
event_metadata=event.event_metadata
|
||||
)
|
||||
0
services/alert_processor/app/schemas/__init__.py
Normal file
180
services/alert_processor/app/schemas/events.py
Normal file
@@ -0,0 +1,180 @@
|
||||
"""
|
||||
Pydantic schemas for enriched events.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Dict, Any, List, Optional, Literal
|
||||
from datetime import datetime
|
||||
from uuid import UUID
|
||||
|
||||
|
||||
class I18nContent(BaseModel):
|
||||
"""i18n content structure"""
|
||||
title_key: str
|
||||
title_params: Dict[str, Any] = {}
|
||||
message_key: str
|
||||
message_params: Dict[str, Any] = {}
|
||||
|
||||
|
||||
class SmartAction(BaseModel):
|
||||
"""Smart action button"""
|
||||
action_type: str
|
||||
label_key: str
|
||||
label_params: Dict[str, Any] = {}
|
||||
variant: Literal["primary", "secondary", "danger", "ghost"]
|
||||
disabled: bool = False
|
||||
disabled_reason_key: Optional[str] = None
|
||||
consequence_key: Optional[str] = None
|
||||
url: Optional[str] = None
|
||||
metadata: Dict[str, Any] = {}
|
||||
|
||||
|
||||
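# Illustrative payload (hypothetical PO values): the minimal required fields are
# action_type, label_key and variant; the remaining fields fall back to their defaults.
#
#   SmartAction(
#       action_type="view_po",
#       label_key="actions.view_po",
#       label_params={"po_number": "PO-1001"},
#       variant="secondary",
#       url="/app/procurement/purchase-orders/123",
#   )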
class BusinessImpact(BaseModel):
|
||||
"""Business impact context"""
|
||||
financial_impact_eur: float = 0
|
||||
affected_orders: int = 0
|
||||
affected_customers: List[str] = []
|
||||
production_delay_hours: float = 0
|
||||
estimated_revenue_loss_eur: float = 0
|
||||
customer_impact: Literal["low", "medium", "high"] = "low"
|
||||
waste_risk_kg: float = 0
|
||||
|
||||
|
||||
class Urgency(BaseModel):
|
||||
"""Urgency context"""
|
||||
hours_until_consequence: float = 24
|
||||
can_wait_until_tomorrow: bool = True
|
||||
deadline_utc: Optional[str] = None
|
||||
peak_hour_relevant: bool = False
|
||||
hours_pending: float = 0
|
||||
|
||||
|
||||
class UserAgency(BaseModel):
|
||||
"""User agency context"""
|
||||
can_user_fix: bool = True
|
||||
requires_external_party: bool = False
|
||||
external_party_name: Optional[str] = None
|
||||
external_party_contact: Optional[str] = None
|
||||
blockers: List[str] = []
|
||||
suggested_workaround: Optional[str] = None
|
||||
|
||||
|
||||
class OrchestratorContext(BaseModel):
|
||||
"""AI orchestrator context"""
|
||||
already_addressed: bool = False
|
||||
action_id: Optional[str] = None
|
||||
action_type: Optional[str] = None
|
||||
action_summary: Optional[str] = None
|
||||
reasoning: Optional[str] = None
|
||||
confidence: float = 0.8
|
||||
|
||||
|
||||
class EnrichedEvent(BaseModel):
|
||||
"""Complete enriched event with all context"""
|
||||
|
||||
# Core fields
|
||||
id: str
|
||||
tenant_id: str
|
||||
created_at: Optional[datetime] = None
|
||||
updated_at: Optional[datetime] = None
|
||||
|
||||
# Classification
|
||||
event_class: Literal["alert", "notification", "recommendation"]
|
||||
event_domain: str
|
||||
event_type: str
|
||||
service: str
|
||||
|
||||
# i18n content
|
||||
i18n: I18nContent
|
||||
|
||||
# Priority
|
||||
priority_score: int = Field(ge=0, le=100)
|
||||
priority_level: Literal["critical", "important", "standard", "info"]
|
||||
type_class: str
|
||||
|
||||
# Enrichment contexts
|
||||
orchestrator_context: Optional[OrchestratorContext] = None
|
||||
business_impact: Optional[BusinessImpact] = None
|
||||
urgency: Optional[Urgency] = None
|
||||
user_agency: Optional[UserAgency] = None
|
||||
trend_context: Optional[Dict[str, Any]] = None
|
||||
|
||||
# Smart actions
|
||||
smart_actions: List[SmartAction] = []
|
||||
|
||||
# AI reasoning
|
||||
ai_reasoning_summary_key: Optional[str] = None
|
||||
ai_reasoning_summary_params: Optional[Dict[str, Any]] = None
|
||||
ai_reasoning_details: Optional[Dict[str, Any]] = None
|
||||
confidence_score: Optional[float] = None
|
||||
|
||||
# Entity references
|
||||
entity_links: Dict[str, str] = {}
|
||||
|
||||
# Status
|
||||
status: Literal["active", "acknowledged", "resolved", "dismissed"] = "active"
|
||||
resolved_at: Optional[datetime] = None
|
||||
acknowledged_at: Optional[datetime] = None
|
||||
|
||||
# Original metadata
|
||||
event_metadata: Dict[str, Any] = {}
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
class EventResponse(BaseModel):
|
||||
"""Event response for API"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
created_at: datetime
|
||||
event_class: str
|
||||
event_domain: str
|
||||
event_type: str
|
||||
i18n: I18nContent
|
||||
priority_score: int
|
||||
priority_level: str
|
||||
type_class: str
|
||||
smart_actions: List[SmartAction]
|
||||
status: str
|
||||
|
||||
# Optional enrichment contexts (only if present)
|
||||
orchestrator_context: Optional[Dict[str, Any]] = None
|
||||
business_impact: Optional[Dict[str, Any]] = None
|
||||
urgency: Optional[Dict[str, Any]] = None
|
||||
user_agency: Optional[Dict[str, Any]] = None
|
||||
|
||||
# AI reasoning
|
||||
ai_reasoning_summary_key: Optional[str] = None
|
||||
ai_reasoning_summary_params: Optional[Dict[str, Any]] = None
|
||||
ai_reasoning_details: Optional[Dict[str, Any]] = None
|
||||
confidence_score: Optional[float] = None
|
||||
|
||||
entity_links: Dict[str, str] = {}
|
||||
event_metadata: Optional[Dict[str, Any]] = None
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
class EventSummary(BaseModel):
|
||||
"""Summary statistics for dashboard"""
|
||||
total_active: int
|
||||
total_acknowledged: int
|
||||
total_resolved: int
|
||||
by_priority: Dict[str, int]
|
||||
by_domain: Dict[str, int]
|
||||
by_type_class: Dict[str, int]
|
||||
critical_alerts: int
|
||||
important_alerts: int
|
||||
|
||||
|
||||
class EventFilter(BaseModel):
|
||||
"""Filter criteria for event queries"""
|
||||
tenant_id: UUID
|
||||
event_class: Optional[str] = None
|
||||
priority_level: Optional[List[str]] = None
|
||||
status: Optional[List[str]] = None
|
||||
event_domain: Optional[str] = None
|
||||
limit: int = Field(default=50, le=100)
|
||||
offset: int = 0
|
||||
0
services/alert_processor/app/services/__init__.py
Normal file
246
services/alert_processor/app/services/enrichment_orchestrator.py
Normal file
@@ -0,0 +1,246 @@
|
||||
"""
|
||||
Enrichment orchestrator service.
|
||||
|
||||
Coordinates the complete enrichment pipeline for events.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
import structlog
|
||||
from uuid import uuid4
|
||||
|
||||
from shared.messaging import MinimalEvent
|
||||
from app.schemas.events import EnrichedEvent, I18nContent, BusinessImpact, Urgency, UserAgency, OrchestratorContext
|
||||
from app.enrichment.message_generator import MessageGenerator
|
||||
from app.enrichment.priority_scorer import PriorityScorer
|
||||
from app.enrichment.orchestrator_client import OrchestratorClient
|
||||
from app.enrichment.smart_actions import SmartActionGenerator
|
||||
from app.enrichment.business_impact import BusinessImpactAnalyzer
|
||||
from app.enrichment.urgency_analyzer import UrgencyAnalyzer
|
||||
from app.enrichment.user_agency import UserAgencyAnalyzer
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class EnrichmentOrchestrator:
|
||||
"""Coordinates the enrichment pipeline for events"""
|
||||
|
||||
def __init__(self):
|
||||
self.message_gen = MessageGenerator()
|
||||
self.priority_scorer = PriorityScorer()
|
||||
self.orchestrator_client = OrchestratorClient()
|
||||
self.action_gen = SmartActionGenerator()
|
||||
self.impact_analyzer = BusinessImpactAnalyzer()
|
||||
self.urgency_analyzer = UrgencyAnalyzer()
|
||||
self.agency_analyzer = UserAgencyAnalyzer()
|
||||
|
||||
async def enrich_event(self, event: MinimalEvent) -> EnrichedEvent:
|
||||
"""
|
||||
Run complete enrichment pipeline.
|
||||
|
||||
Steps:
|
||||
1. Generate i18n message keys and parameters
|
||||
2. Query orchestrator for AI context
|
||||
3. Analyze business impact
|
||||
4. Assess urgency
|
||||
5. Determine user agency
|
||||
6. Calculate priority score (0-100)
|
||||
7. Determine priority level
|
||||
8. Generate smart actions
|
||||
9. Determine type class
|
||||
10. Extract AI reasoning from metadata
11. Build enriched event
|
||||
|
||||
Args:
|
||||
event: Minimal event from service
|
||||
|
||||
Returns:
|
||||
Enriched event with all context
|
||||
"""
|
||||
|
||||
logger.info("enrichment_started", event_type=event.event_type, tenant_id=event.tenant_id)
|
||||
|
||||
# 1. Generate i18n message keys and parameters
|
||||
i18n_dict = self.message_gen.generate_message(event.event_type, event.metadata, event.event_class)
|
||||
i18n = I18nContent(**i18n_dict)
|
||||
|
||||
# 2. Query orchestrator for AI context (awaited before the local enrichment steps)
|
||||
orchestrator_context_dict = await self.orchestrator_client.get_context(
|
||||
tenant_id=event.tenant_id,
|
||||
event_type=event.event_type,
|
||||
metadata=event.metadata
|
||||
)
|
||||
|
||||
# Fallback: If orchestrator service didn't return context with already_addressed,
|
||||
# check if the event metadata contains orchestrator_context (e.g., from demo seeder)
|
||||
if not orchestrator_context_dict.get("already_addressed"):
|
||||
metadata_context = event.metadata.get("orchestrator_context")
|
||||
if metadata_context and isinstance(metadata_context, dict):
|
||||
# Merge metadata context into orchestrator context
|
||||
orchestrator_context_dict.update(metadata_context)
|
||||
logger.debug(
|
||||
"using_metadata_orchestrator_context",
|
||||
event_type=event.event_type,
|
||||
already_addressed=metadata_context.get("already_addressed")
|
||||
)
|
||||
|
||||
# Convert to OrchestratorContext if data exists
|
||||
orchestrator_context = None
|
||||
if orchestrator_context_dict:
|
||||
orchestrator_context = OrchestratorContext(**orchestrator_context_dict)
|
||||
|
||||
# 3. Analyze business impact
|
||||
business_impact_dict = self.impact_analyzer.analyze(
|
||||
event_type=event.event_type,
|
||||
metadata=event.metadata
|
||||
)
|
||||
business_impact = BusinessImpact(**business_impact_dict)
|
||||
|
||||
# 4. Assess urgency
|
||||
urgency_dict = self.urgency_analyzer.analyze(
|
||||
event_type=event.event_type,
|
||||
metadata=event.metadata
|
||||
)
|
||||
urgency = Urgency(**urgency_dict)
|
||||
|
||||
# 5. Determine user agency
|
||||
user_agency_dict = self.agency_analyzer.analyze(
|
||||
event_type=event.event_type,
|
||||
metadata=event.metadata,
|
||||
orchestrator_context=orchestrator_context_dict
|
||||
)
|
||||
user_agency = UserAgency(**user_agency_dict)
|
||||
|
||||
# 6. Calculate priority score (0-100)
|
||||
priority_score = self.priority_scorer.calculate_priority(
|
||||
business_impact=business_impact_dict,
|
||||
urgency=urgency_dict,
|
||||
user_agency=user_agency_dict,
|
||||
orchestrator_context=orchestrator_context_dict
|
||||
)
|
||||
|
||||
# 7. Determine priority level
|
||||
priority_level = self._get_priority_level(priority_score)
|
||||
|
||||
# 8. Generate smart actions
|
||||
smart_actions = self.action_gen.generate_actions(
|
||||
event_type=event.event_type,
|
||||
metadata=event.metadata,
|
||||
orchestrator_context=orchestrator_context_dict
|
||||
)
|
||||
|
||||
# 9. Determine type class
|
||||
type_class = self._determine_type_class(orchestrator_context_dict, event.metadata)
|
||||
|
||||
# 10. Extract AI reasoning from metadata (if present)
|
||||
reasoning_data = event.metadata.get('reasoning_data')
|
||||
ai_reasoning_details = None
|
||||
confidence_score = None
|
||||
|
||||
if reasoning_data:
|
||||
# Store the complete reasoning data structure
|
||||
ai_reasoning_details = reasoning_data
|
||||
|
||||
# Extract confidence if available
|
||||
if isinstance(reasoning_data, dict):
|
||||
metadata_section = reasoning_data.get('metadata', {})
|
||||
if isinstance(metadata_section, dict) and 'confidence' in metadata_section:
|
||||
confidence_score = metadata_section.get('confidence')
|
||||
|
||||
# 11. Build enriched event
|
||||
enriched = EnrichedEvent(
|
||||
id=str(uuid4()),
|
||||
tenant_id=event.tenant_id,
|
||||
event_class=event.event_class,
|
||||
event_domain=event.event_domain,
|
||||
event_type=event.event_type,
|
||||
service=event.service,
|
||||
i18n=i18n,
|
||||
priority_score=priority_score,
|
||||
priority_level=priority_level,
|
||||
type_class=type_class,
|
||||
orchestrator_context=orchestrator_context,
|
||||
business_impact=business_impact,
|
||||
urgency=urgency,
|
||||
user_agency=user_agency,
|
||||
smart_actions=smart_actions,
|
||||
ai_reasoning_details=ai_reasoning_details,
|
||||
confidence_score=confidence_score,
|
||||
entity_links=self._extract_entity_links(event.metadata),
|
||||
status="active",
|
||||
event_metadata=event.metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"enrichment_completed",
|
||||
event_type=event.event_type,
|
||||
priority_score=priority_score,
|
||||
priority_level=priority_level,
|
||||
type_class=type_class
|
||||
)
|
||||
|
||||
return enriched
|
||||
|
||||
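# Illustrative usage sketch (runs inside an async context; the MinimalEvent constructor
# keywords are assumed to mirror the attributes read above, and all values are hypothetical):
#
#   orchestrator = EnrichmentOrchestrator()
#   event = MinimalEvent(
#       tenant_id="11111111-1111-1111-1111-111111111111",
#       event_class="alert",
#       event_domain="inventory",
#       event_type="critical_stock_shortage",
#       service="inventory",
#       metadata={"ingredient_id": "ing-123", "supplier_name": "Example Supplier"},
#   )
#   enriched = await orchestrator.enrich_event(event)
#   # enriched.priority_level is one of "critical" / "important" / "standard" / "info"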
def _get_priority_level(self, score: int) -> str:
|
||||
"""
|
||||
Convert numeric score to priority level.
|
||||
|
||||
- 90-100: critical
|
||||
- 70-89: important
|
||||
- 50-69: standard
|
||||
- 0-49: info
|
||||
"""
|
||||
if score >= 90:
|
||||
return "critical"
|
||||
elif score >= 70:
|
||||
return "important"
|
||||
elif score >= 50:
|
||||
return "standard"
|
||||
else:
|
||||
return "info"
|
||||
|
||||
def _determine_type_class(self, orchestrator_context: dict, metadata: dict = None) -> str:
|
||||
"""
|
||||
Determine type class based on orchestrator context or metadata override.
|
||||
|
||||
Priority order:
|
||||
1. Explicit type_class in metadata (e.g., from demo seeder)
|
||||
2. orchestrator_context.already_addressed = True -> "prevented_issue"
|
||||
3. Default: "action_needed"
|
||||
|
||||
- prevented_issue: AI already handled it
|
||||
- action_needed: User action required
|
||||
"""
|
||||
# Check for explicit type_class in metadata (allows demo seeder override)
|
||||
if metadata:
|
||||
explicit_type_class = metadata.get("type_class")
|
||||
if explicit_type_class in ("prevented_issue", "action_needed"):
|
||||
return explicit_type_class
|
||||
|
||||
# Determine from orchestrator context
|
||||
if orchestrator_context and orchestrator_context.get("already_addressed"):
|
||||
return "prevented_issue"
|
||||
return "action_needed"
|
||||
|
||||
def _extract_entity_links(self, metadata: dict) -> Dict[str, str]:
|
||||
"""
|
||||
Extract entity references from metadata.
|
||||
|
||||
Maps metadata keys to entity types for frontend deep linking.
|
||||
"""
|
||||
links = {}
|
||||
|
||||
# Map metadata keys to entity types
|
||||
entity_mappings = {
|
||||
"po_id": "purchase_order",
|
||||
"batch_id": "production_batch",
|
||||
"ingredient_id": "ingredient",
|
||||
"order_id": "order",
|
||||
"supplier_id": "supplier",
|
||||
"equipment_id": "equipment",
|
||||
"sensor_id": "sensor"
|
||||
}
|
||||
|
||||
for key, entity_type in entity_mappings.items():
|
||||
if key in metadata:
|
||||
links[entity_type] = str(metadata[key])
|
||||
|
||||
return links
|
||||
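For quick reference, the entity-link extraction above is a keyed filter over event metadata. A minimal standalone sketch of the same idea; the helper name and sample values are illustrative and not part of the service:

# Illustrative sketch only: mirrors the _extract_entity_links mapping so it runs standalone.
ENTITY_MAPPINGS = {
    "po_id": "purchase_order",
    "batch_id": "production_batch",
    "ingredient_id": "ingredient",
}

def extract_entity_links(metadata: dict) -> dict:
    # Keep only metadata keys with a known entity type; stringify IDs for frontend deep links.
    return {entity: str(metadata[key]) for key, entity in ENTITY_MAPPINGS.items() if key in metadata}

print(extract_entity_links({"po_id": "PO-1042", "ingredient_id": 42, "note": "ignored"}))
# -> {'purchase_order': 'PO-1042', 'ingredient': '42'}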
129
services/alert_processor/app/services/sse_service.py
Normal file
@@ -0,0 +1,129 @@
"""
Server-Sent Events (SSE) service using Redis pub/sub.
"""

from typing import AsyncGenerator
import json
import structlog
from redis.asyncio import Redis

from app.core.config import settings
from app.models.events import Event
from shared.redis_utils import get_redis_client

logger = structlog.get_logger()


class SSEService:
    """
    Manage real-time event streaming via Redis pub/sub.

    Pattern: alerts:{tenant_id}
    """

    def __init__(self, redis: Redis = None):
        self._redis = redis  # Use private attribute to allow lazy loading
        self.prefix = settings.REDIS_SSE_PREFIX

    @property
    async def redis(self) -> Redis:
        """
        Lazy load Redis client if not provided through dependency injection.
        Uses the shared Redis utilities for consistency.
        """
        if self._redis is None:
            self._redis = await get_redis_client()
        return self._redis

    async def publish_event(self, event: Event) -> bool:
        """
        Publish event to Redis for SSE streaming.

        Args:
            event: Event to publish

        Returns:
            True if published successfully
        """
        try:
            redis_client = await self.redis

            # Build channel name
            channel = f"{self.prefix}:{event.tenant_id}"

            # Build message payload
            payload = {
                "id": str(event.id),
                "tenant_id": str(event.tenant_id),
                "event_class": event.event_class,
                "event_domain": event.event_domain,
                "event_type": event.event_type,
                "priority_score": event.priority_score,
                "priority_level": event.priority_level,
                "type_class": event.type_class,
                "status": event.status,
                "created_at": event.created_at.isoformat(),
                "i18n": {
                    "title_key": event.i18n_title_key,
                    "title_params": event.i18n_title_params,
                    "message_key": event.i18n_message_key,
                    "message_params": event.i18n_message_params
                },
                "smart_actions": event.smart_actions,
                "entity_links": event.entity_links
            }

            # Publish to Redis
            await redis_client.publish(channel, json.dumps(payload))

            logger.debug(
                "sse_event_published",
                channel=channel,
                event_type=event.event_type,
                event_id=str(event.id)
            )

            return True

        except Exception as e:
            logger.error(
                "sse_publish_failed",
                error=str(e),
                event_id=str(event.id)
            )
            return False

    async def subscribe_to_tenant(
        self,
        tenant_id: str
    ) -> AsyncGenerator[str, None]:
        """
        Subscribe to tenant's alert stream.

        Args:
            tenant_id: Tenant UUID

        Yields:
            JSON-encoded event messages
        """
        redis_client = await self.redis
        channel = f"{self.prefix}:{tenant_id}"

        logger.info("sse_subscription_started", channel=channel)

        # Subscribe to Redis channel
        pubsub = redis_client.pubsub()
        await pubsub.subscribe(channel)

        try:
            async for message in pubsub.listen():
                if message["type"] == "message":
                    yield message["data"]

        except Exception as e:
            logger.error("sse_subscription_error", error=str(e), channel=channel)
            raise
        finally:
            await pubsub.unsubscribe(channel)
            await pubsub.close()
            logger.info("sse_subscription_closed", channel=channel)
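The generator above yields raw JSON strings; how it gets wired to an HTTP endpoint is not part of this file. A minimal sketch of one possible FastAPI route, assuming each message should be framed as an SSE "data:" line; the route path, import path, and service instantiation are assumptions for illustration:

# Illustrative sketch only: a hypothetical SSE endpoint built on SSEService.subscribe_to_tenant.
from fastapi import APIRouter
from fastapi.responses import StreamingResponse

from app.services.sse_service import SSEService

router = APIRouter()
sse_service = SSEService()  # falls back to the lazy Redis client shown above


@router.get("/alerts/stream/{tenant_id}")
async def stream_alerts(tenant_id: str):
    async def event_stream():
        # Wrap each JSON message in SSE framing before sending it to the client.
        async for message in sse_service.subscribe_to_tenant(tenant_id):
            yield f"data: {message}\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")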
0
services/alert_processor/app/utils/__init__.py
Normal file
556
services/alert_processor/app/utils/message_templates.py
Normal file
@@ -0,0 +1,556 @@
"""
Alert type definitions with i18n key mappings.

Each alert type maps to:
- title_key: i18n key for title (e.g., "alerts.critical_stock_shortage.title")
- title_params: parameter mappings from metadata to i18n params
- message_variants: different message keys based on context
- message_params: parameter mappings for message

When adding new alert types:
1. Add entry to ALERT_TEMPLATES
2. Ensure corresponding translations exist in frontend/src/locales/*/alerts.json
3. Document required metadata fields
"""

# Alert type templates
ALERT_TEMPLATES = {
    # ==================== INVENTORY ALERTS ====================

    "critical_stock_shortage": {
        "title_key": "alerts.critical_stock_shortage.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "with_po_pending": "alerts.critical_stock_shortage.message_with_po_pending",
            "with_po_created": "alerts.critical_stock_shortage.message_with_po_created",
            "with_hours": "alerts.critical_stock_shortage.message_with_hours",
            "with_date": "alerts.critical_stock_shortage.message_with_date",
            "generic": "alerts.critical_stock_shortage.message_generic"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "current_stock_kg": "current_stock",
            "required_stock_kg": "required_stock",
            "hours_until": "hours_until",
            "production_day_name": "production_date",
            "po_id": "po_id",
            "po_amount": "po_amount",
            "delivery_day_name": "delivery_date"
        }
    },

    "low_stock_warning": {
        "title_key": "alerts.low_stock.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "with_po": "alerts.low_stock.message_with_po",
            "generic": "alerts.low_stock.message_generic"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "current_stock_kg": "current_stock",
            "minimum_stock_kg": "minimum_stock"
        }
    },

    "overstock_warning": {
        "title_key": "alerts.overstock_warning.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "generic": "alerts.overstock_warning.message"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "current_stock_kg": "current_stock",
            "maximum_stock_kg": "maximum_stock",
            "excess_amount_kg": "excess_amount"
        }
    },

    "expired_products": {
        "title_key": "alerts.expired_products.title",
        "title_params": {
            "count": "expired_count"
        },
        "message_variants": {
            "with_names": "alerts.expired_products.message_with_names",
            "generic": "alerts.expired_products.message_generic"
        },
        "message_params": {
            "expired_count": "expired_count",
            "product_names": "product_names",
            "total_value_eur": "total_value"
        }
    },

    "urgent_expiry": {
        "title_key": "alerts.urgent_expiry.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "generic": "alerts.urgent_expiry.message"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "days_until_expiry": "days_until_expiry",
            "quantity_kg": "quantity"
        }
    },

    "temperature_breach": {
        "title_key": "alerts.temperature_breach.title",
        "title_params": {
            "location": "location"
        },
        "message_variants": {
            "generic": "alerts.temperature_breach.message"
        },
        "message_params": {
            "location": "location",
            "temperature": "temperature",
            "max_threshold": "max_threshold",
            "duration_minutes": "duration_minutes"
        }
    },

    "stock_depleted_by_order": {
        "title_key": "alerts.stock_depleted_by_order.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "with_supplier": "alerts.stock_depleted_by_order.message_with_supplier",
            "generic": "alerts.stock_depleted_by_order.message_generic"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "shortage_kg": "shortage_amount",
            "supplier_name": "supplier_name",
            "supplier_contact": "supplier_contact"
        }
    },

    # ==================== PRODUCTION ALERTS ====================

    "production_delay": {
        "title_key": "alerts.production_delay.title",
        "title_params": {
            "product_name": "product_name",
            "batch_number": "batch_number"
        },
        "message_variants": {
            "with_customers": "alerts.production_delay.message_with_customers",
            "with_orders": "alerts.production_delay.message_with_orders",
            "generic": "alerts.production_delay.message_generic"
        },
        "message_params": {
            "product_name": "product_name",
            "batch_number": "batch_number",
            "delay_minutes": "delay_minutes",
            "affected_orders": "affected_orders",
            "customer_names": "customer_names"
        }
    },

    "equipment_failure": {
        "title_key": "alerts.equipment_failure.title",
        "title_params": {
            "equipment_name": "equipment_name"
        },
        "message_variants": {
            "with_batches": "alerts.equipment_failure.message_with_batches",
            "generic": "alerts.equipment_failure.message_generic"
        },
        "message_params": {
            "equipment_name": "equipment_name",
            "equipment_type": "equipment_type",
            "affected_batches": "affected_batches"
        }
    },

    "maintenance_required": {
        "title_key": "alerts.maintenance_required.title",
        "title_params": {
            "equipment_name": "equipment_name"
        },
        "message_variants": {
            "with_hours": "alerts.maintenance_required.message_with_hours",
            "with_days": "alerts.maintenance_required.message_with_days",
            "generic": "alerts.maintenance_required.message_generic"
        },
        "message_params": {
            "equipment_name": "equipment_name",
            "hours_overdue": "hours_overdue",
            "days_overdue": "days_overdue"
        }
    },

    "low_equipment_efficiency": {
        "title_key": "alerts.low_equipment_efficiency.title",
        "title_params": {
            "equipment_name": "equipment_name"
        },
        "message_variants": {
            "generic": "alerts.low_equipment_efficiency.message"
        },
        "message_params": {
            "equipment_name": "equipment_name",
            "efficiency_percentage": "efficiency_percentage",
            "target_efficiency": "target_efficiency"
        }
    },

    "capacity_overload": {
        "title_key": "alerts.capacity_overload.title",
        "title_params": {
            "date": "planned_date"
        },
        "message_variants": {
            "generic": "alerts.capacity_overload.message"
        },
        "message_params": {
            "planned_date": "planned_date",
            "capacity_percentage": "capacity_percentage",
            "equipment_count": "equipment_count"
        }
    },

    "quality_control_failure": {
        "title_key": "alerts.quality_control_failure.title",
        "title_params": {
            "product_name": "product_name",
            "batch_number": "batch_number"
        },
        "message_variants": {
            "generic": "alerts.quality_control_failure.message"
        },
        "message_params": {
            "product_name": "product_name",
            "batch_number": "batch_number",
            "check_type": "check_type",
            "quality_score": "quality_score",
            "defect_count": "defect_count"
        }
    },

    # ==================== PROCUREMENT ALERTS ====================

    "po_approval_needed": {
        "title_key": "alerts.po_approval_needed.title",
        "title_params": {
            "po_number": "po_number"
        },
        "message_variants": {
            "generic": "alerts.po_approval_needed.message"
        },
        "message_params": {
            "supplier_name": "supplier_name",
            "total_amount": "total_amount",
            "currency": "currency",
            "required_delivery_date": "required_delivery_date",
            "items_count": "items_count"
        }
    },

    "po_approval_escalation": {
        "title_key": "alerts.po_approval_escalation.title",
        "title_params": {
            "po_number": "po_number"
        },
        "message_variants": {
            "generic": "alerts.po_approval_escalation.message"
        },
        "message_params": {
            "po_number": "po_number",
            "supplier_name": "supplier_name",
            "hours_pending": "hours_pending",
            "total_amount": "total_amount"
        }
    },

    "delivery_overdue": {
        "title_key": "alerts.delivery_overdue.title",
        "title_params": {
            "po_number": "po_number"
        },
        "message_variants": {
            "generic": "alerts.delivery_overdue.message"
        },
        "message_params": {
            "po_number": "po_number",
            "supplier_name": "supplier_name",
            "days_overdue": "days_overdue",
            "expected_date": "expected_date"
        }
    },

    # ==================== SUPPLY CHAIN ALERTS ====================

    "supplier_delay": {
        "title_key": "alerts.supplier_delay.title",
        "title_params": {
            "supplier_name": "supplier_name"
        },
        "message_variants": {
            "generic": "alerts.supplier_delay.message"
        },
        "message_params": {
            "supplier_name": "supplier_name",
            "po_count": "po_count",
            "avg_delay_days": "avg_delay_days"
        }
    },

    # ==================== DEMAND ALERTS ====================

    "demand_surge_weekend": {
        "title_key": "alerts.demand_surge_weekend.title",
        "title_params": {},
        "message_variants": {
            "generic": "alerts.demand_surge_weekend.message"
        },
        "message_params": {
            "product_name": "product_name",
            "predicted_demand": "predicted_demand",
            "current_stock": "current_stock"
        }
    },

    "weather_impact_alert": {
        "title_key": "alerts.weather_impact_alert.title",
        "title_params": {},
        "message_variants": {
            "generic": "alerts.weather_impact_alert.message"
        },
        "message_params": {
            "weather_condition": "weather_condition",
            "impact_percentage": "impact_percentage",
            "date": "date"
        }
    },

    # ==================== PRODUCTION BATCH ALERTS ====================

    "production_batch_start": {
        "title_key": "alerts.production_batch_start.title",
        "title_params": {
            "product_name": "product_name"
        },
        "message_variants": {
            "generic": "alerts.production_batch_start.message"
        },
        "message_params": {
            "product_name": "product_name",
            "batch_number": "batch_number",
            "quantity_planned": "quantity_planned",
            "unit": "unit",
            "priority": "priority"
        }
    },

    # ==================== GENERIC FALLBACK ====================

    "generic": {
        "title_key": "alerts.generic.title",
        "title_params": {},
        "message_variants": {
            "generic": "alerts.generic.message"
        },
        "message_params": {
            "event_type": "event_type"
        }
    }
}


# Notification templates (informational events)
NOTIFICATION_TEMPLATES = {
    "po_approved": {
        "title_key": "notifications.po_approved.title",
        "title_params": {
            "po_number": "po_number"
        },
        "message_variants": {
            "generic": "notifications.po_approved.message"
        },
        "message_params": {
            "supplier_name": "supplier_name",
            "total_amount": "total_amount"
        }
    },

    "batch_state_changed": {
        "title_key": "notifications.batch_state_changed.title",
        "title_params": {
            "product_name": "product_name"
        },
        "message_variants": {
            "generic": "notifications.batch_state_changed.message"
        },
        "message_params": {
            "batch_number": "batch_number",
            "new_status": "new_status",
            "quantity": "quantity",
            "unit": "unit"
        }
    },

    "stock_received": {
        "title_key": "notifications.stock_received.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "generic": "notifications.stock_received.message"
        },
        "message_params": {
            "quantity_received": "quantity_received",
            "unit": "unit",
            "supplier_name": "supplier_name"
        }
    }
}


# Recommendation templates (optimization suggestions)
RECOMMENDATION_TEMPLATES = {
    "inventory_optimization": {
        "title_key": "recommendations.inventory_optimization.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "generic": "recommendations.inventory_optimization.message"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "current_max_kg": "current_max",
            "suggested_max_kg": "suggested_max",
            "recommendation_type": "recommendation_type"
        }
    },

    "production_efficiency": {
        "title_key": "recommendations.production_efficiency.title",
        "title_params": {
            "product_name": "product_name"
        },
        "message_variants": {
            "generic": "recommendations.production_efficiency.message"
        },
        "message_params": {
            "product_name": "product_name",
            "potential_time_saved_minutes": "time_saved",
            "suggestion": "suggestion"
        }
    },

    # ==================== AI INSIGHTS RECOMMENDATIONS ====================

    "ai_yield_prediction": {
        "title_key": "recommendations.ai_yield_prediction.title",
        "title_params": {
            "recipe_name": "recipe_name"
        },
        "message_variants": {
            "generic": "recommendations.ai_yield_prediction.message"
        },
        "message_params": {
            "recipe_name": "recipe_name",
            "predicted_yield_percent": "predicted_yield",
            "confidence_percent": "confidence",
            "recommendation": "recommendation"
        }
    },

    "ai_safety_stock_optimization": {
        "title_key": "recommendations.ai_safety_stock_optimization.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "generic": "recommendations.ai_safety_stock_optimization.message"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "suggested_safety_stock_kg": "suggested_safety_stock",
            "current_safety_stock_kg": "current_safety_stock",
            "estimated_savings_eur": "estimated_savings",
            "confidence_percent": "confidence"
        }
    },

    "ai_supplier_recommendation": {
        "title_key": "recommendations.ai_supplier_recommendation.title",
        "title_params": {
            "supplier_name": "supplier_name"
        },
        "message_variants": {
            "generic": "recommendations.ai_supplier_recommendation.message"
        },
        "message_params": {
            "supplier_name": "supplier_name",
            "reliability_score": "reliability_score",
            "recommendation": "recommendation",
            "confidence_percent": "confidence"
        }
    },

    "ai_price_forecast": {
        "title_key": "recommendations.ai_price_forecast.title",
        "title_params": {
            "ingredient_name": "ingredient_name"
        },
        "message_variants": {
            "generic": "recommendations.ai_price_forecast.message"
        },
        "message_params": {
            "ingredient_name": "ingredient_name",
            "predicted_price_eur": "predicted_price",
            "current_price_eur": "current_price",
            "price_trend": "price_trend",
            "recommendation": "recommendation",
            "confidence_percent": "confidence"
        }
    },

    "ai_demand_forecast": {
        "title_key": "recommendations.ai_demand_forecast.title",
        "title_params": {
            "product_name": "product_name"
        },
        "message_variants": {
            "generic": "recommendations.ai_demand_forecast.message"
        },
        "message_params": {
            "product_name": "product_name",
            "predicted_demand": "predicted_demand",
            "forecast_period": "forecast_period",
            "confidence_percent": "confidence",
            "recommendation": "recommendation"
        }
    },

    "ai_business_rule": {
        "title_key": "recommendations.ai_business_rule.title",
        "title_params": {
            "rule_category": "rule_category"
        },
        "message_variants": {
            "generic": "recommendations.ai_business_rule.message"
        },
        "message_params": {
            "rule_category": "rule_category",
            "rule_description": "rule_description",
            "confidence_percent": "confidence",
            "recommendation": "recommendation"
        }
    }
}
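The docstring at the top of this module describes the key and parameter mappings but leaves the actual lookup to the enrichment code. A minimal sketch of how a renderer might resolve a template into an i18n payload, reading each params dict as metadata key -> i18n parameter name and falling back to the "generic" entries; the helper name and sample event are illustrative, not this module's API:

# Illustrative sketch only: resolve_alert_i18n is a hypothetical helper, not part of this module.
def resolve_alert_i18n(event_type: str, metadata: dict, variant: str = "generic") -> dict:
    template = ALERT_TEMPLATES.get(event_type, ALERT_TEMPLATES["generic"])
    variants = template["message_variants"]
    return {
        "title_key": template["title_key"],
        # Each params dict is read as metadata key -> i18n parameter name.
        "title_params": {param: metadata.get(key) for key, param in template["title_params"].items()},
        "message_key": variants.get(variant, variants["generic"]),
        "message_params": {param: metadata.get(key) for key, param in template["message_params"].items()},
    }

# Example: a low stock event that already has a purchase order in flight.
payload = resolve_alert_i18n(
    "low_stock_warning",
    {"ingredient_name": "Flour T550", "current_stock_kg": 12.5, "minimum_stock_kg": 40.0},
    variant="with_po",
)
# payload["message_key"] == "alerts.low_stock.message_with_po"
# payload["message_params"] == {"ingredient_name": "Flour T550", "current_stock": 12.5, "minimum_stock": 40.0}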