New alert service

This commit is contained in:
Urtzi Alfaro
2025-12-05 20:07:01 +01:00
parent 1fe3a73549
commit 667e6e0404
393 changed files with 26002 additions and 61033 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,420 +0,0 @@
"""
Delivery Tracking Service
Tracks purchase order deliveries and generates appropriate alerts:
- DELIVERY_SCHEDULED: When PO is approved and delivery date is set
- DELIVERY_ARRIVING_SOON: 2 hours before delivery window
- DELIVERY_OVERDUE: 30 minutes after expected delivery time
- STOCK_RECEIPT_INCOMPLETE: If delivery not marked as received
Integrates with procurement service to get PO details and expected delivery windows.
"""
import structlog
from datetime import datetime, timedelta, timezone
from typing import Dict, Any, Optional, List
from uuid import UUID
import httpx
from shared.schemas.alert_types import AlertTypeConstants
from shared.alerts.base_service import BaseAlertService
logger = structlog.get_logger()
class DeliveryTrackingService:
    """Tracks deliveries and generates lifecycle alerts.

    Watches approved purchase orders, compares their expected delivery
    windows against the current time, and emits DELIVERY_ARRIVING_SOON,
    DELIVERY_OVERDUE and STOCK_RECEIPT_INCOMPLETE alerts via the shared
    alert service. Redis keys are used to de-duplicate alerts per PO.
    """

    def __init__(self, config, db_manager, redis_client, rabbitmq_client):
        # Service configuration; must expose PROCUREMENT_SERVICE_URL and
        # INVENTORY_SERVICE_URL used by the HTTP calls below.
        self.config = config
        self.db_manager = db_manager
        # Redis is used only for alert de-duplication keys.
        self.redis = redis_client
        self.rabbitmq = rabbitmq_client
        # Shared alert emitter (publishes to the alerting pipeline).
        self.alert_service = BaseAlertService(config)
        # One pooled async HTTP client for all service-to-service calls;
        # closed in close().
        self.http_client = httpx.AsyncClient(
            timeout=30.0,
            follow_redirects=True
        )
async def check_expected_deliveries(self, tenant_id: UUID) -> Dict[str, int]:
"""
Check all expected deliveries for a tenant and generate appropriate alerts.
Called by scheduled job (runs every hour).
Returns:
Dict with counts: {
'arriving_soon': int,
'overdue': int,
'receipt_incomplete': int
}
"""
logger.info("Checking expected deliveries", tenant_id=str(tenant_id))
counts = {
'arriving_soon': 0,
'overdue': 0,
'receipt_incomplete': 0
}
try:
# Get expected deliveries from procurement service
deliveries = await self._get_expected_deliveries(tenant_id)
now = datetime.now(timezone.utc)
for delivery in deliveries:
po_id = delivery.get('po_id')
po_number = delivery.get('po_number')
expected_date = delivery.get('expected_delivery_date')
delivery_window_hours = delivery.get('delivery_window_hours', 4) # Default 4h window
status = delivery.get('status')
if not expected_date:
continue
# Parse expected date
if isinstance(expected_date, str):
expected_date = datetime.fromisoformat(expected_date)
# Make timezone-aware
if expected_date.tzinfo is None:
expected_date = expected_date.replace(tzinfo=timezone.utc)
# Calculate delivery window
window_start = expected_date
window_end = expected_date + timedelta(hours=delivery_window_hours)
# Check if arriving soon (2 hours before window)
arriving_soon_time = window_start - timedelta(hours=2)
if arriving_soon_time <= now < window_start and status == 'approved':
if await self._send_arriving_soon_alert(tenant_id, delivery):
counts['arriving_soon'] += 1
# Check if overdue (30 min after window end)
overdue_time = window_end + timedelta(minutes=30)
if now >= overdue_time and status == 'approved':
if await self._send_overdue_alert(tenant_id, delivery):
counts['overdue'] += 1
# Check if receipt incomplete (delivery window passed, not marked received)
if now > window_end and status == 'approved':
if await self._send_receipt_incomplete_alert(tenant_id, delivery):
counts['receipt_incomplete'] += 1
logger.info(
"Delivery check completed",
tenant_id=str(tenant_id),
**counts
)
except Exception as e:
logger.error(
"Error checking deliveries",
tenant_id=str(tenant_id),
error=str(e)
)
return counts
async def _get_expected_deliveries(self, tenant_id: UUID) -> List[Dict[str, Any]]:
"""
Query procurement service for expected deliveries.
Returns:
List of delivery dicts with:
- po_id, po_number, expected_delivery_date
- supplier_id, supplier_name
- line_items (product list)
- status (approved, in_transit, received)
"""
try:
procurement_url = self.config.PROCUREMENT_SERVICE_URL
response = await self.http_client.get(
f"{procurement_url}/api/internal/expected-deliveries",
params={
"tenant_id": str(tenant_id),
"days_ahead": 1, # Check today + tomorrow
"include_overdue": True
},
headers={"X-Internal-Service": "orchestrator"}
)
if response.status_code == 200:
data = response.json()
return data.get('deliveries', [])
else:
logger.warning(
"Failed to get expected deliveries",
status_code=response.status_code,
tenant_id=str(tenant_id)
)
return []
except Exception as e:
logger.error(
"Error fetching expected deliveries",
tenant_id=str(tenant_id),
error=str(e)
)
return []
async def _send_arriving_soon_alert(
self,
tenant_id: UUID,
delivery: Dict[str, Any]
) -> bool:
"""
Send DELIVERY_ARRIVING_SOON alert (2h before delivery window).
This appears in the action queue with "Mark as Received" action.
"""
# Check if already sent
cache_key = f"delivery_alert:arriving:{tenant_id}:{delivery['po_id']}"
if await self.redis.exists(cache_key):
return False
po_number = delivery.get('po_number', 'N/A')
supplier_name = delivery.get('supplier_name', 'Supplier')
expected_date = delivery.get('expected_delivery_date')
line_items = delivery.get('line_items', [])
# Format product list
products = [item['product_name'] for item in line_items[:3]]
product_list = ", ".join(products)
if len(line_items) > 3:
product_list += f" (+{len(line_items) - 3} more)"
# Calculate time until arrival
if isinstance(expected_date, str):
expected_date = datetime.fromisoformat(expected_date)
if expected_date.tzinfo is None:
expected_date = expected_date.replace(tzinfo=timezone.utc)
hours_until = (expected_date - datetime.now(timezone.utc)).total_seconds() / 3600
alert_data = {
"tenant_id": str(tenant_id),
"alert_type": AlertTypeConstants.DELIVERY_ARRIVING_SOON,
"title": f"Delivery arriving soon: {supplier_name}",
"message": f"Purchase order {po_number} expected in ~{hours_until:.1f} hours. Products: {product_list}",
"service": "orchestrator",
"actions": ["mark_delivery_received", "call_supplier"],
"alert_metadata": {
"po_id": delivery['po_id'],
"po_number": po_number,
"supplier_id": delivery.get('supplier_id'),
"supplier_name": supplier_name,
"supplier_phone": delivery.get('supplier_phone'),
"expected_delivery_date": expected_date.isoformat(),
"line_items": line_items,
"hours_until_arrival": hours_until,
"confidence_score": 0.9
}
}
success = await self.alert_service.send_alert(alert_data)
if success:
# Cache for 24 hours to avoid duplicate alerts
await self.redis.set(cache_key, "1", ex=86400)
logger.info(
"Sent arriving soon alert",
po_number=po_number,
supplier=supplier_name
)
return success
async def _send_overdue_alert(
self,
tenant_id: UUID,
delivery: Dict[str, Any]
) -> bool:
"""
Send DELIVERY_OVERDUE alert (30min after expected window).
Critical priority - needs immediate action (call supplier).
"""
# Check if already sent
cache_key = f"delivery_alert:overdue:{tenant_id}:{delivery['po_id']}"
if await self.redis.exists(cache_key):
return False
po_number = delivery.get('po_number', 'N/A')
supplier_name = delivery.get('supplier_name', 'Supplier')
expected_date = delivery.get('expected_delivery_date')
# Calculate how late
if isinstance(expected_date, str):
expected_date = datetime.fromisoformat(expected_date)
if expected_date.tzinfo is None:
expected_date = expected_date.replace(tzinfo=timezone.utc)
hours_late = (datetime.now(timezone.utc) - expected_date).total_seconds() / 3600
alert_data = {
"tenant_id": str(tenant_id),
"alert_type": AlertTypeConstants.DELIVERY_OVERDUE,
"title": f"Delivery overdue: {supplier_name}",
"message": f"Purchase order {po_number} was expected {hours_late:.1f} hours ago. Contact supplier immediately.",
"service": "orchestrator",
"actions": ["call_supplier", "snooze", "report_issue"],
"alert_metadata": {
"po_id": delivery['po_id'],
"po_number": po_number,
"supplier_id": delivery.get('supplier_id'),
"supplier_name": supplier_name,
"supplier_phone": delivery.get('supplier_phone'),
"expected_delivery_date": expected_date.isoformat(),
"hours_late": hours_late,
"financial_impact": delivery.get('total_amount', 0), # Blocked capital
"affected_orders": len(delivery.get('affected_production_batches', [])),
"confidence_score": 1.0
}
}
success = await self.alert_service.send_alert(alert_data)
if success:
# Cache for 48 hours
await self.redis.set(cache_key, "1", ex=172800)
logger.warning(
"Sent overdue delivery alert",
po_number=po_number,
supplier=supplier_name,
hours_late=hours_late
)
return success
async def _send_receipt_incomplete_alert(
self,
tenant_id: UUID,
delivery: Dict[str, Any]
) -> bool:
"""
Send STOCK_RECEIPT_INCOMPLETE alert.
Delivery window has passed but stock not marked as received.
"""
# Check if already sent
cache_key = f"delivery_alert:receipt:{tenant_id}:{delivery['po_id']}"
if await self.redis.exists(cache_key):
return False
po_number = delivery.get('po_number', 'N/A')
supplier_name = delivery.get('supplier_name', 'Supplier')
alert_data = {
"tenant_id": str(tenant_id),
"alert_type": AlertTypeConstants.STOCK_RECEIPT_INCOMPLETE,
"title": f"Confirm stock receipt: {po_number}",
"message": f"Delivery from {supplier_name} should have arrived. Please confirm receipt and log lot details.",
"service": "orchestrator",
"actions": ["complete_stock_receipt", "report_missing"],
"alert_metadata": {
"po_id": delivery['po_id'],
"po_number": po_number,
"supplier_id": delivery.get('supplier_id'),
"supplier_name": supplier_name,
"expected_delivery_date": delivery.get('expected_delivery_date'),
"confidence_score": 0.8
}
}
success = await self.alert_service.send_alert(alert_data)
if success:
# Cache for 7 days
await self.redis.set(cache_key, "1", ex=604800)
logger.info(
"Sent receipt incomplete alert",
po_number=po_number
)
return success
async def mark_delivery_received(
self,
tenant_id: UUID,
po_id: UUID,
received_by_user_id: UUID
) -> Dict[str, Any]:
"""
Mark delivery as received and trigger stock receipt workflow.
This is called when user clicks "Mark as Received" action button.
Returns:
Dict with receipt_id and status
"""
try:
# Call inventory service to create draft stock receipt
inventory_url = self.config.INVENTORY_SERVICE_URL
response = await self.http_client.post(
f"{inventory_url}/api/inventory/stock-receipts",
json={
"tenant_id": str(tenant_id),
"po_id": str(po_id),
"received_by_user_id": str(received_by_user_id)
},
headers={"X-Internal-Service": "orchestrator"}
)
if response.status_code in [200, 201]:
receipt_data = response.json()
# Clear delivery alerts
await self._clear_delivery_alerts(tenant_id, po_id)
logger.info(
"Delivery marked as received",
po_id=str(po_id),
receipt_id=receipt_data.get('id')
)
return {
"status": "success",
"receipt_id": receipt_data.get('id'),
"message": "Stock receipt created. Please complete lot details."
}
else:
logger.error(
"Failed to create stock receipt",
status_code=response.status_code,
po_id=str(po_id)
)
return {
"status": "error",
"message": "Failed to create stock receipt"
}
except Exception as e:
logger.error(
"Error marking delivery received",
po_id=str(po_id),
error=str(e)
)
return {
"status": "error",
"message": str(e)
}
async def _clear_delivery_alerts(self, tenant_id: UUID, po_id: UUID):
"""Clear all delivery-related alerts for a PO once received"""
alert_types = [
"arriving",
"overdue",
"receipt"
]
for alert_type in alert_types:
cache_key = f"delivery_alert:{alert_type}:{tenant_id}:{po_id}"
await self.redis.delete(cache_key)
logger.debug("Cleared delivery alerts", po_id=str(po_id))
    async def close(self):
        """Close HTTP client on shutdown"""
        # Releases the pooled connections held by the shared AsyncClient;
        # must be awaited during service teardown.
        await self.http_client.aclose()

View File

@@ -1,648 +0,0 @@
"""
Enterprise Dashboard Service for Orchestrator
Handles aggregated metrics and data for enterprise tier parent tenants
"""
import asyncio
from typing import Dict, Any, List
from datetime import date, datetime, timedelta
import structlog
from decimal import Decimal
# Import clients
from shared.clients.tenant_client import TenantServiceClient
from shared.clients.forecast_client import ForecastServiceClient
from shared.clients.production_client import ProductionServiceClient
from shared.clients.sales_client import SalesServiceClient
from shared.clients.inventory_client import InventoryServiceClient
from shared.clients.distribution_client import DistributionServiceClient
from shared.clients.procurement_client import ProcurementServiceClient
logger = structlog.get_logger()
class EnterpriseDashboardService:
    """
    Service for providing enterprise dashboard data for parent tenants.

    Aggregates sales, production, inventory, distribution and forecast
    metrics across a parent tenant and its child tenants by fanning out to
    the individual domain service clients.
    """

    def __init__(
        self,
        tenant_client: TenantServiceClient,
        forecast_client: ForecastServiceClient,
        production_client: ProductionServiceClient,
        sales_client: SalesServiceClient,
        inventory_client: InventoryServiceClient,
        distribution_client: DistributionServiceClient,
        procurement_client: ProcurementServiceClient
    ):
        # One client per domain service; all calls below are read-only
        # aggregations except the demo-initialization helpers.
        self.tenant_client = tenant_client
        self.forecast_client = forecast_client
        self.production_client = production_client
        self.sales_client = sales_client
        self.inventory_client = inventory_client
        self.distribution_client = distribution_client
        self.procurement_client = procurement_client
async def get_network_summary(
self,
parent_tenant_id: str
) -> Dict[str, Any]:
"""
Get network summary metrics for enterprise dashboard
Args:
parent_tenant_id: Parent tenant ID
Returns:
Dict with aggregated network metrics
"""
logger.info("Getting network summary for parent tenant", parent_tenant_id=parent_tenant_id)
# Get child tenants
child_tenants = await self.tenant_client.get_child_tenants(parent_tenant_id)
child_tenant_ids = [child['id'] for child in (child_tenants or [])]
# Fetch metrics in parallel
tasks = [
self._get_child_count(parent_tenant_id),
self._get_network_sales(parent_tenant_id, child_tenant_ids),
self._get_production_volume(parent_tenant_id),
self._get_pending_internal_transfers(parent_tenant_id),
self._get_active_shipments(parent_tenant_id)
]
results = await asyncio.gather(*tasks, return_exceptions=True)
# Handle results and errors
child_count = results[0] if not isinstance(results[0], Exception) else 0
network_sales = results[1] if not isinstance(results[1], Exception) else 0
production_volume = results[2] if not isinstance(results[2], Exception) else 0
pending_transfers = results[3] if not isinstance(results[3], Exception) else 0
active_shipments = results[4] if not isinstance(results[4], Exception) else 0
return {
'parent_tenant_id': parent_tenant_id,
'child_tenant_count': child_count,
'network_sales_30d': float(network_sales),
'production_volume_30d': float(production_volume),
'pending_internal_transfers_count': pending_transfers,
'active_shipments_count': active_shipments,
'last_updated': datetime.utcnow().isoformat()
}
async def _get_child_count(self, parent_tenant_id: str) -> int:
"""Get count of child tenants"""
try:
child_tenants = await self.tenant_client.get_child_tenants(parent_tenant_id)
return len(child_tenants)
except Exception as e:
logger.warning(f"Could not get child count for parent tenant {parent_tenant_id}: {e}")
return 0
async def _get_network_sales(self, parent_tenant_id: str, child_tenant_ids: List[str]) -> float:
"""Get total network sales for the last 30 days"""
try:
total_sales = Decimal("0.00")
start_date = date.today() - timedelta(days=30)
end_date = date.today()
# Include parent tenant sales
try:
parent_sales = await self.sales_client.get_sales_summary(
tenant_id=parent_tenant_id,
start_date=start_date,
end_date=end_date
)
total_sales += Decimal(str(parent_sales.get('total_revenue', 0)))
except Exception as e:
logger.warning(f"Could not get sales for parent tenant {parent_tenant_id}: {e}")
# Add child tenant sales
for child_id in child_tenant_ids:
try:
child_sales = await self.sales_client.get_sales_summary(
tenant_id=child_id,
start_date=start_date,
end_date=end_date
)
total_sales += Decimal(str(child_sales.get('total_revenue', 0)))
except Exception as e:
logger.warning(f"Could not get sales for child tenant {child_id}: {e}")
return float(total_sales)
except Exception as e:
logger.error(f"Error getting network sales: {e}")
return 0.0
async def _get_production_volume(self, parent_tenant_id: str) -> float:
"""Get total production volume for the parent tenant (central production)"""
try:
production_summary = await self.production_client.get_dashboard_summary(
tenant_id=parent_tenant_id
)
# Return total production value
return float(production_summary.get('total_value', 0))
except Exception as e:
logger.warning(f"Could not get production volume for parent tenant {parent_tenant_id}: {e}")
return 0.0
async def _get_pending_internal_transfers(self, parent_tenant_id: str) -> int:
"""Get count of pending internal transfer orders from parent to children"""
try:
# Get pending internal purchase orders for parent tenant
pending_pos = await self.procurement_client.get_approved_internal_purchase_orders(
parent_tenant_id=parent_tenant_id,
status="pending" # or whatever status indicates pending delivery
)
return len(pending_pos) if pending_pos else 0
except Exception as e:
logger.warning(f"Could not get pending internal transfers for parent tenant {parent_tenant_id}: {e}")
return 0
async def _get_active_shipments(self, parent_tenant_id: str) -> int:
"""Get count of active shipments for today"""
try:
today = date.today()
shipments = await self.distribution_client.get_shipments_for_date(
parent_tenant_id,
today
)
# Filter for active shipments (not delivered/cancelled)
active_statuses = ['pending', 'in_transit', 'packed']
active_shipments = [s for s in shipments if s.get('status') in active_statuses]
return len(active_shipments)
except Exception as e:
logger.warning(f"Could not get active shipments for parent tenant {parent_tenant_id}: {e}")
return 0
async def get_children_performance(
self,
parent_tenant_id: str,
metric: str = "sales",
period_days: int = 30
) -> Dict[str, Any]:
"""
Get anonymized performance ranking of child tenants
Args:
parent_tenant_id: Parent tenant ID
metric: Metric to rank by ('sales', 'inventory_value', 'order_frequency')
period_days: Number of days to look back
Returns:
Dict with anonymized ranking data
"""
logger.info("Getting children performance",
parent_tenant_id=parent_tenant_id,
metric=metric,
period_days=period_days)
child_tenants = await self.tenant_client.get_child_tenants(parent_tenant_id)
# Gather performance data for each child
performance_data = []
for child in (child_tenants or []):
child_id = child['id']
child_name = child['name']
metric_value = 0
try:
if metric == 'sales':
start_date = date.today() - timedelta(days=period_days)
end_date = date.today()
sales_summary = await self.sales_client.get_sales_summary(
tenant_id=child_id,
start_date=start_date,
end_date=end_date
)
metric_value = float(sales_summary.get('total_revenue', 0))
elif metric == 'inventory_value':
inventory_summary = await self.inventory_client.get_inventory_summary(
tenant_id=child_id
)
metric_value = float(inventory_summary.get('total_value', 0))
elif metric == 'order_frequency':
# Count orders placed in the period
orders = await self.sales_client.get_sales_orders(
tenant_id=child_id,
start_date=start_date,
end_date=end_date
)
metric_value = len(orders) if orders else 0
except Exception as e:
logger.warning(f"Could not get performance data for child {child_id}: {e}")
continue
performance_data.append({
'tenant_id': child_id,
'original_name': child_name,
'metric_value': metric_value
})
# Sort by metric value and anonymize
performance_data.sort(key=lambda x: x['metric_value'], reverse=True)
# Anonymize data (no tenant names, just ranks)
anonymized_data = []
for rank, data in enumerate(performance_data, 1):
anonymized_data.append({
'rank': rank,
'tenant_id': data['tenant_id'],
'anonymized_name': f"Outlet {rank}",
'metric_value': data['metric_value']
})
return {
'parent_tenant_id': parent_tenant_id,
'metric': metric,
'period_days': period_days,
'rankings': anonymized_data,
'total_children': len(performance_data),
'last_updated': datetime.utcnow().isoformat()
}
async def get_distribution_overview(
self,
parent_tenant_id: str,
target_date: date = None
) -> Dict[str, Any]:
"""
Get distribution overview for enterprise dashboard
Args:
parent_tenant_id: Parent tenant ID
target_date: Date to get distribution data for (default: today)
Returns:
Dict with distribution metrics and route information
"""
if target_date is None:
target_date = date.today()
logger.info("Getting distribution overview",
parent_tenant_id=parent_tenant_id,
date=target_date)
try:
# Get all routes for the target date
routes = await self.distribution_client.get_delivery_routes(
parent_tenant_id=parent_tenant_id,
date_from=target_date,
date_to=target_date
)
# Get all shipments for the target date
shipments = await self.distribution_client.get_shipments_for_date(
parent_tenant_id,
target_date
)
# Aggregate by status
status_counts = {}
for shipment in shipments:
status = shipment.get('status', 'unknown')
status_counts[status] = status_counts.get(status, 0) + 1
# Prepare route sequences for map visualization
route_sequences = []
for route in routes:
route_sequences.append({
'route_id': route.get('id'),
'route_number': route.get('route_number'),
'status': route.get('status', 'unknown'),
'total_distance_km': route.get('total_distance_km', 0),
'stops': route.get('route_sequence', []),
'estimated_duration_minutes': route.get('estimated_duration_minutes', 0)
})
return {
'parent_tenant_id': parent_tenant_id,
'target_date': target_date.isoformat(),
'route_count': len(routes),
'shipment_count': len(shipments),
'status_counts': status_counts,
'route_sequences': route_sequences,
'last_updated': datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting distribution overview: {e}", exc_info=True)
return {
'parent_tenant_id': parent_tenant_id,
'target_date': target_date.isoformat(),
'route_count': 0,
'shipment_count': 0,
'status_counts': {},
'route_sequences': [],
'last_updated': datetime.utcnow().isoformat(),
'error': str(e)
}
async def get_enterprise_forecast_summary(
self,
parent_tenant_id: str,
days_ahead: int = 7
) -> Dict[str, Any]:
"""
Get aggregated forecast summary for the enterprise network
Args:
parent_tenant_id: Parent tenant ID
days_ahead: Number of days ahead to forecast
Returns:
Dict with aggregated forecast data
"""
try:
end_date = date.today() + timedelta(days=days_ahead)
start_date = date.today()
# Get aggregated forecast from the forecasting service
forecast_data = await self.forecast_client.get_aggregated_forecast(
parent_tenant_id=parent_tenant_id,
start_date=start_date,
end_date=end_date
)
# Aggregate the forecast data for the summary
total_demand = 0
daily_summary = {}
if not forecast_data:
logger.warning("No forecast data returned", parent_tenant_id=parent_tenant_id)
return {
'parent_tenant_id': parent_tenant_id,
'days_forecast': days_ahead,
'total_predicted_demand': 0,
'daily_summary': {},
'last_updated': datetime.utcnow().isoformat()
}
for forecast_date_str, products in forecast_data.get('aggregated_forecasts', {}).items():
day_total = sum(item.get('predicted_demand', 0) for item in products.values())
total_demand += day_total
daily_summary[forecast_date_str] = {
'predicted_demand': day_total,
'product_count': len(products)
}
return {
'parent_tenant_id': parent_tenant_id,
'days_forecast': days_ahead,
'total_predicted_demand': total_demand,
'daily_summary': daily_summary,
'last_updated': datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting enterprise forecast summary: {e}", exc_info=True)
return {
'parent_tenant_id': parent_tenant_id,
'days_forecast': days_ahead,
'total_predicted_demand': 0,
'daily_summary': {},
'last_updated': datetime.utcnow().isoformat(),
'error': str(e)
}
async def get_network_performance_metrics(
self,
parent_tenant_id: str,
start_date: date,
end_date: date
) -> Dict[str, Any]:
"""
Get aggregated performance metrics across the enterprise network
Args:
parent_tenant_id: Parent tenant ID
start_date: Start date for metrics
end_date: End date for metrics
Returns:
Dict with aggregated network metrics
"""
try:
# Get all child tenants
child_tenants = await self.tenant_client.get_child_tenants(parent_tenant_id)
child_tenant_ids = [child['id'] for child in (child_tenants or [])]
# Include parent in tenant list for complete network metrics
all_tenant_ids = [parent_tenant_id] + child_tenant_ids
# Parallel fetch of metrics for all tenants
tasks = []
for tenant_id in all_tenant_ids:
# Create individual tasks for each metric
sales_task = self._get_tenant_sales(tenant_id, start_date, end_date)
production_task = self._get_tenant_production(tenant_id, start_date, end_date)
inventory_task = self._get_tenant_inventory(tenant_id)
# Gather all tasks for this tenant
tenant_tasks = asyncio.gather(sales_task, production_task, inventory_task, return_exceptions=True)
tasks.append(tenant_tasks)
results = await asyncio.gather(*tasks, return_exceptions=True)
# Aggregate metrics
total_network_sales = Decimal("0.00")
total_network_production = Decimal("0.00")
total_network_inventory_value = Decimal("0.00")
metrics_error_count = 0
for i, result in enumerate(results):
if isinstance(result, Exception):
logger.error(f"Error getting metrics for tenant {all_tenant_ids[i]}: {result}")
metrics_error_count += 1
continue
if isinstance(result, list) and len(result) == 3:
sales, production, inventory = result
total_network_sales += Decimal(str(sales or 0))
total_network_production += Decimal(str(production or 0))
total_network_inventory_value += Decimal(str(inventory or 0))
return {
'parent_tenant_id': parent_tenant_id,
'start_date': start_date.isoformat(),
'end_date': end_date.isoformat(),
'total_network_sales': float(total_network_sales),
'total_network_production': float(total_network_production),
'total_network_inventory_value': float(total_network_inventory_value),
'included_tenant_count': len(all_tenant_ids),
'child_tenant_count': len(child_tenant_ids),
'metrics_error_count': metrics_error_count,
'coverage_percentage': (
(len(all_tenant_ids) - metrics_error_count) / len(all_tenant_ids) * 100
if all_tenant_ids else 0
)
}
except Exception as e:
logger.error(f"Error getting network performance metrics: {e}", exc_info=True)
raise
async def _get_tenant_sales(self, tenant_id: str, start_date: date, end_date: date) -> float:
"""Helper to get sales for a specific tenant"""
try:
sales_data = await self.sales_client.get_sales_summary(
tenant_id=tenant_id,
start_date=start_date,
end_date=end_date
)
return float(sales_data.get('total_revenue', 0))
except Exception as e:
logger.warning(f"Could not get sales for tenant {tenant_id}: {e}")
return 0
async def _get_tenant_production(self, tenant_id: str, start_date: date, end_date: date) -> float:
"""Helper to get production for a specific tenant"""
try:
production_data = await self.production_client.get_dashboard_summary(
tenant_id=tenant_id
)
return float(production_data.get('total_value', 0))
except Exception as e:
logger.warning(f"Could not get production for tenant {tenant_id}: {e}")
return 0
async def _get_tenant_inventory(self, tenant_id: str) -> float:
"""Helper to get inventory value for a specific tenant"""
try:
inventory_data = await self.inventory_client.get_inventory_summary(tenant_id=tenant_id)
return float(inventory_data.get('total_value', 0))
except Exception as e:
logger.warning(f"Could not get inventory for tenant {tenant_id}: {e}")
return 0
async def initialize_enterprise_demo(
self,
parent_tenant_id: str,
child_tenant_ids: List[str],
session_id: str
) -> Dict[str, Any]:
"""
Initialize enterprise demo data including parent-child relationships and distribution setup
Args:
parent_tenant_id: Parent tenant ID
child_tenant_ids: List of child tenant IDs
session_id: Demo session ID
Returns:
Dict with initialization results
"""
logger.info("Initializing enterprise demo",
parent_tenant_id=parent_tenant_id,
child_tenant_ids=child_tenant_ids)
try:
# Step 1: Set up parent-child tenant relationships
await self._setup_parent_child_relationships(
parent_tenant_id=parent_tenant_id,
child_tenant_ids=child_tenant_ids
)
# Step 2: Initialize distribution for the parent
await self._setup_distribution_for_enterprise(
parent_tenant_id=parent_tenant_id,
child_tenant_ids=child_tenant_ids
)
# Step 3: Generate initial internal transfer orders
await self._generate_initial_internal_transfers(
parent_tenant_id=parent_tenant_id,
child_tenant_ids=child_tenant_ids
)
logger.info("Enterprise demo initialized successfully",
parent_tenant_id=parent_tenant_id)
return {
'status': 'success',
'parent_tenant_id': parent_tenant_id,
'child_tenant_count': len(child_tenant_ids),
'session_id': session_id,
'initialized_at': datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error initializing enterprise demo: {e}", exc_info=True)
raise
async def _setup_parent_child_relationships(
self,
parent_tenant_id: str,
child_tenant_ids: List[str]
):
"""Set up parent-child tenant relationships"""
try:
for child_id in child_tenant_ids:
# Update child tenant to have parent reference
await self.tenant_client.update_tenant(
tenant_id=child_id,
updates={
'parent_tenant_id': parent_tenant_id,
'tenant_type': 'child',
'hierarchy_path': f"{parent_tenant_id}.{child_id}"
}
)
# Update parent tenant
await self.tenant_client.update_tenant(
tenant_id=parent_tenant_id,
updates={
'tenant_type': 'parent',
'hierarchy_path': parent_tenant_id # Root path
}
)
logger.info("Parent-child relationships established",
parent_tenant_id=parent_tenant_id,
child_count=len(child_tenant_ids))
except Exception as e:
logger.error(f"Error setting up parent-child relationships: {e}", exc_info=True)
raise
    async def _setup_distribution_for_enterprise(
        self,
        parent_tenant_id: str,
        child_tenant_ids: List[str]
    ):
        """Set up distribution routes and schedules for the enterprise network.

        NOTE: currently a stub that only logs; no distribution-service call
        is made yet (see inline comment below).
        """
        try:
            # In a real implementation, this would call the distribution service
            # to set up default delivery routes and schedules between parent and children
            logger.info("Setting up distribution for enterprise network",
                        parent_tenant_id=parent_tenant_id,
                        child_count=len(child_tenant_ids))
        except Exception as e:
            logger.error(f"Error setting up distribution: {e}", exc_info=True)
            raise
async def _generate_initial_internal_transfers(
self,
parent_tenant_id: str,
child_tenant_ids: List[str]
):
"""Generate initial internal transfer orders for demo"""
try:
for child_id in child_tenant_ids:
# Generate initial internal purchase orders from parent to child
# This would typically be done through the procurement service
logger.info("Generated initial internal transfer order",
parent_tenant_id=parent_tenant_id,
child_tenant_id=child_id)
except Exception as e:
logger.error(f"Error generating initial internal transfers: {e}", exc_info=True)
raise

View File

@@ -1,88 +1,60 @@
"""
Orchestration Notification Service
Orchestration Notification Service - Simplified
Emits informational notifications for orchestration events:
- orchestration_run_started: When an orchestration run begins
- orchestration_run_completed: When an orchestration run finishes successfully
- action_created: When the orchestrator creates an action (PO, batch, adjustment)
These are NOTIFICATIONS (not alerts) - informational state changes that don't require user action.
Emits minimal events using EventPublisher.
All enrichment handled by alert_processor.
"""
import logging
from datetime import datetime, timezone
from typing import Optional, Dict, Any, List
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
from uuid import UUID
import structlog
from shared.schemas.event_classification import RawEvent, EventClass, EventDomain
from shared.alerts.base_service import BaseAlertService
from shared.messaging import UnifiedEventPublisher
logger = structlog.get_logger()
logger = logging.getLogger(__name__)
class OrchestrationNotificationService(BaseAlertService):
class OrchestrationNotificationService:
"""
Service for emitting orchestration notifications (informational state changes).
Service for emitting orchestration notifications using EventPublisher.
"""
def __init__(self, rabbitmq_url: str = None):
super().__init__(service_name="orchestrator", rabbitmq_url=rabbitmq_url)
def __init__(self, event_publisher: UnifiedEventPublisher):
self.publisher = event_publisher
async def emit_orchestration_run_started_notification(
self,
db: Session,
tenant_id: str,
tenant_id: UUID,
run_id: str,
run_type: str, # 'scheduled', 'manual', 'triggered'
scope: str, # 'full', 'inventory_only', 'production_only'
) -> None:
"""
Emit notification when an orchestration run starts.
Args:
db: Database session
tenant_id: Tenant ID
run_id: Orchestration run ID
run_type: Type of run
scope: Scope of run
"""
try:
event = RawEvent(
tenant_id=tenant_id,
event_class=EventClass.NOTIFICATION,
event_domain=EventDomain.OPERATIONS,
event_type="orchestration_run_started",
title="Orchestration Started",
message=f"AI orchestration run started ({run_type}, scope: {scope})",
service="orchestrator",
event_metadata={
"run_id": run_id,
"run_type": run_type,
"scope": scope,
"started_at": datetime.now(timezone.utc).isoformat(),
},
timestamp=datetime.now(timezone.utc),
)
metadata = {
"run_id": run_id,
"run_type": run_type,
"scope": scope,
"started_at": datetime.now(timezone.utc).isoformat(),
}
await self.publish_item(tenant_id, event.dict(), item_type="notification")
await self.publisher.publish_notification(
event_type="operations.orchestration_run_started",
tenant_id=tenant_id,
data=metadata
)
logger.info(
f"Orchestration run started notification emitted: {run_id}",
extra={"tenant_id": tenant_id, "run_id": run_id}
)
except Exception as e:
logger.error(
f"Failed to emit orchestration run started notification: {e}",
extra={"tenant_id": tenant_id, "run_id": run_id},
exc_info=True,
)
logger.info(
"orchestration_run_started_notification_emitted",
tenant_id=str(tenant_id),
run_id=run_id
)
async def emit_orchestration_run_completed_notification(
self,
db: Session,
tenant_id: str,
tenant_id: UUID,
run_id: str,
duration_seconds: float,
actions_created: int,
@@ -91,63 +63,39 @@ class OrchestrationNotificationService(BaseAlertService):
) -> None:
"""
Emit notification when an orchestration run completes.
Args:
db: Database session
tenant_id: Tenant ID
run_id: Orchestration run ID
duration_seconds: Run duration
actions_created: Total actions created
actions_by_type: Breakdown of actions by type
status: Run status (success, partial, failed)
"""
try:
# Build message with action summary
if actions_created == 0:
message = "No actions needed"
else:
action_summary = ", ".join([f"{count} {action_type}" for action_type, count in actions_by_type.items()])
message = f"Created {actions_created} actions: {action_summary}"
# Build message with action summary
if actions_created == 0:
action_summary = "No actions needed"
else:
action_summary = ", ".join([f"{count} {action_type}" for action_type, count in actions_by_type.items()])
message += f" ({duration_seconds:.1f}s)"
metadata = {
"run_id": run_id,
"status": status,
"duration_seconds": float(duration_seconds),
"actions_created": actions_created,
"actions_by_type": actions_by_type,
"action_summary": action_summary,
"completed_at": datetime.now(timezone.utc).isoformat(),
}
event = RawEvent(
tenant_id=tenant_id,
event_class=EventClass.NOTIFICATION,
event_domain=EventDomain.OPERATIONS,
event_type="orchestration_run_completed",
title=f"Orchestration Completed: {status.title()}",
message=message,
service="orchestrator",
event_metadata={
"run_id": run_id,
"status": status,
"duration_seconds": duration_seconds,
"actions_created": actions_created,
"actions_by_type": actions_by_type,
"completed_at": datetime.now(timezone.utc).isoformat(),
},
timestamp=datetime.now(timezone.utc),
)
await self.publisher.publish_notification(
event_type="operations.orchestration_run_completed",
tenant_id=tenant_id,
data=metadata
)
await self.publish_item(tenant_id, event.dict(), item_type="notification")
logger.info(
f"Orchestration run completed notification emitted: {run_id} ({actions_created} actions)",
extra={"tenant_id": tenant_id, "run_id": run_id}
)
except Exception as e:
logger.error(
f"Failed to emit orchestration run completed notification: {e}",
extra={"tenant_id": tenant_id, "run_id": run_id},
exc_info=True,
)
logger.info(
"orchestration_run_completed_notification_emitted",
tenant_id=str(tenant_id),
run_id=run_id,
actions_created=actions_created
)
async def emit_action_created_notification(
self,
db: Session,
tenant_id: str,
tenant_id: UUID,
run_id: str,
action_id: str,
action_type: str, # 'purchase_order', 'production_batch', 'inventory_adjustment'
@@ -157,70 +105,33 @@ class OrchestrationNotificationService(BaseAlertService):
) -> None:
"""
Emit notification when the orchestrator creates an action.
Args:
db: Database session
tenant_id: Tenant ID
run_id: Orchestration run ID
action_id: Created action ID
action_type: Type of action
action_details: Action-specific details
reason: Reason for creating action
estimated_impact: Estimated impact (optional)
"""
try:
# Build title and message based on action type
if action_type == "purchase_order":
title = f"Purchase Order Created: {action_details.get('supplier_name', 'Unknown')}"
message = f"Ordered {action_details.get('items_count', 0)} items - {reason}"
elif action_type == "production_batch":
title = f"Production Batch Scheduled: {action_details.get('product_name', 'Unknown')}"
message = f"Scheduled {action_details.get('quantity', 0)} {action_details.get('unit', 'units')} - {reason}"
elif action_type == "inventory_adjustment":
title = f"Inventory Adjustment: {action_details.get('ingredient_name', 'Unknown')}"
message = f"Adjusted by {action_details.get('quantity', 0)} {action_details.get('unit', 'units')} - {reason}"
else:
title = f"Action Created: {action_type}"
message = reason
metadata = {
"run_id": run_id,
"action_id": action_id,
"action_type": action_type,
"action_details": action_details,
"reason": reason,
"estimated_impact": estimated_impact,
"created_at": datetime.now(timezone.utc).isoformat(),
}
event = RawEvent(
tenant_id=tenant_id,
event_class=EventClass.NOTIFICATION,
event_domain=EventDomain.OPERATIONS,
event_type="action_created",
title=title,
message=message,
service="orchestrator",
event_metadata={
"run_id": run_id,
"action_id": action_id,
"action_type": action_type,
"action_details": action_details,
"reason": reason,
"estimated_impact": estimated_impact,
"created_at": datetime.now(timezone.utc).isoformat(),
},
timestamp=datetime.now(timezone.utc),
)
await self.publisher.publish_notification(
event_type="operations.action_created",
tenant_id=tenant_id,
data=metadata
)
await self.publish_item(tenant_id, event.dict(), item_type="notification")
logger.info(
f"Action created notification emitted: {action_type} - {action_id}",
extra={"tenant_id": tenant_id, "action_id": action_id}
)
except Exception as e:
logger.error(
f"Failed to emit action created notification: {e}",
extra={"tenant_id": tenant_id, "action_id": action_id},
exc_info=True,
)
logger.info(
"action_created_notification_emitted",
tenant_id=str(tenant_id),
action_id=action_id,
action_type=action_type
)
async def emit_action_completed_notification(
self,
db: Session,
tenant_id: str,
tenant_id: UUID,
action_id: str,
action_type: str,
action_status: str, # 'approved', 'completed', 'rejected', 'cancelled'
@@ -228,48 +139,24 @@ class OrchestrationNotificationService(BaseAlertService):
) -> None:
"""
Emit notification when an orchestrator action is completed/resolved.
Args:
db: Database session
tenant_id: Tenant ID
action_id: Action ID
action_type: Type of action
action_status: Final status
completed_by: Who completed it (optional)
"""
try:
message = f"{action_type.replace('_', ' ').title()}: {action_status}"
if completed_by:
message += f" by {completed_by}"
metadata = {
"action_id": action_id,
"action_type": action_type,
"action_status": action_status,
"completed_by": completed_by,
"completed_at": datetime.now(timezone.utc).isoformat(),
}
event = RawEvent(
tenant_id=tenant_id,
event_class=EventClass.NOTIFICATION,
event_domain=EventDomain.OPERATIONS,
event_type="action_completed",
title=f"Action {action_status.title()}",
message=message,
service="orchestrator",
event_metadata={
"action_id": action_id,
"action_type": action_type,
"action_status": action_status,
"completed_by": completed_by,
"completed_at": datetime.now(timezone.utc).isoformat(),
},
timestamp=datetime.now(timezone.utc),
)
await self.publisher.publish_notification(
event_type="operations.action_completed",
tenant_id=tenant_id,
data=metadata
)
await self.publish_item(tenant_id, event.dict(), item_type="notification")
logger.info(
f"Action completed notification emitted: {action_id} ({action_status})",
extra={"tenant_id": tenant_id, "action_id": action_id}
)
except Exception as e:
logger.error(
f"Failed to emit action completed notification: {e}",
extra={"tenant_id": tenant_id, "action_id": action_id},
exc_info=True,
)
logger.info(
"action_completed_notification_emitted",
tenant_id=str(tenant_id),
action_id=action_id,
action_status=action_status
)

View File

@@ -1,9 +1,9 @@
"""
Orchestrator Scheduler Service - REFACTORED
Coordinates daily auto-generation workflow: Forecasting → Production → Procurement → Notifications
Coordinates daily auto-generation workflow: Forecasting → Production → Procurement
CHANGES FROM ORIGINAL:
- Removed all TODO/stub code
- Updated to use new EventPublisher pattern for all messaging
- Integrated OrchestrationSaga for error handling and compensation
- Added circuit breakers for all service calls
- Implemented real Forecasting Service integration
@@ -21,7 +21,8 @@ from typing import List, Dict, Any, Optional
import structlog
from apscheduler.triggers.cron import CronTrigger
from shared.alerts.base_service import BaseAlertService
# Updated imports - removed old alert system
from shared.messaging import UnifiedEventPublisher
from shared.clients.forecast_client import ForecastServiceClient
from shared.clients.production_client import ProductionServiceClient
from shared.clients.procurement_client import ProcurementServiceClient
@@ -40,14 +41,15 @@ from app.services.orchestration_saga import OrchestrationSaga
logger = structlog.get_logger()
class OrchestratorSchedulerService(BaseAlertService):
class OrchestratorSchedulerService:
"""
Orchestrator Service extending BaseAlertService
Orchestrator Service using EventPublisher for messaging
Handles automated daily orchestration of forecasting, production, and procurement
"""
def __init__(self, config):
super().__init__(config)
def __init__(self, event_publisher: UnifiedEventPublisher, config):
self.publisher = event_publisher
self.config = config
# Service clients
self.forecast_client = ForecastServiceClient(config, "orchestrator-service")
@@ -98,47 +100,149 @@ class OrchestratorSchedulerService(BaseAlertService):
success_threshold=2
)
def setup_scheduled_checks(self):
async def emit_orchestration_run_started(
self,
tenant_id: uuid.UUID,
run_id: str,
run_type: str, # 'scheduled', 'manual', 'triggered'
scope: str, # 'full', 'inventory_only', 'production_only'
):
"""
Configure scheduled orchestration jobs
Runs daily at 5:30 AM (configured via ORCHESTRATION_SCHEDULE)
Emit notification when an orchestration run starts.
"""
# Parse cron schedule from config (default: "30 5 * * *" = 5:30 AM daily)
cron_parts = settings.ORCHESTRATION_SCHEDULE.split()
if len(cron_parts) == 5:
minute, hour, day, month, day_of_week = cron_parts
else:
# Fallback to default
minute, hour, day, month, day_of_week = "30", "5", "*", "*", "*"
metadata = {
"run_id": run_id,
"run_type": run_type,
"scope": scope,
"started_at": datetime.now(timezone.utc).isoformat(),
}
# Schedule daily orchestration
self.scheduler.add_job(
func=self.run_daily_orchestration,
trigger=CronTrigger(
minute=minute,
hour=hour,
day=day,
month=month,
day_of_week=day_of_week
),
id="daily_orchestration",
name="Daily Orchestration (Forecasting → Production → Procurement)",
misfire_grace_time=300, # 5 minutes grace period
max_instances=1 # Only one instance running at a time
await self.publisher.publish_notification(
event_type="operations.orchestration_run_started",
tenant_id=tenant_id,
data=metadata
)
logger.info("Orchestrator scheduler configured",
schedule=settings.ORCHESTRATION_SCHEDULE)
logger.info(
"orchestration_run_started_notification_emitted",
tenant_id=str(tenant_id),
run_id=run_id
)
async def emit_orchestration_run_completed(
self,
tenant_id: uuid.UUID,
run_id: str,
duration_seconds: float,
actions_created: int,
actions_by_type: Dict[str, int], # e.g., {'purchase_order': 2, 'production_batch': 3}
status: str = "success",
):
"""
Emit notification when an orchestration run completes.
"""
# Build message with action summary
if actions_created == 0:
action_summary = "No actions needed"
else:
action_summary = ", ".join([f"{count} {action_type}" for action_type, count in actions_by_type.items()])
metadata = {
"run_id": run_id,
"status": status,
"duration_seconds": float(duration_seconds),
"actions_created": actions_created,
"actions_by_type": actions_by_type,
"action_summary": action_summary,
"completed_at": datetime.now(timezone.utc).isoformat(),
}
await self.publisher.publish_notification(
event_type="operations.orchestration_run_completed",
tenant_id=tenant_id,
data=metadata
)
logger.info(
"orchestration_run_completed_notification_emitted",
tenant_id=str(tenant_id),
run_id=run_id,
actions_created=actions_created
)
async def emit_action_created_notification(
self,
tenant_id: uuid.UUID,
run_id: str,
action_id: str,
action_type: str, # 'purchase_order', 'production_batch', 'inventory_adjustment'
action_details: Dict[str, Any], # Type-specific details
reason: str,
estimated_impact: Optional[Dict[str, Any]] = None,
):
"""
Emit notification when the orchestrator creates an action.
"""
metadata = {
"run_id": run_id,
"action_id": action_id,
"action_type": action_type,
"action_details": action_details,
"reason": reason,
"estimated_impact": estimated_impact,
"created_at": datetime.now(timezone.utc).isoformat(),
}
await self.publisher.publish_notification(
event_type="operations.action_created",
tenant_id=tenant_id,
data=metadata
)
logger.info(
"action_created_notification_emitted",
tenant_id=str(tenant_id),
action_id=action_id,
action_type=action_type
)
async def emit_action_completed_notification(
self,
tenant_id: uuid.UUID,
action_id: str,
action_type: str,
action_status: str, # 'approved', 'completed', 'rejected', 'cancelled'
completed_by: Optional[str] = None,
):
"""
Emit notification when an orchestrator action is completed/resolved.
"""
metadata = {
"action_id": action_id,
"action_type": action_type,
"action_status": action_status,
"completed_by": completed_by,
"completed_at": datetime.now(timezone.utc).isoformat(),
}
await self.publisher.publish_notification(
event_type="operations.action_completed",
tenant_id=tenant_id,
data=metadata
)
logger.info(
"action_completed_notification_emitted",
tenant_id=str(tenant_id),
action_id=action_id,
action_status=action_status
)
async def run_daily_orchestration(self):
"""
Main orchestration workflow - runs daily
Executes for all active tenants in parallel (with limits)
"""
if not self.is_leader:
logger.debug("Not leader, skipping orchestration")
return
if not settings.ORCHESTRATION_ENABLED:
logger.info("Orchestration disabled via config")
return
@@ -188,7 +292,7 @@ class OrchestratorSchedulerService(BaseAlertService):
logger.info("Starting orchestration for tenant", tenant_id=str(tenant_id))
# Create orchestration run record
async with self.db_manager.get_session() as session:
async with self.config.database_manager.get_session() as session:
repo = OrchestrationRunRepository(session)
run_number = await repo.generate_run_number()
@@ -204,6 +308,14 @@ class OrchestratorSchedulerService(BaseAlertService):
run_id = run.id
try:
# Emit orchestration started event
await self.emit_orchestration_run_started(
tenant_id=tenant_id,
run_id=str(run_id),
run_type='scheduled',
scope='full'
)
# Set timeout for entire tenant orchestration
async with asyncio.timeout(settings.TENANT_TIMEOUT_SECONDS):
# Execute orchestration using Saga pattern
@@ -241,6 +353,16 @@ class OrchestratorSchedulerService(BaseAlertService):
result
)
# Emit orchestration completed event
await self.emit_orchestration_run_completed(
tenant_id=tenant_id,
run_id=str(run_id),
duration_seconds=result.get('duration_seconds', 0),
actions_created=result.get('total_actions', 0),
actions_by_type=result.get('actions_by_type', {}),
status='success'
)
logger.info("Tenant orchestration completed successfully",
tenant_id=str(tenant_id), run_id=str(run_id))
return True
@@ -250,6 +372,17 @@ class OrchestratorSchedulerService(BaseAlertService):
run_id,
result.get('error', 'Saga execution failed')
)
# Emit orchestration failed event
await self.emit_orchestration_run_completed(
tenant_id=tenant_id,
run_id=str(run_id),
duration_seconds=result.get('duration_seconds', 0),
actions_created=0,
actions_by_type={},
status='failed'
)
return False
except asyncio.TimeoutError:
@@ -318,7 +451,7 @@ class OrchestratorSchedulerService(BaseAlertService):
run_id: Orchestration run ID
saga_result: Result from saga execution
"""
async with self.db_manager.get_session() as session:
async with self.config.database_manager.get_session() as session:
repo = OrchestrationRunRepository(session)
run = await repo.get_run_by_id(run_id)
@@ -489,7 +622,7 @@ class OrchestratorSchedulerService(BaseAlertService):
async def _mark_orchestration_failed(self, run_id: uuid.UUID, error_message: str):
"""Mark orchestration run as failed"""
async with self.db_manager.get_session() as session:
async with self.config.database_manager.get_session() as session:
repo = OrchestrationRunRepository(session)
run = await repo.get_run_by_id(run_id)
@@ -535,6 +668,16 @@ class OrchestratorSchedulerService(BaseAlertService):
'message': 'Orchestration completed' if success else 'Orchestration failed'
}
async def start(self):
"""Start the orchestrator scheduler service"""
logger.info("OrchestratorSchedulerService started")
# Add any initialization logic here if needed
async def stop(self):
"""Stop the orchestrator scheduler service"""
logger.info("OrchestratorSchedulerService stopped")
# Add any cleanup logic here if needed
def get_circuit_breaker_stats(self) -> Dict[str, Any]:
"""Get circuit breaker statistics for monitoring"""
return {
@@ -545,4 +688,4 @@ class OrchestratorSchedulerService(BaseAlertService):
'inventory_service': self.inventory_breaker.get_stats(),
'suppliers_service': self.suppliers_breaker.get_stats(),
'recipes_service': self.recipes_breaker.get_stats()
}
}