New enterprise feature
@@ -37,6 +37,15 @@ The **Forecasting Service** is the AI brain of the Bakery-IA platform, providing
- **Comprehensive Metrics** - MAE, MAPE, RMSE, R², accuracy percentage by product/location
- **Audit Trail** - Complete history of all validations and model improvements

### 🆕 Enterprise Tier: Network Demand Aggregation (NEW)
- **Parent-Level Aggregation** - Consolidated demand forecasts across all child outlets for centralized production planning
- **Child Contribution Tracking** - Track each outlet's contribution to total network demand
- **Redis Caching Strategy** - 1-hour TTL on enterprise forecasts to balance freshness against recomputation cost (see the sketch after this list)
- **Intelligent Rollup** - Aggregate child forecasts with parent-specific demand for complete visibility
- **Network-Wide Insights** - Total production needs, capacity requirements, distribution planning support
- **Hierarchical Forecasting** - Generate forecasts at both individual outlet and network levels
- **Subscription Gating** - Aggregation endpoints require a validated Enterprise-tier subscription
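A minimal sketch of the caching scheme, assuming `redis.asyncio` and the `agg_forecast:` key format used by the service code later in this commit (the helper name is illustrative):

```python
import json
import redis.asyncio as redis

CACHE_TTL_SECONDS = 3600  # the 1-hour TTL named above

async def cache_network_forecast(client: redis.Redis, parent_id: str,
                                 start: str, end: str, product_id: str | None,
                                 payload: dict) -> None:
    # Key format mirrors the service: agg_forecast:{parent}:{start}:{end}:{product}
    key = f"agg_forecast:{parent_id}:{start}:{end}:{product_id or 'all'}"
    await client.setex(key, CACHE_TTL_SECONDS, json.dumps(payload, default=str))
```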

### Intelligent Alerting
- **Low Demand Alerts** - Automatic notifications for unusually low predicted demand
- **High Demand Alerts** - Warnings for demand spikes requiring extra production
@@ -257,6 +266,11 @@ Event-Driven Validation
- `POST /webhooks/pos-sync-completed` - Receive POS sync completion events
- `GET /webhooks/health` - Webhook health check

### 🆕 Enterprise Aggregation (NEW)
- `GET /api/v1/{parent_tenant}/forecasting/enterprise/network-forecast` - Get aggregated network forecast (parent + all children)
- `GET /api/v1/{parent_tenant}/forecasting/enterprise/child-contributions` - Get each child's contribution to total demand
- `GET /api/v1/{parent_tenant}/forecasting/enterprise/production-requirements` - Calculate total production needs for the network
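A hedged call sketch for the network-forecast endpoint (the base URL, auth header, and date query parameters are assumptions; only the path comes from the list above):

```python
import httpx

BASE_URL = "http://localhost:8000"  # assumed local deployment
parent_tenant = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"  # example parent tenant id

resp = httpx.get(
    f"{BASE_URL}/api/v1/{parent_tenant}/forecasting/enterprise/network-forecast",
    params={"start_date": "2025-11-13", "end_date": "2025-11-19"},  # assumed params
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
print(resp.json())
```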
### Predictions
- `GET /api/v1/forecasting/predictions/daily` - Get today's predictions
- `GET /api/v1/forecasting/predictions/daily/{date}` - Get predictions for a specific date
@@ -391,6 +405,53 @@ TTL: 86400 # 24 hours
}
```

### 🆕 Enterprise Network Events (NEW)

**Exchange**: forecasting.enterprise
**Routing Key**: forecasting.enterprise.network_forecast_generated

**Network Forecast Generated Event** - Published when the aggregated network forecast is calculated
```json
{
  "event_id": "uuid",
  "event_type": "network_forecast_generated",
  "service_name": "forecasting",
  "timestamp": "2025-11-12T10:30:00Z",
  "data": {
    "parent_tenant_id": "uuid",
    "forecast_date": "2025-11-14",
    "total_network_demand": {
      "product_id": "uuid",
      "product_name": "Pan de Molde",
      "total_quantity": 250.0,
      "unit": "kg"
    },
    "child_contributions": [
      {
        "child_tenant_id": "uuid",
        "child_name": "Outlet Centro",
        "quantity": 80.0,
        "percentage": 32.0
      },
      {
        "child_tenant_id": "uuid",
        "child_name": "Outlet Norte",
        "quantity": 90.0,
        "percentage": 36.0
      },
      {
        "child_tenant_id": "uuid",
        "child_name": "Outlet Sur",
        "quantity": 80.0,
        "percentage": 32.0
      }
    ],
    "parent_demand": 50.0,
    "cache_ttl_seconds": 3600
  }
}
```
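A consumer sketch for this event, assuming RabbitMQ with `aio-pika`; the exchange name and routing key come from the docs above, while the topic exchange type and queue name are assumptions:

```python
import asyncio
import json

import aio_pika


async def consume_network_forecasts(amqp_url: str) -> None:
    connection = await aio_pika.connect_robust(amqp_url)
    async with connection:
        channel = await connection.channel()
        # Exchange and routing key as documented above; TOPIC type is assumed.
        exchange = await channel.declare_exchange(
            "forecasting.enterprise", aio_pika.ExchangeType.TOPIC, durable=True
        )
        queue = await channel.declare_queue("distribution.network_forecasts", durable=True)
        await queue.bind(exchange, routing_key="forecasting.enterprise.network_forecast_generated")

        async with queue.iterator() as messages:
            async for message in messages:
                async with message.process():  # ack on successful handling
                    event = json.loads(message.body)
                    demand = event["data"]["total_network_demand"]
                    print(event["data"]["parent_tenant_id"], demand["total_quantity"], demand["unit"])


if __name__ == "__main__":
    asyncio.run(consume_network_forecasts("amqp://guest:guest@localhost/"))
```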

## Custom Metrics (Prometheus)

```python
@@ -567,6 +628,7 @@ poi_features = await poi_service.fetch_poi_features(tenant_id)
- **Sales Service** - Fetch historical sales data for training
- **External Service** - Fetch weather, traffic, holiday, and POI feature data
- **Training Service** - Load trained Prophet models
- **🆕 Tenant Service** (NEW) - Fetch tenant hierarchy for enterprise aggregation (parent/child relationships)
- **Redis** - Cache predictions and session data
- **PostgreSQL** - Store forecasts and performance metrics
- **RabbitMQ** - Publish alert events
@@ -577,6 +639,8 @@ poi_features = await poi_service.fetch_poi_features(tenant_id)
- **Orchestrator Service** - Trigger daily forecast generation
- **Frontend Dashboard** - Display forecasts and charts
- **AI Insights Service** - Analyze forecast patterns
- **🆕 Distribution Service** (NEW) - Network forecasts inform delivery route capacity planning
- **🆕 Orchestrator Enterprise Dashboard** (NEW) - Displays aggregated network demand for parent tenants

## ML Model Performance

services/forecasting/app/api/__init__.py
@@ -11,6 +11,7 @@ from .historical_validation import router as historical_validation_router
from .webhooks import router as webhooks_router
from .performance_monitoring import router as performance_monitoring_router
from .retraining import router as retraining_router
from .enterprise_forecasting import router as enterprise_forecasting_router


__all__ = [
@@ -22,4 +23,5 @@ __all__ = [
    "webhooks_router",
    "performance_monitoring_router",
    "retraining_router",
    "enterprise_forecasting_router",
]
services/forecasting/app/api/enterprise_forecasting.py (new file, 108 lines)
@@ -0,0 +1,108 @@
"""
|
||||
Enterprise forecasting API endpoints
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||
from typing import Optional
|
||||
from datetime import date
|
||||
import structlog
|
||||
|
||||
from app.services.enterprise_forecasting_service import EnterpriseForecastingService
|
||||
from shared.auth.tenant_access import verify_tenant_permission_dep
|
||||
from shared.clients import get_forecast_client, get_tenant_client
|
||||
import shared.redis_utils
|
||||
from app.core.config import settings
|
||||
|
||||
logger = structlog.get_logger()
|
||||
router = APIRouter()
|
||||
|
||||
# Global Redis client
|
||||
_redis_client = None
|
||||
|
||||
|
||||
async def get_forecasting_redis_client():
|
||||
"""Get or create Redis client"""
|
||||
global _redis_client
|
||||
try:
|
||||
if _redis_client is None:
|
||||
_redis_client = await shared.redis_utils.initialize_redis(settings.REDIS_URL)
|
||||
logger.info("Redis client initialized for enterprise forecasting")
|
||||
return _redis_client
|
||||
except Exception as e:
|
||||
logger.warning("Failed to initialize Redis client, enterprise forecasting will work with limited functionality", error=str(e))
|
||||
return None
|
||||
|
||||
|
||||
async def get_enterprise_forecasting_service(
|
||||
redis_client = Depends(get_forecasting_redis_client)
|
||||
) -> EnterpriseForecastingService:
|
||||
"""Dependency injection for EnterpriseForecastingService"""
|
||||
forecast_client = get_forecast_client(settings, "forecasting-service")
|
||||
tenant_client = get_tenant_client(settings, "forecasting-service")
|
||||
return EnterpriseForecastingService(
|
||||
forecast_client=forecast_client,
|
||||
tenant_client=tenant_client,
|
||||
redis_client=redis_client
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/forecasting/enterprise/aggregated")
|
||||
async def get_aggregated_forecast(
|
||||
tenant_id: str,
|
||||
start_date: date = Query(..., description="Start date for forecast aggregation"),
|
||||
end_date: date = Query(..., description="End date for forecast aggregation"),
|
||||
product_id: Optional[str] = Query(None, description="Optional product ID to filter by"),
|
||||
enterprise_forecasting_service: EnterpriseForecastingService = Depends(get_enterprise_forecasting_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get aggregated forecasts across parent and child tenants
|
||||
"""
|
||||
try:
|
||||
# Check if this tenant is a parent tenant
|
||||
tenant_info = await enterprise_forecasting_service.tenant_client.get_tenant(tenant_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access aggregated enterprise forecasts"
|
||||
)
|
||||
|
||||
result = await enterprise_forecasting_service.get_aggregated_forecast(
|
||||
parent_tenant_id=tenant_id,
|
||||
start_date=start_date,
|
||||
end_date=end_date,
|
||||
product_id=product_id
|
||||
)
|
||||
return result
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get aggregated forecast: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/tenants/{tenant_id}/forecasting/enterprise/network-performance")
|
||||
async def get_network_performance_metrics(
|
||||
tenant_id: str,
|
||||
start_date: date = Query(..., description="Start date for metrics"),
|
||||
end_date: date = Query(..., description="End date for metrics"),
|
||||
enterprise_forecasting_service: EnterpriseForecastingService = Depends(get_enterprise_forecasting_service),
|
||||
verified_tenant: str = Depends(verify_tenant_permission_dep)
|
||||
):
|
||||
"""
|
||||
Get aggregated performance metrics across tenant network
|
||||
"""
|
||||
try:
|
||||
# Check if this tenant is a parent tenant
|
||||
tenant_info = await enterprise_forecasting_service.tenant_client.get_tenant(tenant_id)
|
||||
if tenant_info.get('tenant_type') != 'parent':
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Only parent tenants can access network performance metrics"
|
||||
)
|
||||
|
||||
result = await enterprise_forecasting_service.get_network_performance_metrics(
|
||||
parent_tenant_id=tenant_id,
|
||||
start_date=start_date,
|
||||
end_date=end_date
|
||||
)
|
||||
return result
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get network performance: {str(e)}")
|
||||
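A quick test sketch for the subscription gating above. This is a sketch under stated assumptions: `fastapi.testclient` is available, both dependencies can be overridden, and the re-raise fix above is in place so the 403 is not wrapped into a 500; the mocked payload shape follows `tenant_info.get('tenant_type')`:

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient
from unittest.mock import AsyncMock

from app.api.enterprise_forecasting import router, get_enterprise_forecasting_service
from shared.auth.tenant_access import verify_tenant_permission_dep

app = FastAPI()
app.include_router(router)

# A child tenant should be rejected by the parent-only check
fake_service = AsyncMock()
fake_service.tenant_client.get_tenant.return_value = {"tenant_type": "child"}

app.dependency_overrides[get_enterprise_forecasting_service] = lambda: fake_service
app.dependency_overrides[verify_tenant_permission_dep] = lambda: "tenant-123"

client = TestClient(app)
resp = client.get(
    "/tenants/tenant-123/forecasting/enterprise/aggregated",
    params={"start_date": "2025-11-13", "end_date": "2025-11-19"},
)
assert resp.status_code == 403
```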
services/forecasting/app/api/internal_demo.py
@@ -23,17 +23,14 @@ from app.models.forecasts import Forecast, PredictionBatch
 logger = structlog.get_logger()
 router = APIRouter(prefix="/internal/demo", tags=["internal"])
 
-# Internal API key for service-to-service auth
-INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "dev-internal-key-change-in-production")
-
 # Base demo tenant IDs
-DEMO_TENANT_SAN_PABLO = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
-DEMO_TENANT_LA_ESPIGA = "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7"
+DEMO_TENANT_PROFESSIONAL = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
 
 
 def verify_internal_api_key(x_internal_api_key: Optional[str] = Header(None)):
     """Verify internal API key for service-to-service communication"""
-    if x_internal_api_key != INTERNAL_API_KEY:
+    from app.core.config import settings
+    if x_internal_api_key != settings.INTERNAL_API_KEY:
         logger.warning("Unauthorized internal API access attempted")
         raise HTTPException(status_code=403, detail="Invalid internal API key")
     return True
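The change above moves the internal key from a module-level `os.getenv` default to central settings. A minimal sketch of what the corresponding field in `app.core.config` might look like; the `Settings` class shape and pydantic-settings usage are assumptions, only the `INTERNAL_API_KEY` and `REDIS_URL` names are confirmed by this commit:

```python
# app/core/config.py (sketch, assuming pydantic-settings)
from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    REDIS_URL: str = "redis://localhost:6379/0"  # referenced by the Redis helper above
    INTERNAL_API_KEY: str = "dev-internal-key-change-in-production"


settings = Settings()  # field values are overridden by environment variables
```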
services/forecasting/app/consumers/forecast_event_consumer.py (new file, 178 lines)
@@ -0,0 +1,178 @@
"""
|
||||
Forecast event consumer for the forecasting service
|
||||
Handles events that should trigger cache invalidation for aggregated forecasts
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
import json
|
||||
import redis.asyncio as redis
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ForecastEventConsumer:
|
||||
"""
|
||||
Consumer for forecast events that may trigger cache invalidation
|
||||
"""
|
||||
|
||||
def __init__(self, redis_client: redis.Redis):
|
||||
self.redis_client = redis_client
|
||||
|
||||
async def handle_forecast_updated(self, event_data: Dict[str, Any]):
|
||||
"""
|
||||
Handle forecast updated event
|
||||
Invalidate parent tenant's aggregated forecast cache if this tenant is a child
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Handling forecast updated event: {event_data}")
|
||||
|
||||
tenant_id = event_data.get('tenant_id')
|
||||
forecast_date = event_data.get('forecast_date')
|
||||
product_id = event_data.get('product_id')
|
||||
updated_at = event_data.get('updated_at', None)
|
||||
|
||||
if not tenant_id:
|
||||
logger.error("Missing tenant_id in forecast event")
|
||||
return
|
||||
|
||||
# Check if this tenant is a child tenant (has parent)
|
||||
# In a real implementation, this would call the tenant service to check hierarchy
|
||||
parent_tenant_id = await self._get_parent_tenant_id(tenant_id)
|
||||
|
||||
if parent_tenant_id:
|
||||
# Invalidate parent's aggregated forecast cache
|
||||
await self._invalidate_parent_aggregated_cache(
|
||||
parent_tenant_id=parent_tenant_id,
|
||||
child_tenant_id=tenant_id,
|
||||
forecast_date=forecast_date,
|
||||
product_id=product_id
|
||||
)
|
||||
|
||||
logger.info(f"Forecast updated event processed for tenant {tenant_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error handling forecast updated event: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
async def handle_forecast_created(self, event_data: Dict[str, Any]):
|
||||
"""
|
||||
Handle forecast created event
|
||||
Similar to update, may affect parent tenant's aggregated forecasts
|
||||
"""
|
||||
await self.handle_forecast_updated(event_data)
|
||||
|
||||
async def handle_forecast_deleted(self, event_data: Dict[str, Any]):
|
||||
"""
|
||||
Handle forecast deleted event
|
||||
Similar to update, may affect parent tenant's aggregated forecasts
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Handling forecast deleted event: {event_data}")
|
||||
|
||||
tenant_id = event_data.get('tenant_id')
|
||||
forecast_date = event_data.get('forecast_date')
|
||||
product_id = event_data.get('product_id')
|
||||
|
||||
if not tenant_id:
|
||||
logger.error("Missing tenant_id in forecast delete event")
|
||||
return
|
||||
|
||||
# Check if this tenant is a child tenant
|
||||
parent_tenant_id = await self._get_parent_tenant_id(tenant_id)
|
||||
|
||||
if parent_tenant_id:
|
||||
# Invalidate parent's aggregated forecast cache
|
||||
await self._invalidate_parent_aggregated_cache(
|
||||
parent_tenant_id=parent_tenant_id,
|
||||
child_tenant_id=tenant_id,
|
||||
forecast_date=forecast_date,
|
||||
product_id=product_id
|
||||
)
|
||||
|
||||
logger.info(f"Forecast deleted event processed for tenant {tenant_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error handling forecast deleted event: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
async def _get_parent_tenant_id(self, tenant_id: str) -> Optional[str]:
|
||||
"""
|
||||
Get parent tenant ID for a child tenant
|
||||
In a real implementation, this would call the tenant service
|
||||
"""
|
||||
# This is a placeholder implementation
|
||||
# In real implementation, this would use TenantServiceClient to get tenant hierarchy
|
||||
try:
|
||||
# Simulate checking tenant hierarchy
|
||||
# In real implementation: return await self.tenant_client.get_parent_tenant_id(tenant_id)
|
||||
|
||||
# For now, we'll return a placeholder implementation that would check the database
|
||||
# This is just a simulation of the actual implementation needed
|
||||
return None # Placeholder - real implementation needed
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting parent tenant ID for {tenant_id}: {e}")
|
||||
return None
|
||||
|
||||
async def _invalidate_parent_aggregated_cache(
|
||||
self,
|
||||
parent_tenant_id: str,
|
||||
child_tenant_id: str,
|
||||
forecast_date: Optional[str] = None,
|
||||
product_id: Optional[str] = None
|
||||
):
|
||||
"""
|
||||
Invalidate parent tenant's aggregated forecast cache
|
||||
"""
|
||||
try:
|
||||
# Pattern to match all aggregated forecast cache keys for this parent
|
||||
# Format: agg_forecast:{parent_tenant_id}:{start_date}:{end_date}:{product_id}
|
||||
pattern = f"agg_forecast:{parent_tenant_id}:*:*:*"
|
||||
|
||||
# Find all matching keys and delete them
|
||||
keys_to_delete = []
|
||||
async for key in self.redis_client.scan_iter(match=pattern):
|
||||
if isinstance(key, bytes):
|
||||
key = key.decode('utf-8')
|
||||
keys_to_delete.append(key)
|
||||
|
||||
if keys_to_delete:
|
||||
await self.redis_client.delete(*keys_to_delete)
|
||||
logger.info(f"Invalidated {len(keys_to_delete)} aggregated forecast cache entries for parent tenant {parent_tenant_id}")
|
||||
else:
|
||||
logger.info(f"No aggregated forecast cache entries found to invalidate for parent tenant {parent_tenant_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error invalidating parent aggregated cache: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
async def handle_tenant_hierarchy_changed(self, event_data: Dict[str, Any]):
|
||||
"""
|
||||
Handle tenant hierarchy change event
|
||||
This could be when a tenant becomes a child of another, or when the hierarchy changes
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Handling tenant hierarchy change event: {event_data}")
|
||||
|
||||
tenant_id = event_data.get('tenant_id')
|
||||
parent_tenant_id = event_data.get('parent_tenant_id')
|
||||
action = event_data.get('action') # 'added', 'removed', 'changed'
|
||||
|
||||
# Invalidate any cached aggregated forecasts that might be affected
|
||||
if parent_tenant_id:
|
||||
# If this child tenant changed, invalidate parent's cache
|
||||
await self._invalidate_parent_aggregated_cache(
|
||||
parent_tenant_id=parent_tenant_id,
|
||||
child_tenant_id=tenant_id
|
||||
)
|
||||
|
||||
# If this was a former parent tenant that's no longer a parent,
|
||||
# its aggregated cache might need to be invalidated differently
|
||||
if action == 'removed' and event_data.get('was_parent'):
|
||||
# Invalidate its own aggregated cache since it's no longer a parent
|
||||
# This would be handled by tenant service events
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error handling tenant hierarchy change event: {e}", exc_info=True)
|
||||
raise
|
||||
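The class above only defines handlers; nothing in this commit wires them to a broker. A wiring sketch under assumptions: aio-pika, an illustrative `forecasting.events` topic exchange, and invented routing keys, none of which are confirmed by the diff:

```python
import asyncio
import json

import aio_pika
import redis.asyncio as redis

from app.consumers.forecast_event_consumer import ForecastEventConsumer


async def run_consumer(amqp_url: str, redis_url: str) -> None:
    consumer = ForecastEventConsumer(redis.from_url(redis_url))
    handlers = {
        "forecasting.forecast.updated": consumer.handle_forecast_updated,
        "forecasting.forecast.created": consumer.handle_forecast_created,
        "forecasting.forecast.deleted": consumer.handle_forecast_deleted,
        "tenant.hierarchy.changed": consumer.handle_tenant_hierarchy_changed,
    }
    connection = await aio_pika.connect_robust(amqp_url)
    async with connection:
        channel = await connection.channel()
        exchange = await channel.declare_exchange(
            "forecasting.events", aio_pika.ExchangeType.TOPIC, durable=True
        )
        queue = await channel.declare_queue("forecasting.cache_invalidation", durable=True)
        for routing_key in handlers:
            await queue.bind(exchange, routing_key=routing_key)

        async with queue.iterator() as messages:
            async for message in messages:
                async with message.process(requeue=False):
                    # Dispatch to the matching handler by routing key
                    handler = handlers.get(message.routing_key)
                    if handler:
                        await handler(json.loads(message.body))


if __name__ == "__main__":
    asyncio.run(run_consumer("amqp://guest:guest@localhost/", "redis://localhost:6379/0"))
```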
services/forecasting/app/main.py
@@ -15,7 +15,7 @@ from app.services.forecasting_alert_service import ForecastingAlertService
 from shared.service_base import StandardFastAPIService
 
 # Import API routers
-from app.api import forecasts, forecasting_operations, analytics, scenario_operations, internal_demo, audit, ml_insights, validation, historical_validation, webhooks, performance_monitoring, retraining
+from app.api import forecasts, forecasting_operations, analytics, scenario_operations, internal_demo, audit, ml_insights, validation, historical_validation, webhooks, performance_monitoring, retraining, enterprise_forecasting
 
 
 class ForecastingService(StandardFastAPIService):
@@ -176,6 +176,7 @@ service.add_router(historical_validation.router) # Historical validation endpoi
 service.add_router(webhooks.router) # Webhooks endpoint
 service.add_router(performance_monitoring.router) # Performance monitoring endpoint
 service.add_router(retraining.router) # Retraining endpoint
+service.add_router(enterprise_forecasting.router) # Enterprise forecasting endpoint
 
 if __name__ == "__main__":
     import uvicorn
services/forecasting/app/services/enterprise_forecasting_service.py (new file, 228 lines)
@@ -0,0 +1,228 @@
"""
Enterprise forecasting service for aggregated demand across parent-child tenants
"""

import logging
from typing import Dict, Any, List, Optional
from datetime import date, datetime
import json
import redis.asyncio as redis

from shared.clients.forecast_client import ForecastServiceClient
from shared.clients.tenant_client import TenantServiceClient

logger = logging.getLogger(__name__)


class EnterpriseForecastingService:
    """
    Service for aggregating forecasts across parent and child tenants
    """

    def __init__(
        self,
        forecast_client: ForecastServiceClient,
        tenant_client: TenantServiceClient,
        redis_client: redis.Redis
    ):
        self.forecast_client = forecast_client
        self.tenant_client = tenant_client
        self.redis_client = redis_client
        self.cache_ttl_seconds = 3600  # 1-hour TTL

    async def get_aggregated_forecast(
        self,
        parent_tenant_id: str,
        start_date: date,
        end_date: date,
        product_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Get the aggregated forecast across a parent and all of its child tenants

        Args:
            parent_tenant_id: Parent tenant ID
            start_date: Start date for forecast aggregation
            end_date: End date for forecast aggregation
            product_id: Optional product ID to filter by

        Returns:
            Dict with aggregated forecast data by date and product
        """
        # Create cache key
        cache_key = f"agg_forecast:{parent_tenant_id}:{start_date}:{end_date}:{product_id or 'all'}"

        # Try the cache first
        try:
            cached_result = await self.redis_client.get(cache_key)
            if cached_result:
                logger.info(f"Cache hit for aggregated forecast: {cache_key}")
                return json.loads(cached_result)
        except Exception as e:
            logger.warning(f"Cache read failed: {e}")

        logger.info(f"Computing aggregated forecast for parent {parent_tenant_id} from {start_date} to {end_date}")

        # Get child tenant IDs
        child_tenants = await self.tenant_client.get_child_tenants(parent_tenant_id)
        child_tenant_ids = [child['id'] for child in child_tenants]

        # Include the parent tenant itself for complete aggregation
        all_tenant_ids = [parent_tenant_id] + child_tenant_ids

        # Fetch forecasts for all tenants (parent + children)
        all_forecasts = {}
        tenant_contributions = {}  # Track which tenant contributed to each forecast

        for tenant_id in all_tenant_ids:
            try:
                tenant_forecasts = await self.forecast_client.get_forecasts(
                    tenant_id=tenant_id,
                    start_date=start_date,
                    end_date=end_date,
                    product_id=product_id
                )

                for forecast_date_str, products in tenant_forecasts.items():
                    if forecast_date_str not in all_forecasts:
                        all_forecasts[forecast_date_str] = {}
                        tenant_contributions[forecast_date_str] = {}

                    for product_id_key, forecast_data in products.items():
                        if product_id_key not in all_forecasts[forecast_date_str]:
                            all_forecasts[forecast_date_str][product_id_key] = {
                                'predicted_demand': 0,
                                'confidence_lower': 0,
                                'confidence_upper': 0,
                                'tenant_contributions': []
                            }

                        # Aggregate the forecast values
                        all_forecasts[forecast_date_str][product_id_key]['predicted_demand'] += forecast_data.get('predicted_demand', 0)

                        # Confidence bounds are simply summed here, which is
                        # conservative; a proper rollup would combine them
                        # statistically (see the sketch after this file)
                        all_forecasts[forecast_date_str][product_id_key]['confidence_lower'] += forecast_data.get('confidence_lower', 0)
                        all_forecasts[forecast_date_str][product_id_key]['confidence_upper'] += forecast_data.get('confidence_upper', 0)

                        # Track the contribution by tenant
                        all_forecasts[forecast_date_str][product_id_key]['tenant_contributions'].append({
                            'tenant_id': tenant_id,
                            'demand': forecast_data.get('predicted_demand', 0),
                            'confidence_lower': forecast_data.get('confidence_lower', 0),
                            'confidence_upper': forecast_data.get('confidence_upper', 0)
                        })

            except Exception as e:
                logger.error(f"Failed to fetch forecasts for tenant {tenant_id}: {e}")
                # Continue with the other tenants even if one fails

        # Prepare the result
        result = {
            "parent_tenant_id": parent_tenant_id,
            "aggregated_forecasts": all_forecasts,
            "tenant_contributions": tenant_contributions,
            "child_tenant_count": len(child_tenant_ids),
            "forecast_dates": list(all_forecasts.keys()),
            "computed_at": datetime.utcnow().isoformat()
        }

        # Cache the result
        try:
            await self.redis_client.setex(
                cache_key,
                self.cache_ttl_seconds,
                json.dumps(result, default=str)  # default=str handles date serialization
            )
            logger.info(f"Forecast cached for {cache_key}")
        except Exception as e:
            logger.warning(f"Cache write failed: {e}")

        return result

    async def get_network_performance_metrics(
        self,
        parent_tenant_id: str,
        start_date: date,
        end_date: date
    ) -> Dict[str, Any]:
        """
        Get aggregated performance metrics across the tenant network

        Args:
            parent_tenant_id: Parent tenant ID
            start_date: Start date for metrics
            end_date: End date for metrics

        Returns:
            Dict with aggregated performance metrics
        """
        child_tenants = await self.tenant_client.get_child_tenants(parent_tenant_id)
        child_tenant_ids = [child['id'] for child in child_tenants]

        # Include the parent tenant itself for complete aggregation
        all_tenant_ids = [parent_tenant_id] + child_tenant_ids

        total_sales = 0
        total_forecasted = 0
        total_accuracy = 0
        tenant_count = 0

        performance_data = {}

        for tenant_id in all_tenant_ids:
            try:
                # Fetch sales and forecast data for the period; for a tenant
                # with no children, get_aggregated_forecast returns just that
                # tenant's own forecasts
                sales_data = await self._fetch_sales_data(tenant_id, start_date, end_date)
                forecast_data = await self.get_aggregated_forecast(tenant_id, start_date, end_date)

                tenant_performance = {
                    'tenant_id': tenant_id,
                    'sales': sales_data.get('total_sales', 0),
                    'forecasted': sum(
                        sum(product.get('predicted_demand', 0) for product in day.values())
                        if isinstance(day, dict) else day
                        for day in forecast_data.get('aggregated_forecasts', {}).values()
                    ),
                }

                # Calculate accuracy if both sales and forecast data exist
                if tenant_performance['sales'] > 0 and tenant_performance['forecasted'] > 0:
                    accuracy = 1 - abs(tenant_performance['forecasted'] - tenant_performance['sales']) / tenant_performance['sales']
                    tenant_performance['accuracy'] = max(0, min(1, accuracy))  # Clamp between 0 and 1
                else:
                    tenant_performance['accuracy'] = 0

                performance_data[tenant_id] = tenant_performance
                total_sales += tenant_performance['sales']
                total_forecasted += tenant_performance['forecasted']
                total_accuracy += tenant_performance['accuracy']
                tenant_count += 1

            except Exception as e:
                logger.error(f"Failed to fetch performance data for tenant {tenant_id}: {e}")

        network_performance = {
            "parent_tenant_id": parent_tenant_id,
            "total_sales": total_sales,
            "total_forecasted": total_forecasted,
            "average_accuracy": total_accuracy / tenant_count if tenant_count > 0 else 0,
            "tenant_count": tenant_count,
            "child_tenant_count": len(child_tenant_ids),
            "tenant_performances": performance_data,
            "computed_at": datetime.utcnow().isoformat()
        }

        return network_performance

    async def _fetch_sales_data(self, tenant_id: str, start_date: date, end_date: date) -> Dict[str, Any]:
        """
        Helper to fetch sales data (a real implementation would call the sales service)
        """
        # Placeholder implementation; real totals would come from the sales service
        return {
            'total_sales': 0,  # Placeholder - would come from sales service
            'date_range': f"{start_date} to {end_date}",
            'tenant_id': tenant_id
        }
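The service above sums confidence bounds across tenants, which overstates the interval width. A statistically sounder rollup sketch, assuming per-tenant forecast errors are independent and intervals are symmetric around the point forecast (both assumptions, not confirmed by this commit):

```python
import math


def combine_intervals(per_tenant: list[dict]) -> dict:
    """Roll up per-tenant forecasts into one network-level interval."""
    total = sum(f["predicted_demand"] for f in per_tenant)
    # Each half-width is a fixed multiple of that tenant's sigma; for
    # independent errors variances add, so half-widths combine as the
    # root sum of squares rather than a straight sum.
    half_widths = [(f["confidence_upper"] - f["confidence_lower"]) / 2 for f in per_tenant]
    network_half_width = math.sqrt(sum(h * h for h in half_widths))
    return {
        "predicted_demand": total,
        "confidence_lower": total - network_half_width,
        "confidence_upper": total + network_half_width,
    }
```

Straight summation of bounds, as the service does today, yields wider and therefore conservative intervals; the root-sum-of-squares rollup is tighter but only valid under the independence assumption.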
services/forecasting/scripts/demo/seed_demo_forecasts.py
@@ -34,8 +34,7 @@ from shared.utils.demo_dates import BASE_REFERENCE_DATE
 # Configure logging
 logger = structlog.get_logger()
 
-DEMO_TENANT_SAN_PABLO = uuid.UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")  # Individual bakery
-DEMO_TENANT_LA_ESPIGA = uuid.UUID("b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7")  # Central bakery
+DEMO_TENANT_PROFESSIONAL = uuid.UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")  # Individual bakery
 
 # Day of week mapping
 DAYS_OF_WEEK = {
@@ -413,24 +412,15 @@ async def seed_all(db: AsyncSession):
     results = []
 
-    # Seed San Pablo (Individual Bakery)
-    result_san_pablo = await generate_forecasts_for_tenant(
+    # Seed Professional Bakery (merged from San Pablo + La Espiga)
+    result_professional = await generate_forecasts_for_tenant(
         db,
-        DEMO_TENANT_SAN_PABLO,
-        "San Pablo - Individual Bakery",
+        DEMO_TENANT_PROFESSIONAL,
+        "Professional Bakery",
         "individual_bakery",
         config
     )
-    results.append(result_san_pablo)
-
-    # Seed La Espiga (Central Bakery)
-    result_la_espiga = await generate_forecasts_for_tenant(
-        db,
-        DEMO_TENANT_LA_ESPIGA,
-        "La Espiga - Central Bakery",
-        "central_bakery",
-        config
-    )
-    results.append(result_la_espiga)
+    results.append(result_professional)
 
     total_forecasts = sum(r["forecasts_created"] for r in results)
     total_batches = sum(r["batches_created"] for r in results)
services/forecasting/scripts/demo/seed_demo_forecasts_retail.py (new file, 167 lines)
@@ -0,0 +1,167 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo Retail Forecasting Seeding Script for Forecasting Service
Creates store-level demand forecasts for child retail outlets

This script populates child retail tenants with AI-generated demand forecasts.

Usage:
    python /app/scripts/demo/seed_demo_forecasts_retail.py

Environment Variables Required:
    FORECASTING_DATABASE_URL - PostgreSQL connection string
    DEMO_MODE - Set to 'production' for production seeding
"""

import asyncio
import uuid
import sys
import os
import random
from datetime import datetime, timezone, timedelta
from pathlib import Path
from decimal import Decimal

# Add app to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
# Add shared to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent.parent))

from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import select
import structlog

from shared.utils.demo_dates import BASE_REFERENCE_DATE
from app.models import Forecast, PredictionBatch

structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.dev.ConsoleRenderer()
    ]
)

logger = structlog.get_logger()

# Fixed Demo Tenant IDs
DEMO_TENANT_CHILD_1 = uuid.UUID("d4e5f6a7-b8c9-40d1-e2f3-a4b5c6d7e8f9")  # Madrid Centro
DEMO_TENANT_CHILD_2 = uuid.UUID("e5f6a7b8-c9d0-41e2-f3a4-b5c6d7e8f9a0")  # Barcelona Gràcia
DEMO_TENANT_CHILD_3 = uuid.UUID("f6a7b8c9-d0e1-42f3-a4b5-c6d7e8f9a0b1")  # Valencia Ruzafa

# Product IDs
PRODUCT_IDS = {
    "PRO-BAG-001": "20000000-0000-0000-0000-000000000001",
    "PRO-CRO-001": "20000000-0000-0000-0000-000000000002",
    "PRO-PUE-001": "20000000-0000-0000-0000-000000000003",
    "PRO-NAP-001": "20000000-0000-0000-0000-000000000004",
}

# Retail forecasting patterns: (tenant, outlet name, base daily quantity per SKU)
RETAIL_FORECASTS = [
    (DEMO_TENANT_CHILD_1, "Madrid Centro", {"PRO-BAG-001": 120, "PRO-CRO-001": 80, "PRO-PUE-001": 35, "PRO-NAP-001": 60}),
    (DEMO_TENANT_CHILD_2, "Barcelona Gràcia", {"PRO-BAG-001": 90, "PRO-CRO-001": 60, "PRO-PUE-001": 25, "PRO-NAP-001": 45}),
    (DEMO_TENANT_CHILD_3, "Valencia Ruzafa", {"PRO-BAG-001": 70, "PRO-CRO-001": 45, "PRO-PUE-001": 20, "PRO-NAP-001": 35})
]


async def seed_forecasts_for_retail_tenant(db: AsyncSession, tenant_id: uuid.UUID, tenant_name: str, base_forecasts: dict):
    """Seed forecasts for a retail tenant"""
    logger.info(f"Seeding forecasts for: {tenant_name}", tenant_id=str(tenant_id))

    created = 0
    # Create 7 days of forecasts
    for days_ahead in range(1, 8):
        forecast_date = BASE_REFERENCE_DATE + timedelta(days=days_ahead)

        for sku, base_qty in base_forecasts.items():
            # Derive a tenant-scoped product id by XOR-ing the tenant id with
            # the base product id: deterministic per (tenant, product) pair
            base_product_id = uuid.UUID(PRODUCT_IDS[sku])
            tenant_int = int(tenant_id.hex, 16)
            product_id = uuid.UUID(int=tenant_int ^ int(base_product_id.hex, 16))

            # Weekend boost
            is_weekend = forecast_date.weekday() in [5, 6]
            day_of_week = forecast_date.weekday()
            multiplier = random.uniform(1.3, 1.5) if is_weekend else random.uniform(0.9, 1.1)
            forecasted_quantity = int(base_qty * multiplier)

            forecast = Forecast(
                id=uuid.uuid4(),
                tenant_id=tenant_id,
                inventory_product_id=product_id,
                product_name=sku,
                location=tenant_name,
                forecast_date=forecast_date,
                created_at=BASE_REFERENCE_DATE,
                predicted_demand=float(forecasted_quantity),
                confidence_lower=float(int(forecasted_quantity * 0.85)),
                confidence_upper=float(int(forecasted_quantity * 1.15)),
                confidence_level=0.90,
                model_id="retail_forecast_model",
                model_version="retail_v1.0",
                algorithm="prophet_retail",
                business_type="retail_outlet",
                day_of_week=day_of_week,
                is_holiday=False,
                is_weekend=is_weekend,
                weather_temperature=random.uniform(10.0, 25.0),
                weather_precipitation=random.uniform(0.0, 5.0) if random.random() < 0.3 else 0.0,
                weather_description="Clear" if random.random() > 0.3 else "Rainy",
                traffic_volume=random.randint(50, 200) if is_weekend else random.randint(30, 120),
                processing_time_ms=random.randint(50, 200),
                features_used={"historical_sales": True, "weather": True, "day_of_week": True}
            )

            db.add(forecast)
            created += 1

    await db.commit()
    logger.info(f"Created {created} forecasts for {tenant_name}")
    return {"tenant_id": str(tenant_id), "forecasts_created": created}


async def seed_all(db: AsyncSession):
    """Seed all retail forecasts"""
    logger.info("=" * 80)
    logger.info("📈 Starting Demo Retail Forecasting Seeding")
    logger.info("=" * 80)

    results = []
    for tenant_id, tenant_name, base_forecasts in RETAIL_FORECASTS:
        result = await seed_forecasts_for_retail_tenant(db, tenant_id, f"{tenant_name} (Retail)", base_forecasts)
        results.append(result)

    total = sum(r["forecasts_created"] for r in results)
    logger.info(f"✅ Total forecasts created: {total}")
    return {"total_forecasts": total, "results": results}


async def main():
    database_url = os.getenv("FORECASTING_DATABASE_URL") or os.getenv("DATABASE_URL")
    if not database_url:
        logger.error("❌ DATABASE_URL not set")
        return 1

    if database_url.startswith("postgresql://"):
        database_url = database_url.replace("postgresql://", "postgresql+asyncpg://", 1)

    engine = create_async_engine(database_url, echo=False, pool_pre_ping=True)
    async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with async_session() as session:
            await seed_all(session)
        logger.info("🎉 Retail forecasting seed completed!")
        return 0
    except Exception as e:
        logger.error(f"❌ Seed failed: {e}", exc_info=True)
        return 1
    finally:
        await engine.dispose()


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
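A tiny check of the XOR-based product-id derivation used in the script above (values are taken from the script itself; the reversibility of XOR is what keeps the per-tenant mapping collision-free):

```python
import uuid

tenant = uuid.UUID("d4e5f6a7-b8c9-40d1-e2f3-a4b5c6d7e8f9")  # Madrid Centro
base = uuid.UUID("20000000-0000-0000-0000-000000000001")    # PRO-BAG-001

derived = uuid.UUID(int=tenant.int ^ base.int)
# XOR is its own inverse, so the base product id is recoverable from the
# derived id and the tenant id:
assert uuid.UUID(int=derived.int ^ tenant.int) == base
```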