Initial commit - production deployment
shared/clients/forecast_client.py (new executable file, 510 lines added)
@@ -0,0 +1,510 @@
# shared/clients/forecast_client.py
"""
Forecast Service Client for Inter-Service Communication

This client provides a high-level API for interacting with the Forecasting Service,
which generates demand predictions using the Prophet ML algorithm, validates forecast accuracy,
and provides enterprise network demand aggregation for multi-location bakeries.

Key Capabilities:
- Forecast Generation: Single product, multi-day, batch forecasting
- Real-Time Predictions: On-demand predictions with custom features
- Forecast Validation: Compare predictions vs actual sales, track accuracy
- Analytics: Prediction performance metrics, historical accuracy trends
- Enterprise Aggregation: Network-wide demand forecasting for parent-child hierarchies
- Caching: Redis-backed caching for high-performance prediction serving

Backend Architecture:
- ATOMIC: /forecasting/forecasts (CRUD operations on forecast records)
- BUSINESS: /forecasting/operations/* (forecast generation, validation)
- ANALYTICS: /forecasting/analytics/* (performance metrics, accuracy trends)
- ENTERPRISE: /forecasting/enterprise/* (network demand aggregation)

Enterprise Features (NEW):
- Network demand aggregation across all child outlets for centralized production planning
- Child contribution tracking (each outlet's % of total network demand)
- Redis caching with 1-hour TTL for enterprise forecasts
- Subscription gating (requires Enterprise tier)

Usage Example:
```python
from shared.clients import get_forecast_client
from shared.config.base import get_settings
from datetime import date, timedelta

config = get_settings()
client = get_forecast_client(config, calling_service_name="production")

# Generate 7-day forecast for a product
forecast = await client.generate_multi_day_forecast(
    tenant_id=tenant_id,
    inventory_product_id=product_id,
    forecast_date=date.today(),
    forecast_days=7,
    include_recommendations=True
)

# Batch forecast for multiple products
batch_forecast = await client.generate_batch_forecast(
    tenant_id=tenant_id,
    inventory_product_ids=[product_id_1, product_id_2],
    forecast_date=date.today(),
    forecast_days=7
)

# Validate forecasts against actual sales
validation = await client.validate_forecasts(
    tenant_id=tenant_id,
    date=date.today() - timedelta(days=1)
)

# Get predictions for a specific date (from cache or DB)
predictions = await client.get_predictions_for_date(
    tenant_id=tenant_id,
    target_date=date.today()
)
```
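
Enterprise aggregation example (a minimal sketch; assumes the parent tenant is on the
Enterprise tier with child outlets configured):

```python
# Aggregate network-wide demand across the parent and all child outlets
network_forecast = await client.get_aggregated_forecast(
    parent_tenant_id=parent_tenant_id,
    start_date=date.today(),
    end_date=date.today() + timedelta(days=7)
)
```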

Service Architecture:
- Base URL: Configured via FORECASTING_SERVICE_URL environment variable
- Authentication: Uses BaseServiceClient with tenant_id header validation
- Error Handling: Returns None on errors, logs detailed error context (see the sketch below)
- Async: All methods are async and use httpx for HTTP communication
- Caching: 24-hour TTL for standard forecasts, 1-hour TTL for enterprise aggregations
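
Most methods return None when the upstream call fails, so callers should guard the
result before using it. A minimal sketch:

```python
forecast = await client.generate_single_forecast(
    tenant_id=tenant_id,
    inventory_product_id=product_id,
    forecast_date=date.today()
)
if forecast is None:
    # Error details are already logged by the client; fall back or retry here.
    ...
```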

ML Model Details:
- Algorithm: Facebook Prophet (time series forecasting)
- Features: 20+ temporal, weather, traffic, holiday, and POI features
- Accuracy: 15-25% MAPE (Mean Absolute Percentage Error)
- Training: Weekly retraining via orchestrator automation
- Confidence Intervals: 95% confidence bounds (yhat_lower, yhat_upper)

Related Services:
- Production Service: Uses forecasts for production planning
- Procurement Service: Uses forecasts for ingredient ordering
- Orchestrator Service: Triggers daily forecast generation, displays network forecasts on the enterprise dashboard
- Tenant Service: Validates hierarchy for enterprise aggregation
- Distribution Service: Network forecasts inform capacity planning

For more details, see services/forecasting/README.md
"""

from typing import Dict, Any, Optional, List
from datetime import date
import structlog
from .base_service_client import BaseServiceClient
from shared.config.base import BaseServiceSettings

logger = structlog.get_logger()


class ForecastServiceClient(BaseServiceClient):
    """Client for communicating with the forecasting service"""

    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
        super().__init__(calling_service_name, config)

    def get_service_base_path(self) -> str:
        return "/api/v1"

    # ================================================================
    # ATOMIC: Forecast CRUD Operations
    # ================================================================

    async def get_forecast(self, tenant_id: str, forecast_id: str) -> Optional[Dict[str, Any]]:
        """Get forecast details by ID"""
        return await self.get(f"forecasting/forecasts/{forecast_id}", tenant_id=tenant_id)

    async def list_forecasts(
        self,
        tenant_id: str,
        inventory_product_id: Optional[str] = None,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None,
        limit: int = 50,
        offset: int = 0
    ) -> Optional[List[Dict[str, Any]]]:
        """List forecasts for a tenant with optional filters"""
        params = {"limit": limit, "offset": offset}
        if inventory_product_id:
            params["inventory_product_id"] = inventory_product_id
        if start_date:
            params["start_date"] = start_date.isoformat()
        if end_date:
            params["end_date"] = end_date.isoformat()

        return await self.get("forecasting/forecasts", tenant_id=tenant_id, params=params)

    async def delete_forecast(self, tenant_id: str, forecast_id: str) -> Optional[Dict[str, Any]]:
        """Delete a forecast"""
        return await self.delete(f"forecasting/forecasts/{forecast_id}", tenant_id=tenant_id)

    # ================================================================
    # BUSINESS: Forecasting Operations
    # ================================================================

    async def generate_single_forecast(
        self,
        tenant_id: str,
        inventory_product_id: str,
        forecast_date: date,
        include_recommendations: bool = False
    ) -> Optional[Dict[str, Any]]:
        """Generate a single product forecast"""
        data = {
            "inventory_product_id": inventory_product_id,
            "forecast_date": forecast_date.isoformat(),
            "include_recommendations": include_recommendations
        }
        return await self.post("forecasting/operations/single", data=data, tenant_id=tenant_id)

    async def generate_multi_day_forecast(
        self,
        tenant_id: str,
        inventory_product_id: str,
        forecast_date: date,
        forecast_days: int = 7,
        include_recommendations: bool = False
    ) -> Optional[Dict[str, Any]]:
        """Generate multiple daily forecasts for the specified period"""
        data = {
            "inventory_product_id": inventory_product_id,
            "forecast_date": forecast_date.isoformat(),
            "forecast_days": forecast_days,
            "include_recommendations": include_recommendations
        }
        return await self.post("forecasting/operations/multi-day", data=data, tenant_id=tenant_id)

    async def generate_batch_forecast(
        self,
        tenant_id: str,
        inventory_product_ids: List[str],
        forecast_date: date,
        forecast_days: int = 1
    ) -> Optional[Dict[str, Any]]:
        """Generate forecasts for multiple products in batch"""
        data = {
            "inventory_product_ids": inventory_product_ids,
            "forecast_date": forecast_date.isoformat(),
            "forecast_days": forecast_days
        }
        return await self.post("forecasting/operations/batch", data=data, tenant_id=tenant_id)

    async def generate_realtime_prediction(
        self,
        tenant_id: str,
        inventory_product_id: str,
        model_id: str,
        features: Dict[str, Any],
        model_path: Optional[str] = None,
        confidence_level: float = 0.8
    ) -> Optional[Dict[str, Any]]:
        """Generate real-time prediction"""
        data = {
            "inventory_product_id": inventory_product_id,
            "model_id": model_id,
            "features": features,
            "confidence_level": confidence_level
        }
        if model_path:
            data["model_path"] = model_path

        return await self.post("forecasting/operations/realtime", data=data, tenant_id=tenant_id)

    async def validate_predictions(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date
    ) -> Optional[Dict[str, Any]]:
        """Validate predictions against actual sales data"""
        params = {
            "start_date": start_date.isoformat(),
            "end_date": end_date.isoformat()
        }
        return await self.post("forecasting/operations/validate-predictions", params=params, tenant_id=tenant_id)

    async def validate_forecasts(
        self,
        tenant_id: str,
        date: date
    ) -> Optional[Dict[str, Any]]:
        """
        Validate forecasts for a specific date against actual sales.
        Calculates MAPE, RMSE, MAE and identifies products with poor accuracy.

        Args:
            tenant_id: Tenant UUID
            date: Date to validate (validates this single day)

        Returns:
            Dict with overall metrics and poor accuracy products list
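
        Example return shape (keys mirror the dict built below; values are illustrative):
            {
                "overall_mape": 18.2,
                "overall_rmse": 4.1,
                "overall_mae": 3.2,
                "overall_r2_score": 0.78,
                "overall_accuracy_percentage": 81.8,
                "products_validated": 12,
                "poor_accuracy_products": [...],
                "validation_run_id": "...",
                "forecasts_evaluated": 14,
                "forecasts_with_actuals": 12,
                "forecasts_without_actuals": 2
            }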
        """
        from datetime import datetime, timezone

        # Convert date to datetime with timezone for start/end of day
        # (note: these bounds are not currently passed to the validate-yesterday call below)
        start_datetime = datetime.combine(date, datetime.min.time()).replace(tzinfo=timezone.utc)
        end_datetime = datetime.combine(date, datetime.max.time()).replace(tzinfo=timezone.utc)

        # Call the new validation endpoint
        result = await self.post(
            "forecasting/validation/validate-yesterday",
            params={"orchestration_run_id": None},
            tenant_id=tenant_id
        )

        if not result:
            return None

        # Transform the new response format to match the expected format
        overall_metrics = result.get("overall_metrics", {})

        # Get poor accuracy products from the result
        poor_accuracy_products = result.get("poor_accuracy_products", [])

        return {
            "overall_mape": overall_metrics.get("mape", 0),
            "overall_rmse": overall_metrics.get("rmse", 0),
            "overall_mae": overall_metrics.get("mae", 0),
            "overall_r2_score": overall_metrics.get("r2_score", 0),
            "overall_accuracy_percentage": overall_metrics.get("accuracy_percentage", 0),
            "products_validated": result.get("forecasts_with_actuals", 0),
            "poor_accuracy_products": poor_accuracy_products,
            "validation_run_id": result.get("validation_run_id"),
            "forecasts_evaluated": result.get("forecasts_evaluated", 0),
            "forecasts_with_actuals": result.get("forecasts_with_actuals", 0),
            "forecasts_without_actuals": result.get("forecasts_without_actuals", 0)
        }

    async def get_forecast_statistics(
        self,
        tenant_id: str,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None
    ) -> Optional[Dict[str, Any]]:
        """Get forecast statistics"""
        params = {}
        if start_date:
            params["start_date"] = start_date.isoformat()
        if end_date:
            params["end_date"] = end_date.isoformat()

        return await self.get("forecasting/operations/statistics", tenant_id=tenant_id, params=params)

    async def clear_prediction_cache(self, tenant_id: str) -> Optional[Dict[str, Any]]:
        """Clear prediction cache"""
        return await self.delete("forecasting/operations/cache", tenant_id=tenant_id)

    # ================================================================
    # ANALYTICS: Forecasting Analytics
    # ================================================================

    async def get_predictions_performance(
        self,
        tenant_id: str,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None
    ) -> Optional[Dict[str, Any]]:
        """Get predictions performance analytics"""
        params = {}
        if start_date:
            params["start_date"] = start_date.isoformat()
        if end_date:
            params["end_date"] = end_date.isoformat()

        return await self.get("forecasting/analytics/predictions-performance", tenant_id=tenant_id, params=params)

    # ================================================================
    # ML INSIGHTS: Dynamic Rules Generation
    # ================================================================

    async def trigger_rules_generation(
        self,
        tenant_id: str,
        product_ids: Optional[List[str]] = None,
        lookback_days: int = 90,
        min_samples: int = 10
    ) -> Optional[Dict[str, Any]]:
        """
        Trigger dynamic business rules learning for demand forecasting.

        Args:
            tenant_id: Tenant UUID
            product_ids: Specific product IDs to analyze. If None, analyzes all products
            lookback_days: Days of historical data to analyze (30-365)
            min_samples: Minimum samples required for rule learning (5-100)

        Returns:
            Dict with rules generation results including insights posted
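
        Example (a minimal sketch; analyzes all products with the default window):
            result = await client.trigger_rules_generation(
                tenant_id=tenant_id,
                lookback_days=90,
                min_samples=10
            )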
        """
        data = {
            "product_ids": product_ids,
            "lookback_days": lookback_days,
            "min_samples": min_samples
        }
        return await self.post("forecasting/ml/insights/generate-rules", data=data, tenant_id=tenant_id)

    async def trigger_demand_insights_internal(
        self,
        tenant_id: str
    ) -> Optional[Dict[str, Any]]:
        """
        Trigger demand forecasting insights for a tenant (internal service use only).

        This method calls the internal endpoint, which is protected by the x-internal-service header.
        Used by the demo-session service after cloning to generate AI insights from seeded data.

        Args:
            tenant_id: Tenant ID to trigger insights for

        Returns:
            Dict with trigger results or None if failed
        """
        try:
            result = await self._make_request(
                method="POST",
                endpoint="forecasting/internal/ml/generate-demand-insights",
                tenant_id=tenant_id,
                data={"tenant_id": tenant_id},
                headers={"x-internal-service": "demo-session"}
            )

            if result:
                logger.info(
                    "Demand insights triggered successfully via internal endpoint",
                    tenant_id=tenant_id,
                    insights_posted=result.get("insights_posted", 0)
                )
            else:
                logger.warning(
                    "Demand insights internal endpoint returned no result",
                    tenant_id=tenant_id
                )

            return result

        except Exception as e:
            logger.error(
                "Failed to trigger demand insights",
                tenant_id=tenant_id,
                error=str(e)
            )
            return None

    # ================================================================
    # Legacy/Compatibility Methods (deprecated)
    # ================================================================

    async def generate_forecasts(
        self,
        tenant_id: str,
        forecast_days: int = 7,
        inventory_product_ids: Optional[List[str]] = None
    ) -> Optional[Dict[str, Any]]:
        """
        COMPATIBILITY: Orchestrator-friendly method to generate forecasts.

        This method is called by the orchestrator service and generates batch forecasts
        for either the specified products or all products.

        Args:
            tenant_id: Tenant UUID
            forecast_days: Number of days to forecast (default 7)
            inventory_product_ids: Optional list of product IDs. If None, forecasts all products.

        Returns:
            Dict with forecast results
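
        Example (a minimal sketch, as called from the orchestrator):
            results = await client.generate_forecasts(
                tenant_id=tenant_id,
                forecast_days=7
            )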
        """
        from datetime import datetime

        # If no product IDs are specified, let the backend handle it
        if not inventory_product_ids:
            # Call the batch operation endpoint to forecast all products
            # The forecasting service will handle fetching all products internally
            data = {
                "batch_name": f"orchestrator-batch-{datetime.now().strftime('%Y%m%d')}",
                "inventory_product_ids": [],  # Empty list will trigger fetching all products
                "forecast_days": forecast_days
            }
            return await self.post("forecasting/operations/batch", data=data, tenant_id=tenant_id)

        # Otherwise use the standard batch forecast
        return await self.generate_batch_forecast(
            tenant_id=tenant_id,
            inventory_product_ids=inventory_product_ids,
            forecast_date=datetime.now().date(),
            forecast_days=forecast_days
        )

    async def get_aggregated_forecast(
        self,
        parent_tenant_id: str,
        start_date: date,
        end_date: date,
        product_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Get the aggregated forecast for an enterprise tenant and all of its children.

        This method calls the enterprise forecasting aggregation endpoint, which
        combines demand forecasts across the parent tenant and all child tenants
        in the network. Used for centralized production planning.

        Args:
            parent_tenant_id: The parent tenant (central bakery) UUID
            start_date: Start date for forecast range
            end_date: End date for forecast range
            product_id: Optional product ID to filter forecasts

        Returns:
            Aggregated forecast data including:
            - total_demand: Sum of all child demands
            - child_contributions: Per-child demand breakdown
            - forecast_date_range: Date range for the forecast
            - cached: Whether data was served from Redis cache
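
        Example return shape (illustrative; the exact payload is defined by the
        forecasting service's enterprise aggregation endpoint):
            {
                "total_demand": ...,
                "child_contributions": {...},
                "forecast_date_range": {...},
                "cached": True
            }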
        """
        params = {
            "start_date": start_date.isoformat(),
            "end_date": end_date.isoformat()
        }
        if product_id:
            params["product_id"] = product_id

        # Use _make_request directly because the base_service_client adds the /tenants/{tenant_id}/ prefix.
        # The gateway route is: /api/v1/tenants/{tenant_id}/forecasting/enterprise/{path}
        # so we build the full path ourselves and omit the tenant_id parameter to avoid double prefixing.
        return await self._make_request(
            "GET",
            f"tenants/{parent_tenant_id}/forecasting/enterprise/aggregated",
            params=params
        )

    async def create_forecast(
        self,
        tenant_id: str,
        model_id: str,
        start_date: str,
        end_date: str,
        product_ids: Optional[List[str]] = None,
        include_confidence_intervals: bool = True,
        **kwargs
    ) -> Optional[Dict[str, Any]]:
        """
        DEPRECATED: Use generate_single_forecast or generate_batch_forecast instead.

        Legacy method kept for backward compatibility.
        """
        # Map to the new batch forecast operation
        if product_ids:
            return await self.generate_batch_forecast(
                tenant_id=tenant_id,
                inventory_product_ids=product_ids,
                forecast_date=date.fromisoformat(start_date),
                forecast_days=1
            )
        return None


# Backward compatibility alias
def create_forecast_client(config: BaseServiceSettings, service_name: str = "unknown") -> ForecastServiceClient:
    """Create a forecast service client (backward compatibility)"""
    return ForecastServiceClient(config, service_name)