Add AI insights feature

Urtzi Alfaro
2025-12-15 21:14:22 +01:00
parent 5642b5a0c0
commit c566967bea
39 changed files with 17729 additions and 404 deletions

View File

@@ -62,6 +62,9 @@ class OrchestratorClient:
params={
"tenant_id": tenant_id,
**query_params
},
headers={
"x-internal-service": "alert-intelligence"
}
)
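For reference, a minimal sketch of the same call pattern made directly with httpx. The base URL and path are illustrative assumptions; only the tenant_id query parameter and the x-internal-service header come from the hunk above.

import httpx

async def fetch_orchestration_runs(tenant_id: str) -> dict:
    # Assumed host and path; tenant scoping and the internal-service header mirror OrchestratorClient.
    async with httpx.AsyncClient(base_url="http://orchestrator-service:8000") as client:
        response = await client.get(
            "/api/v1/orchestration/runs",
            params={"tenant_id": tenant_id},
            headers={"x-internal-service": "alert-intelligence"},
        )
        response.raise_for_status()
        return response.json()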

View File

@@ -450,5 +450,107 @@ RECOMMENDATION_TEMPLATES = {
"potential_time_saved_minutes": "time_saved",
"suggestion": "suggestion"
}
},
# ==================== AI INSIGHTS RECOMMENDATIONS ====================
"ai_yield_prediction": {
"title_key": "recommendations.ai_yield_prediction.title",
"title_params": {
"recipe_name": "recipe_name"
},
"message_variants": {
"generic": "recommendations.ai_yield_prediction.message"
},
"message_params": {
"recipe_name": "recipe_name",
"predicted_yield_percent": "predicted_yield",
"confidence_percent": "confidence",
"recommendation": "recommendation"
}
},
"ai_safety_stock_optimization": {
"title_key": "recommendations.ai_safety_stock_optimization.title",
"title_params": {
"ingredient_name": "ingredient_name"
},
"message_variants": {
"generic": "recommendations.ai_safety_stock_optimization.message"
},
"message_params": {
"ingredient_name": "ingredient_name",
"suggested_safety_stock_kg": "suggested_safety_stock",
"current_safety_stock_kg": "current_safety_stock",
"estimated_savings_eur": "estimated_savings",
"confidence_percent": "confidence"
}
},
"ai_supplier_recommendation": {
"title_key": "recommendations.ai_supplier_recommendation.title",
"title_params": {
"supplier_name": "supplier_name"
},
"message_variants": {
"generic": "recommendations.ai_supplier_recommendation.message"
},
"message_params": {
"supplier_name": "supplier_name",
"reliability_score": "reliability_score",
"recommendation": "recommendation",
"confidence_percent": "confidence"
}
},
"ai_price_forecast": {
"title_key": "recommendations.ai_price_forecast.title",
"title_params": {
"ingredient_name": "ingredient_name"
},
"message_variants": {
"generic": "recommendations.ai_price_forecast.message"
},
"message_params": {
"ingredient_name": "ingredient_name",
"predicted_price_eur": "predicted_price",
"current_price_eur": "current_price",
"price_trend": "price_trend",
"recommendation": "recommendation",
"confidence_percent": "confidence"
}
},
"ai_demand_forecast": {
"title_key": "recommendations.ai_demand_forecast.title",
"title_params": {
"product_name": "product_name"
},
"message_variants": {
"generic": "recommendations.ai_demand_forecast.message"
},
"message_params": {
"product_name": "product_name",
"predicted_demand": "predicted_demand",
"forecast_period": "forecast_period",
"confidence_percent": "confidence",
"recommendation": "recommendation"
}
},
"ai_business_rule": {
"title_key": "recommendations.ai_business_rule.title",
"title_params": {
"rule_category": "rule_category"
},
"message_variants": {
"generic": "recommendations.ai_business_rule.message"
},
"message_params": {
"rule_category": "rule_category",
"rule_description": "rule_description",
"confidence_percent": "confidence",
"recommendation": "recommendation"
}
}
}
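Each new entry maps an AI recommendation event type to i18n title/message keys plus the event fields that fill their parameters. A hypothetical resolver is sketched below to show how an entry could be applied to event data; the event payload shape and the resolver itself are assumptions, only RECOMMENDATION_TEMPLATES comes from this diff.

def resolve_recommendation(event_type: str, event_data: dict) -> dict:
    # Hypothetical helper: look up the template and pull the named fields out of the event payload.
    template = RECOMMENDATION_TEMPLATES[event_type]
    return {
        "title_key": template["title_key"],
        "title_params": {k: event_data.get(v) for k, v in template["title_params"].items()},
        "message_key": template["message_variants"]["generic"],
        "message_params": {k: event_data.get(v) for k, v in template["message_params"].items()},
    }

# Example: an ai_safety_stock_optimization event would yield the
# recommendations.ai_safety_stock_optimization.* keys with ingredient_name,
# suggested_safety_stock, estimated_savings, etc. filled from the event data.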

View File

@@ -344,7 +344,7 @@ async def generate_batch_forecast(
return BatchForecastResponse(
id=str(uuid.uuid4()),
tenant_id=tenant_id,
batch_name=getattr(request, 'batch_name', f"orchestrator-batch-{datetime.now().strftime('%Y%m%d')}"),
batch_name=request.batch_name,
status="completed",
total_products=0,
completed_products=0,
@@ -358,8 +358,8 @@ async def generate_batch_forecast(
# IMPROVEMENT: For large batches (>5 products), use background task
# For small batches, execute synchronously for immediate results
batch_name = getattr(request, 'batch_name', f"batch-{datetime.now().strftime('%Y%m%d_%H%M%S')}")
forecast_days = getattr(request, 'forecast_days', 7)
batch_name = request.batch_name
forecast_days = request.forecast_days
# Create batch record first
batch_id = str(uuid.uuid4())

View File

@@ -7,7 +7,7 @@ Provides endpoints to trigger ML insight generation for:
- Seasonal trend detection
"""
from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks
from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks, Request
from pydantic import BaseModel, Field
from typing import Optional, List
from uuid import UUID
@@ -62,6 +62,70 @@ class RulesGenerationResponse(BaseModel):
errors: List[str] = []
class DemandAnalysisRequest(BaseModel):
"""Request schema for demand analysis"""
product_ids: Optional[List[str]] = Field(
None,
description="Specific product IDs to analyze. If None, analyzes all products"
)
lookback_days: int = Field(
90,
description="Days of historical data to analyze",
ge=30,
le=365
)
forecast_horizon_days: int = Field(
30,
description="Days to forecast ahead",
ge=7,
le=90
)
class DemandAnalysisResponse(BaseModel):
"""Response schema for demand analysis"""
success: bool
message: str
tenant_id: str
products_analyzed: int
total_insights_generated: int
total_insights_posted: int
insights_by_product: dict
errors: List[str] = []
class BusinessRulesAnalysisRequest(BaseModel):
"""Request schema for business rules analysis"""
product_ids: Optional[List[str]] = Field(
None,
description="Specific product IDs to analyze. If None, analyzes all products"
)
lookback_days: int = Field(
90,
description="Days of historical data to analyze",
ge=30,
le=365
)
min_samples: int = Field(
10,
description="Minimum samples required for rule analysis",
ge=5,
le=100
)
class BusinessRulesAnalysisResponse(BaseModel):
"""Response schema for business rules analysis"""
success: bool
message: str
tenant_id: str
products_analyzed: int
total_insights_generated: int
total_insights_posted: int
insights_by_product: dict
errors: List[str] = []
# ================================================================
# API ENDPOINTS
# ================================================================
@@ -70,6 +134,7 @@ class RulesGenerationResponse(BaseModel):
async def trigger_rules_generation(
tenant_id: str,
request_data: RulesGenerationRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
@@ -103,8 +168,11 @@ async def trigger_rules_generation(
from shared.clients.inventory_client import InventoryServiceClient
from app.core.config import settings
# Get event publisher from app state
event_publisher = getattr(request.app.state, 'event_publisher', None)
# Initialize orchestrator and clients
orchestrator = RulesOrchestrator()
orchestrator = RulesOrchestrator(event_publisher=event_publisher)
inventory_client = InventoryServiceClient(settings)
# Get products to analyze from inventory service via API
@@ -278,6 +346,415 @@ async def trigger_rules_generation(
)
@router.post("/analyze-demand", response_model=DemandAnalysisResponse)
async def trigger_demand_analysis(
tenant_id: str,
request_data: DemandAnalysisRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
Trigger demand pattern analysis from historical sales data.
This endpoint:
1. Fetches historical sales data for specified products
2. Runs the DemandInsightsOrchestrator to analyze patterns
3. Generates insights about demand forecasting optimization
4. Posts insights to AI Insights Service
5. Publishes events to RabbitMQ
Args:
tenant_id: Tenant UUID
request_data: Demand analysis parameters
request: FastAPI request object to access app state
db: Database session
Returns:
DemandAnalysisResponse with analysis results
"""
logger.info(
"ML insights demand analysis requested",
tenant_id=tenant_id,
product_ids=request_data.product_ids,
lookback_days=request_data.lookback_days
)
try:
# Import ML orchestrator and clients
from app.ml.demand_insights_orchestrator import DemandInsightsOrchestrator
from shared.clients.sales_client import SalesServiceClient
from shared.clients.inventory_client import InventoryServiceClient
from app.core.config import settings
# Get event publisher from app state
event_publisher = getattr(request.app.state, 'event_publisher', None)
# Initialize orchestrator and clients
orchestrator = DemandInsightsOrchestrator(event_publisher=event_publisher)
inventory_client = InventoryServiceClient(settings)
# Get products to analyze from inventory service via API
if request_data.product_ids:
# Fetch specific products
products = []
for product_id in request_data.product_ids:
product = await inventory_client.get_ingredient_by_id(
ingredient_id=UUID(product_id),
tenant_id=tenant_id
)
if product:
products.append(product)
else:
# Fetch all products for tenant (limit to 10)
all_products = await inventory_client.get_all_ingredients(tenant_id=tenant_id)
products = all_products[:10] # Limit to prevent timeout
if not products:
return DemandAnalysisResponse(
success=False,
message="No products found for analysis",
tenant_id=tenant_id,
products_analyzed=0,
total_insights_generated=0,
total_insights_posted=0,
insights_by_product={},
errors=["No products found"]
)
# Initialize sales client to fetch historical data
sales_client = SalesServiceClient(config=settings, calling_service_name="forecasting")
# Calculate date range
end_date = datetime.utcnow()
start_date = end_date - timedelta(days=request_data.lookback_days)
# Process each product
total_insights_generated = 0
total_insights_posted = 0
insights_by_product = {}
errors = []
for product in products:
try:
product_id = str(product['id'])
product_name = product.get('name', 'Unknown')
logger.info(f"Analyzing product {product_name} ({product_id})")
# Fetch sales data for product
sales_data = await sales_client.get_sales_data(
tenant_id=tenant_id,
product_id=product_id,
start_date=start_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
)
if not sales_data:
logger.warning(f"No sales data for product {product_id}")
continue
# Convert to DataFrame
sales_df = pd.DataFrame(sales_data)
if len(sales_df) < 30: # Minimum for demand analysis
logger.warning(
f"Insufficient data for product {product_id}: "
f"{len(sales_df)} samples < 30 required"
)
continue
# Check what columns are available and map to expected format
logger.debug(f"Sales data columns for product {product_id}: {sales_df.columns.tolist()}")
# Map common field names to 'quantity' and 'date'
if 'quantity' not in sales_df.columns:
if 'total_quantity' in sales_df.columns:
sales_df['quantity'] = sales_df['total_quantity']
elif 'amount' in sales_df.columns:
sales_df['quantity'] = sales_df['amount']
else:
logger.warning(f"No quantity field found for product {product_id}, skipping")
continue
if 'date' not in sales_df.columns:
if 'sale_date' in sales_df.columns:
sales_df['date'] = sales_df['sale_date']
else:
logger.warning(f"No date field found for product {product_id}, skipping")
continue
# Prepare sales data with required columns
sales_df['date'] = pd.to_datetime(sales_df['date'])
sales_df['quantity'] = sales_df['quantity'].astype(float)
sales_df['day_of_week'] = sales_df['date'].dt.dayofweek
# Run demand analysis
results = await orchestrator.analyze_and_post_demand_insights(
tenant_id=tenant_id,
inventory_product_id=product_id,
sales_data=sales_df,
forecast_horizon_days=request_data.forecast_horizon_days,
min_history_days=request_data.lookback_days
)
# Track results
total_insights_generated += results['insights_generated']
total_insights_posted += results['insights_posted']
insights_by_product[product_id] = {
'product_name': product_name,
'insights_posted': results['insights_posted'],
'trend_analysis': results.get('trend_analysis', {})
}
logger.info(
f"Product {product_id} demand analysis complete",
insights_posted=results['insights_posted']
)
except Exception as e:
error_msg = f"Error analyzing product {product_id}: {str(e)}"
logger.error(error_msg, exc_info=True)
errors.append(error_msg)
# Close orchestrator
await orchestrator.close()
# Build response
response = DemandAnalysisResponse(
success=total_insights_posted > 0,
message=f"Successfully generated {total_insights_posted} insights from {len(products)} products",
tenant_id=tenant_id,
products_analyzed=len(products),
total_insights_generated=total_insights_generated,
total_insights_posted=total_insights_posted,
insights_by_product=insights_by_product,
errors=errors
)
logger.info(
"ML insights demand analysis complete",
tenant_id=tenant_id,
total_insights=total_insights_posted
)
return response
except Exception as e:
logger.error(
"ML insights demand analysis failed",
tenant_id=tenant_id,
error=str(e),
exc_info=True
)
raise HTTPException(
status_code=500,
detail=f"Demand analysis failed: {str(e)}"
)
@router.post("/analyze-business-rules", response_model=BusinessRulesAnalysisResponse)
async def trigger_business_rules_analysis(
tenant_id: str,
request_data: BusinessRulesAnalysisRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
Trigger business rules optimization analysis from historical sales data.
This endpoint:
1. Fetches historical sales data for specified products
2. Runs the BusinessRulesInsightsOrchestrator to analyze rules
3. Generates insights about business rule optimization
4. Posts insights to AI Insights Service
5. Publishes events to RabbitMQ
Args:
tenant_id: Tenant UUID
request_data: Business rules analysis parameters
request: FastAPI request object to access app state
db: Database session
Returns:
BusinessRulesAnalysisResponse with analysis results
"""
logger.info(
"ML insights business rules analysis requested",
tenant_id=tenant_id,
product_ids=request_data.product_ids,
lookback_days=request_data.lookback_days
)
try:
# Import ML orchestrator and clients
from app.ml.business_rules_insights_orchestrator import BusinessRulesInsightsOrchestrator
from shared.clients.sales_client import SalesServiceClient
from shared.clients.inventory_client import InventoryServiceClient
from app.core.config import settings
# Get event publisher from app state
event_publisher = getattr(request.app.state, 'event_publisher', None)
# Initialize orchestrator and clients
orchestrator = BusinessRulesInsightsOrchestrator(event_publisher=event_publisher)
inventory_client = InventoryServiceClient(settings)
# Get products to analyze from inventory service via API
if request_data.product_ids:
# Fetch specific products
products = []
for product_id in request_data.product_ids:
product = await inventory_client.get_ingredient_by_id(
ingredient_id=UUID(product_id),
tenant_id=tenant_id
)
if product:
products.append(product)
else:
# Fetch all products for tenant (limit to 10)
all_products = await inventory_client.get_all_ingredients(tenant_id=tenant_id)
products = all_products[:10] # Limit to prevent timeout
if not products:
return BusinessRulesAnalysisResponse(
success=False,
message="No products found for analysis",
tenant_id=tenant_id,
products_analyzed=0,
total_insights_generated=0,
total_insights_posted=0,
insights_by_product={},
errors=["No products found"]
)
# Initialize sales client to fetch historical data
sales_client = SalesServiceClient(config=settings, calling_service_name="forecasting")
# Calculate date range
end_date = datetime.utcnow()
start_date = end_date - timedelta(days=request_data.lookback_days)
# Process each product
total_insights_generated = 0
total_insights_posted = 0
insights_by_product = {}
errors = []
for product in products:
try:
product_id = str(product['id'])
product_name = product.get('name', 'Unknown')
logger.info(f"Analyzing product {product_name} ({product_id})")
# Fetch sales data for product
sales_data = await sales_client.get_sales_data(
tenant_id=tenant_id,
product_id=product_id,
start_date=start_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
)
if not sales_data:
logger.warning(f"No sales data for product {product_id}")
continue
# Convert to DataFrame
sales_df = pd.DataFrame(sales_data)
if len(sales_df) < request_data.min_samples:
logger.warning(
f"Insufficient data for product {product_id}: "
f"{len(sales_df)} samples < {request_data.min_samples} required"
)
continue
# Check what columns are available and map to expected format
logger.debug(f"Sales data columns for product {product_id}: {sales_df.columns.tolist()}")
# Map common field names to 'quantity' and 'date'
if 'quantity' not in sales_df.columns:
if 'total_quantity' in sales_df.columns:
sales_df['quantity'] = sales_df['total_quantity']
elif 'amount' in sales_df.columns:
sales_df['quantity'] = sales_df['amount']
else:
logger.warning(f"No quantity field found for product {product_id}, skipping")
continue
if 'date' not in sales_df.columns:
if 'sale_date' in sales_df.columns:
sales_df['date'] = sales_df['sale_date']
else:
logger.warning(f"No date field found for product {product_id}, skipping")
continue
# Prepare sales data with required columns
sales_df['date'] = pd.to_datetime(sales_df['date'])
sales_df['quantity'] = sales_df['quantity'].astype(float)
sales_df['day_of_week'] = sales_df['date'].dt.dayofweek
# Run business rules analysis
results = await orchestrator.analyze_and_post_business_rules_insights(
tenant_id=tenant_id,
inventory_product_id=product_id,
sales_data=sales_df,
min_samples=request_data.min_samples
)
# Track results
total_insights_generated += results['insights_generated']
total_insights_posted += results['insights_posted']
insights_by_product[product_id] = {
'product_name': product_name,
'insights_posted': results['insights_posted'],
'rules_learned': len(results.get('rules', {}))
}
logger.info(
f"Product {product_id} business rules analysis complete",
insights_posted=results['insights_posted']
)
except Exception as e:
error_msg = f"Error analyzing product {product_id}: {str(e)}"
logger.error(error_msg, exc_info=True)
errors.append(error_msg)
# Close orchestrator
await orchestrator.close()
# Build response
response = BusinessRulesAnalysisResponse(
success=total_insights_posted > 0,
message=f"Successfully generated {total_insights_posted} insights from {len(products)} products",
tenant_id=tenant_id,
products_analyzed=len(products),
total_insights_generated=total_insights_generated,
total_insights_posted=total_insights_posted,
insights_by_product=insights_by_product,
errors=errors
)
logger.info(
"ML insights business rules analysis complete",
tenant_id=tenant_id,
total_insights=total_insights_posted
)
return response
except Exception as e:
logger.error(
"ML insights business rules analysis failed",
tenant_id=tenant_id,
error=str(e),
exc_info=True
)
raise HTTPException(
status_code=500,
detail=f"Business rules analysis failed: {str(e)}"
)
@router.get("/health")
async def ml_insights_health():
"""Health check for ML insights endpoints"""
@@ -285,6 +762,8 @@ async def ml_insights_health():
"status": "healthy",
"service": "forecasting-ml-insights",
"endpoints": [
"POST /ml/insights/generate-rules"
"POST /ml/insights/generate-rules",
"POST /ml/insights/analyze-demand",
"POST /ml/insights/analyze-business-rules"
]
}
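A hedged sketch of exercising the new demand-analysis endpoint. The host, the /ml/insights mount point (taken from the health endpoint listing) and passing tenant_id as a query parameter are assumptions; the body fields match DemandAnalysisRequest as defined above.

import httpx

async def run_demand_analysis(tenant_id: str) -> dict:
    payload = {
        "product_ids": None,          # analyze all products (capped at 10 server-side)
        "lookback_days": 90,          # 30..365 per the Field constraints
        "forecast_horizon_days": 30,  # 7..90 per the Field constraints
    }
    async with httpx.AsyncClient(base_url="http://forecasting-service:8000") as client:  # assumed host
        resp = await client.post(
            "/ml/insights/analyze-demand",
            params={"tenant_id": tenant_id},  # assumed: tenant passed as query param
            json=payload,
            timeout=120.0,                    # per-product analysis can take a while
        )
        resp.raise_for_status()
        return resp.json()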

View File

@@ -137,6 +137,9 @@ class ForecastingService(StandardFastAPIService):
else:
self.logger.error("Event publisher not initialized, alert service unavailable")
# Store the event publisher in app state for internal API access
app.state.event_publisher = self.event_publisher
async def on_shutdown(self, app: FastAPI):
"""Custom shutdown logic for forecasting service"""

View File

@@ -0,0 +1,393 @@
"""
Business Rules Insights Orchestrator
Coordinates business rules optimization and insight posting
"""
import pandas as pd
from typing import Dict, List, Any, Optional
import structlog
from datetime import datetime
from uuid import UUID
import sys
import os
# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
from app.ml.dynamic_rules_engine import DynamicRulesEngine
logger = structlog.get_logger()
class BusinessRulesInsightsOrchestrator:
"""
Orchestrates business rules analysis and insight generation workflow.
Workflow:
1. Analyze dynamic business rule performance
2. Generate insights for rule optimization
3. Post insights to AI Insights Service
4. Publish recommendation events to RabbitMQ
5. Provide rule optimization for forecasting
6. Track rule effectiveness and improvements
"""
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.rules_engine = DynamicRulesEngine()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
async def analyze_and_post_business_rules_insights(
self,
tenant_id: str,
inventory_product_id: str,
sales_data: pd.DataFrame,
min_samples: int = 10
) -> Dict[str, Any]:
"""
Complete workflow: Analyze business rules and post insights.
Args:
tenant_id: Tenant identifier
inventory_product_id: Product identifier
sales_data: Historical sales data
min_samples: Minimum samples for rule analysis
Returns:
Workflow results with analysis and posted insights
"""
logger.info(
"Starting business rules analysis workflow",
tenant_id=tenant_id,
inventory_product_id=inventory_product_id,
samples=len(sales_data)
)
# Step 1: Learn and analyze rules
rules_results = await self.rules_engine.learn_all_rules(
tenant_id=tenant_id,
inventory_product_id=inventory_product_id,
sales_data=sales_data,
external_data=None,
min_samples=min_samples
)
logger.info(
"Business rules analysis complete",
insights_generated=len(rules_results.get('insights', [])),
rules_learned=len(rules_results.get('rules', {}))
)
# Step 2: Enrich insights with tenant_id and product context
enriched_insights = self._enrich_insights(
rules_results.get('insights', []),
tenant_id,
inventory_product_id
)
# Step 3: Post insights to AI Insights Service
if enriched_insights:
post_results = await self.ai_insights_client.create_insights_bulk(
tenant_id=UUID(tenant_id),
insights=enriched_insights
)
logger.info(
"Business rules insights posted to AI Insights Service",
inventory_product_id=inventory_product_id,
total=post_results['total'],
successful=post_results['successful'],
failed=post_results['failed']
)
else:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post for product", inventory_product_id=inventory_product_id)
# Step 4: Publish insight events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
product_context = {'inventory_product_id': inventory_product_id}
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
product_context=product_context
)
# Step 5: Return comprehensive results
return {
'tenant_id': tenant_id,
'inventory_product_id': inventory_product_id,
'learned_at': rules_results['learned_at'],
'rules': rules_results.get('rules', {}),
'insights_generated': len(enriched_insights),
'insights_posted': post_results['successful'],
'insights_failed': post_results['failed'],
'created_insights': post_results.get('created_insights', [])
}
def _enrich_insights(
self,
insights: List[Dict[str, Any]],
tenant_id: str,
inventory_product_id: str
) -> List[Dict[str, Any]]:
"""
Enrich insights with required fields for AI Insights Service.
Args:
insights: Raw insights from rules engine
tenant_id: Tenant identifier
inventory_product_id: Product identifier
Returns:
Enriched insights ready for posting
"""
enriched = []
for insight in insights:
# Add required tenant_id
enriched_insight = insight.copy()
enriched_insight['tenant_id'] = tenant_id
# Add product context to metrics
if 'metrics_json' not in enriched_insight:
enriched_insight['metrics_json'] = {}
enriched_insight['metrics_json']['inventory_product_id'] = inventory_product_id
# Add source metadata
enriched_insight['source_service'] = 'forecasting'
enriched_insight['source_model'] = 'dynamic_rules_engine'
enriched_insight['detected_at'] = datetime.utcnow().isoformat()
enriched.append(enriched_insight)
return enriched
async def analyze_all_business_rules(
self,
tenant_id: str,
products_data: Dict[str, pd.DataFrame],
min_samples: int = 10
) -> Dict[str, Any]:
"""
Analyze all products for business rules optimization and generate comparative insights.
Args:
tenant_id: Tenant identifier
products_data: Dict of {inventory_product_id: sales_data DataFrame}
min_samples: Minimum samples for rule analysis
Returns:
Comprehensive analysis with rule optimization insights
"""
logger.info(
"Analyzing business rules for all products",
tenant_id=tenant_id,
products=len(products_data)
)
all_results = []
total_insights_posted = 0
# Analyze each product
for inventory_product_id, sales_data in products_data.items():
try:
results = await self.analyze_and_post_business_rules_insights(
tenant_id=tenant_id,
inventory_product_id=inventory_product_id,
sales_data=sales_data,
min_samples=min_samples
)
all_results.append(results)
total_insights_posted += results['insights_posted']
except Exception as e:
logger.error(
"Error analyzing business rules for product",
inventory_product_id=inventory_product_id,
error=str(e)
)
# Generate summary insight
if total_insights_posted > 0:
summary_insight = self._generate_portfolio_summary_insight(
tenant_id, all_results
)
if summary_insight:
enriched_summary = self._enrich_insights(
[summary_insight], tenant_id, 'all_products'
)
post_results = await self.ai_insights_client.create_insights_bulk(
tenant_id=UUID(tenant_id),
insights=enriched_summary
)
total_insights_posted += post_results['successful']
logger.info(
"All business rules analysis complete",
tenant_id=tenant_id,
products_analyzed=len(all_results),
total_insights_posted=total_insights_posted
)
return {
'tenant_id': tenant_id,
'analyzed_at': datetime.utcnow().isoformat(),
'products_analyzed': len(all_results),
'product_results': all_results,
'total_insights_posted': total_insights_posted
}
def _generate_portfolio_summary_insight(
self,
tenant_id: str,
all_results: List[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
"""
Generate portfolio-level business rules summary insight.
Args:
tenant_id: Tenant identifier
all_results: All product analysis results
Returns:
Summary insight or None
"""
if not all_results:
return None
# Calculate summary statistics
total_products = len(all_results)
total_rules = sum(len(r.get('rules', {})) for r in all_results)
# Count products with significant rule improvements
significant_improvements = sum(1 for r in all_results
if any('improvement' in str(v).lower() for v in r.get('rules', {}).values()))
return {
'type': 'recommendation',
'priority': 'high' if significant_improvements > total_products * 0.3 else 'medium',
'category': 'forecasting',
'title': f'Business Rule Optimization: {total_products} Products Analyzed',
'description': f'Learned {total_rules} dynamic rules across {total_products} products. Identified {significant_improvements} products with significant rule improvements.',
'impact_type': 'operational_efficiency',
'impact_value': total_rules,
'impact_unit': 'rules',
'confidence': 80,
'metrics_json': {
'total_products': total_products,
'total_rules': total_rules,
'significant_improvements': significant_improvements,
'rules_per_product': round(total_rules / total_products, 2)
},
'actionable': True,
'recommendation_actions': [
{
'label': 'Review Learned Rules',
'action': 'review_business_rules',
'params': {'tenant_id': tenant_id}
},
{
'label': 'Implement Optimized Rules',
'action': 'implement_business_rules',
'params': {'tenant_id': tenant_id}
}
],
'source_service': 'forecasting',
'source_model': 'dynamic_rules_engine'
}
async def get_learned_rules(
self,
inventory_product_id: str
) -> Optional[Dict[str, Any]]:
"""
Get cached learned rules for a product.
Args:
inventory_product_id: Product identifier
Returns:
Learned rules or None if not analyzed
"""
return self.rules_engine.get_all_rules(inventory_product_id)
async def _publish_insight_events(self, tenant_id, insights, product_context=None):
"""
Publish insight events to RabbitMQ for alert processing.
Args:
tenant_id: Tenant identifier
insights: List of created insights
product_context: Additional context about the product
"""
if not self.event_publisher:
logger.warning("No event publisher available for business rules insights")
return
for insight in insights:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
# Map priority to severity, with confidence as tiebreaker
if priority == 'critical' or (priority == 'high' and confidence >= 70):
severity = 'high'
elif priority == 'high' or (priority == 'medium' and confidence >= 80):
severity = 'medium'
else:
severity = 'low'
# Prepare the event data
event_data = {
'insight_id': insight.get('id'),
'type': insight.get('type'),
'title': insight.get('title'),
'description': insight.get('description'),
'category': insight.get('category'),
'priority': insight.get('priority'),
'confidence': confidence,
'recommendation': insight.get('recommendation_actions', []),
'impact_type': insight.get('impact_type'),
'impact_value': insight.get('impact_value'),
'inventory_product_id': product_context.get('inventory_product_id') if product_context else None,
'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
'source_service': 'forecasting',
'source_model': 'dynamic_rules_engine'
}
try:
await self.event_publisher.publish_recommendation(
event_type='ai_business_rule',
tenant_id=tenant_id,
severity=severity,
data=event_data
)
logger.info(
"Published business rules insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish business rules insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e)
)
async def close(self):
"""Close HTTP client connections."""
await self.ai_insights_client.close()
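Illustrative usage of the new orchestrator, mirroring how the /ml/insights/analyze-business-rules endpoint drives it. The constructor and method signatures come from this file; the sample DataFrame is synthetic, and event_publisher would normally be read from app.state.

import pandas as pd
from app.ml.business_rules_insights_orchestrator import BusinessRulesInsightsOrchestrator

async def example_business_rules_run(tenant_id: str, product_id: str, event_publisher=None) -> int:
    # Synthetic 60-day sales history with the columns the endpoint prepares.
    sales_df = pd.DataFrame({
        "date": pd.date_range("2025-01-01", periods=60, freq="D"),
        "quantity": [20.0 + (i % 7) * 3 for i in range(60)],  # weekly-ish pattern
    })
    sales_df["day_of_week"] = sales_df["date"].dt.dayofweek

    orchestrator = BusinessRulesInsightsOrchestrator(event_publisher=event_publisher)
    try:
        results = await orchestrator.analyze_and_post_business_rules_insights(
            tenant_id=tenant_id,
            inventory_product_id=product_id,
            sales_data=sales_df,
            min_samples=10,
        )
    finally:
        await orchestrator.close()
    return results["insights_posted"]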

View File

@@ -0,0 +1,403 @@
"""
Demand Insights Orchestrator
Coordinates demand forecasting analysis and insight posting
"""
import pandas as pd
from typing import Dict, List, Any, Optional
import structlog
from datetime import datetime
from uuid import UUID
import sys
import os
# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
from app.ml.predictor import BakeryForecaster
logger = structlog.get_logger()
class DemandInsightsOrchestrator:
"""
Orchestrates demand forecasting analysis and insight generation workflow.
Workflow:
1. Analyze historical demand patterns from sales data
2. Generate insights for demand optimization
3. Post insights to AI Insights Service
4. Publish recommendation events to RabbitMQ
5. Provide demand pattern analysis for forecasting
6. Track demand forecasting performance
"""
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.forecaster = BakeryForecaster()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
async def analyze_and_post_demand_insights(
self,
tenant_id: str,
inventory_product_id: str,
sales_data: pd.DataFrame,
forecast_horizon_days: int = 30,
min_history_days: int = 90
) -> Dict[str, Any]:
"""
Complete workflow: Analyze demand and post insights.
Args:
tenant_id: Tenant identifier
inventory_product_id: Product identifier
sales_data: Historical sales data
forecast_horizon_days: Days to forecast ahead
min_history_days: Minimum days of history required
Returns:
Workflow results with analysis and posted insights
"""
logger.info(
"Starting demand forecasting analysis workflow",
tenant_id=tenant_id,
inventory_product_id=inventory_product_id,
history_days=len(sales_data)
)
# Step 1: Analyze demand patterns
analysis_results = await self.forecaster.analyze_demand_patterns(
tenant_id=tenant_id,
inventory_product_id=inventory_product_id,
sales_data=sales_data,
forecast_horizon_days=forecast_horizon_days,
min_history_days=min_history_days
)
logger.info(
"Demand analysis complete",
inventory_product_id=inventory_product_id,
insights_generated=len(analysis_results.get('insights', []))
)
# Step 2: Enrich insights with tenant_id and product context
enriched_insights = self._enrich_insights(
analysis_results.get('insights', []),
tenant_id,
inventory_product_id
)
# Step 3: Post insights to AI Insights Service
if enriched_insights:
post_results = await self.ai_insights_client.create_insights_bulk(
tenant_id=UUID(tenant_id),
insights=enriched_insights
)
logger.info(
"Demand insights posted to AI Insights Service",
inventory_product_id=inventory_product_id,
total=post_results['total'],
successful=post_results['successful'],
failed=post_results['failed']
)
else:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post for product", inventory_product_id=inventory_product_id)
# Step 4: Publish insight events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
product_context = {'inventory_product_id': inventory_product_id}
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
product_context=product_context
)
# Step 5: Return comprehensive results
return {
'tenant_id': tenant_id,
'inventory_product_id': inventory_product_id,
'analyzed_at': analysis_results['analyzed_at'],
'history_days': analysis_results['history_days'],
'demand_patterns': analysis_results.get('patterns', {}),
'trend_analysis': analysis_results.get('trend_analysis', {}),
'seasonal_factors': analysis_results.get('seasonal_factors', {}),
'insights_generated': len(enriched_insights),
'insights_posted': post_results['successful'],
'insights_failed': post_results['failed'],
'created_insights': post_results.get('created_insights', [])
}
def _enrich_insights(
self,
insights: List[Dict[str, Any]],
tenant_id: str,
inventory_product_id: str
) -> List[Dict[str, Any]]:
"""
Enrich insights with required fields for AI Insights Service.
Args:
insights: Raw insights from forecaster
tenant_id: Tenant identifier
inventory_product_id: Product identifier
Returns:
Enriched insights ready for posting
"""
enriched = []
for insight in insights:
# Add required tenant_id
enriched_insight = insight.copy()
enriched_insight['tenant_id'] = tenant_id
# Add product context to metrics
if 'metrics_json' not in enriched_insight:
enriched_insight['metrics_json'] = {}
enriched_insight['metrics_json']['inventory_product_id'] = inventory_product_id
# Add source metadata
enriched_insight['source_service'] = 'forecasting'
enriched_insight['source_model'] = 'demand_analyzer'
enriched_insight['detected_at'] = datetime.utcnow().isoformat()
enriched.append(enriched_insight)
return enriched
async def analyze_all_products(
self,
tenant_id: str,
products_data: Dict[str, pd.DataFrame],
forecast_horizon_days: int = 30,
min_history_days: int = 90
) -> Dict[str, Any]:
"""
Analyze all products for a tenant and generate comparative insights.
Args:
tenant_id: Tenant identifier
products_data: Dict of {inventory_product_id: sales_data DataFrame}
forecast_horizon_days: Days to forecast
min_history_days: Minimum history required
Returns:
Comprehensive analysis with product comparison
"""
logger.info(
"Analyzing all products for tenant",
tenant_id=tenant_id,
products=len(products_data)
)
all_results = []
total_insights_posted = 0
# Analyze each product
for inventory_product_id, sales_data in products_data.items():
try:
results = await self.analyze_and_post_demand_insights(
tenant_id=tenant_id,
inventory_product_id=inventory_product_id,
sales_data=sales_data,
forecast_horizon_days=forecast_horizon_days,
min_history_days=min_history_days
)
all_results.append(results)
total_insights_posted += results['insights_posted']
except Exception as e:
logger.error(
"Error analyzing product",
inventory_product_id=inventory_product_id,
error=str(e)
)
# Generate summary insight
if total_insights_posted > 0:
summary_insight = self._generate_portfolio_summary_insight(
tenant_id, all_results
)
if summary_insight:
enriched_summary = self._enrich_insights(
[summary_insight], tenant_id, 'all_products'
)
post_results = await self.ai_insights_client.create_insights_bulk(
tenant_id=UUID(tenant_id),
insights=enriched_summary
)
total_insights_posted += post_results['successful']
logger.info(
"All products analysis complete",
tenant_id=tenant_id,
products_analyzed=len(all_results),
total_insights_posted=total_insights_posted
)
return {
'tenant_id': tenant_id,
'analyzed_at': datetime.utcnow().isoformat(),
'products_analyzed': len(all_results),
'product_results': all_results,
'total_insights_posted': total_insights_posted
}
def _generate_portfolio_summary_insight(
self,
tenant_id: str,
all_results: List[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
"""
Generate portfolio-level summary insight.
Args:
tenant_id: Tenant identifier
all_results: All product analysis results
Returns:
Summary insight or None
"""
if not all_results:
return None
# Calculate summary statistics
total_products = len(all_results)
high_demand_products = sum(1 for r in all_results if r.get('trend_analysis', {}).get('is_increasing', False))
avg_seasonal_factor = sum(
r.get('seasonal_factors', {}).get('peak_ratio', 1.0)
for r in all_results
if r.get('seasonal_factors', {}).get('peak_ratio')
) / max(1, len(all_results))
return {
'type': 'recommendation',
'priority': 'medium' if high_demand_products > total_products * 0.5 else 'low',
'category': 'forecasting',
'title': f'Demand Pattern Summary: {total_products} Products Analyzed',
'description': f'Detected {high_demand_products} products with increasing demand trends. Average seasonal peak ratio: {avg_seasonal_factor:.2f}x.',
'impact_type': 'demand_optimization',
'impact_value': high_demand_products,
'impact_unit': 'products',
'confidence': 75,
'metrics_json': {
'total_products': total_products,
'high_demand_products': high_demand_products,
'avg_seasonal_factor': round(avg_seasonal_factor, 2),
'trend_strength': 'strong' if high_demand_products > total_products * 0.7 else 'moderate'
},
'actionable': True,
'recommendation_actions': [
{
'label': 'Review Production Schedule',
'action': 'review_production_schedule',
'params': {'tenant_id': tenant_id}
},
{
'label': 'Adjust Inventory Levels',
'action': 'adjust_inventory_levels',
'params': {'tenant_id': tenant_id}
}
],
'source_service': 'forecasting',
'source_model': 'demand_analyzer'
}
async def get_demand_patterns(
self,
inventory_product_id: str
) -> Optional[Dict[str, Any]]:
"""
Get cached demand patterns for a product.
Args:
inventory_product_id: Product identifier
Returns:
Demand patterns or None if not analyzed
"""
return self.forecaster.get_cached_demand_patterns(inventory_product_id)
async def _publish_insight_events(self, tenant_id, insights, product_context=None):
"""
Publish insight events to RabbitMQ for alert processing.
Args:
tenant_id: Tenant identifier
insights: List of created insights
product_context: Additional context about the product
"""
if not self.event_publisher:
logger.warning("No event publisher available for demand insights")
return
for insight in insights:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
# Map priority to severity, with confidence as tiebreaker
if priority == 'critical' or (priority == 'high' and confidence >= 70):
severity = 'high'
elif priority == 'high' or (priority == 'medium' and confidence >= 80):
severity = 'medium'
else:
severity = 'low'
# Prepare the event data
event_data = {
'insight_id': insight.get('id'),
'type': insight.get('type'),
'title': insight.get('title'),
'description': insight.get('description'),
'category': insight.get('category'),
'priority': insight.get('priority'),
'confidence': confidence,
'recommendation': insight.get('recommendation_actions', []),
'impact_type': insight.get('impact_type'),
'impact_value': insight.get('impact_value'),
'inventory_product_id': product_context.get('inventory_product_id') if product_context else None,
'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
'source_service': 'forecasting',
'source_model': 'demand_analyzer'
}
try:
await self.event_publisher.publish_recommendation(
event_type='ai_demand_forecast',
tenant_id=tenant_id,
severity=severity,
data=event_data
)
logger.info(
"Published demand insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish demand insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e)
)
async def close(self):
"""Close HTTP client connections."""
await self.ai_insights_client.close()
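The same priority/confidence-to-severity mapping is inlined in the demand, business-rules, supplier and price orchestrators' _publish_insight_events methods. Pulled out as a standalone sketch, the thresholds are:

def map_severity(priority: str, confidence: float) -> str:
    # Mirrors the mapping inlined in each orchestrator: priority drives severity,
    # with confidence as the tiebreaker.
    if priority == "critical" or (priority == "high" and confidence >= 70):
        return "high"
    if priority == "high" or (priority == "medium" and confidence >= 80):
        return "medium"
    return "low"

assert map_severity("critical", 10) == "high"
assert map_severity("high", 50) == "medium"   # high priority, low confidence
assert map_severity("medium", 85) == "medium"
assert map_severity("low", 95) == "low"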

View File

@@ -11,6 +11,7 @@ from uuid import UUID
from app.ml.dynamic_rules_engine import DynamicRulesEngine
from app.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
logger = structlog.get_logger()
@@ -29,10 +30,12 @@ class RulesOrchestrator:
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000"
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.rules_engine = DynamicRulesEngine()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
async def learn_and_post_rules(
self,
@@ -100,7 +103,17 @@ class RulesOrchestrator:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post")
# Step 4: Return comprehensive results
# Step 4: Publish insight events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
product_context = {'inventory_product_id': inventory_product_id}
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
product_context=product_context
)
# Step 5: Return comprehensive results
return {
'tenant_id': tenant_id,
'inventory_product_id': inventory_product_id,
@@ -229,6 +242,71 @@ class RulesOrchestrator:
return results
async def _publish_insight_events(self, tenant_id, insights, product_context=None):
"""
Publish insight events to RabbitMQ for alert processing.
Args:
tenant_id: Tenant identifier
insights: List of created insights
product_context: Additional context about the product
"""
if not self.event_publisher:
logger.warning("No event publisher available for business rules insights")
return
for insight in insights:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
# Map priority to severity, with confidence as tiebreaker
if priority == 'critical' or (priority == 'high' and confidence >= 70):
severity = 'high'
elif priority == 'high' or (priority == 'medium' and confidence >= 80):
severity = 'medium'
else:
severity = 'low'
# Prepare the event data
event_data = {
'insight_id': insight.get('id'),
'type': insight.get('type'),
'title': insight.get('title'),
'description': insight.get('description'),
'category': insight.get('category'),
'priority': insight.get('priority'),
'confidence': confidence,
'recommendation': insight.get('recommendation_actions', []),
'impact_type': insight.get('impact_type'),
'impact_value': insight.get('impact_value'),
'inventory_product_id': product_context.get('inventory_product_id') if product_context else None,
'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
'source_service': 'forecasting',
'source_model': 'dynamic_rules_engine'
}
try:
await self.event_publisher.publish_recommendation(
event_type='ai_business_rule',
tenant_id=tenant_id,
severity=severity,
data=event_data
)
logger.info(
"Published business rules insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish business rules insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e)
)
async def close(self):
"""Close HTTP client connections."""
await self.ai_insights_client.close()

View File

@@ -7,7 +7,7 @@ Provides endpoints to trigger ML insight generation for:
- Demand pattern analysis
"""
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends, HTTPException, Request
from pydantic import BaseModel, Field
from typing import Optional, List
from uuid import UUID
@@ -71,6 +71,7 @@ class SafetyStockOptimizationResponse(BaseModel):
async def trigger_safety_stock_optimization(
tenant_id: str,
request_data: SafetyStockOptimizationRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
@@ -81,10 +82,12 @@ async def trigger_safety_stock_optimization(
2. Runs the SafetyStockInsightsOrchestrator to optimize levels
3. Generates insights about safety stock recommendations
4. Posts insights to AI Insights Service
5. Publishes recommendation events to RabbitMQ
Args:
tenant_id: Tenant UUID
request_data: Optimization parameters
request: FastAPI request (for app state access)
db: Database session
Returns:
@@ -103,8 +106,13 @@ async def trigger_safety_stock_optimization(
from app.models.inventory import Ingredient
from sqlalchemy import select
# Get event publisher from app state (if available)
event_publisher = getattr(request.app.state, 'event_publisher', None) if hasattr(request, 'app') else None
# Initialize orchestrator
orchestrator = SafetyStockInsightsOrchestrator()
orchestrator = SafetyStockInsightsOrchestrator(
event_publisher=event_publisher
)
# Get products to optimize
if request_data.product_ids:
@@ -378,6 +386,7 @@ async def generate_safety_stock_insights_internal(
result = await trigger_safety_stock_optimization(
tenant_id=tenant_id,
request_data=request_data,
request=request,
db=db
)

View File

@@ -126,6 +126,7 @@ class InventoryService(StandardFastAPIService):
# Store services in app state
app.state.alert_service = alert_service
app.state.inventory_scheduler = inventory_scheduler # Store scheduler for manual triggering
app.state.event_publisher = self.event_publisher # Store event publisher for ML insights
else:
self.logger.error("Event publisher not initialized, alert service unavailable")

View File

@@ -14,6 +14,7 @@ import os
# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
from app.ml.safety_stock_optimizer import SafetyStockOptimizer
@@ -28,15 +29,18 @@ class SafetyStockInsightsOrchestrator:
1. Optimize safety stock from demand history and cost parameters
2. Generate insights comparing optimal vs hardcoded approach
3. Post insights to AI Insights Service
4. Provide optimized safety stock levels for inventory management
4. Publish recommendation events to RabbitMQ
5. Provide optimized safety stock levels for inventory management
"""
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000"
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.optimizer = SafetyStockOptimizer()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
async def optimize_and_post_insights(
self,
@@ -109,6 +113,17 @@ class SafetyStockInsightsOrchestrator:
successful=post_results['successful'],
failed=post_results['failed']
)
# Step 4: Publish recommendation events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
product_context = product_characteristics.copy() if product_characteristics else {}
product_context['inventory_product_id'] = inventory_product_id
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
product_context=product_context
)
else:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post for product", inventory_product_id=inventory_product_id)
@@ -167,6 +182,84 @@ class SafetyStockInsightsOrchestrator:
return enriched
async def _publish_insight_events(
self,
tenant_id: str,
insights: List[Dict[str, Any]],
product_context: Optional[Dict[str, Any]] = None
) -> None:
"""
Publish recommendation events to RabbitMQ for each insight.
Args:
tenant_id: Tenant identifier
insights: List of created insights (with insight_id from AI Insights Service)
product_context: Optional product context (name, id, etc.)
"""
if not self.event_publisher:
logger.warning("Event publisher not configured, skipping event publication")
return
for insight in insights:
try:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
if priority == 'urgent' or confidence >= 90:
severity = 'urgent'
elif priority == 'high' or confidence >= 70:
severity = 'high'
elif priority == 'medium' or confidence >= 50:
severity = 'medium'
else:
severity = 'low'
# Build event metadata
event_metadata = {
'insight_id': insight.get('id'),
'insight_type': insight.get('insight_type'),
'inventory_product_id': insight.get('metrics_json', {}).get('inventory_product_id'),
'ingredient_name': product_context.get('ingredient_name') if product_context else None,
'suggested_safety_stock': insight.get('metrics_json', {}).get('suggested_safety_stock'),
'current_safety_stock': insight.get('metrics_json', {}).get('current_safety_stock'),
'estimated_savings': insight.get('impact_value'),
'confidence': confidence,
'recommendation': insight.get('recommendation'),
'impact_type': insight.get('impact_type'),
'source_service': 'inventory',
'source_model': 'safety_stock_optimizer'
}
# Remove None values
event_metadata = {k: v for k, v in event_metadata.items() if v is not None}
# Publish recommendation event
await self.event_publisher.publish_recommendation(
event_type='ai_safety_stock_optimization',
tenant_id=tenant_id,
severity=severity,
data=event_metadata
)
logger.info(
"Published safety stock insight recommendation event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
insight_type=insight.get('insight_type'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e),
exc_info=True
)
# Don't raise - we don't want to fail the whole workflow if event publishing fails
async def optimize_all_products(
self,
tenant_id: str,

View File

@@ -92,8 +92,8 @@ async def load_fixture_data_for_tenant(
return 0
# Parse and adjust dates from fixture to reference_time
base_started_at = resolve_time_marker(orchestration_run_data.get("started_at"))
base_completed_at = resolve_time_marker(orchestration_run_data.get("completed_at"))
base_started_at = resolve_time_marker(orchestration_run_data.get("started_at"), reference_time)
base_completed_at = resolve_time_marker(orchestration_run_data.get("completed_at"), reference_time)
# Adjust dates to make them appear recent relative to session creation
started_at = adjust_date_for_demo(base_started_at, reference_time) if base_started_at else reference_time - timedelta(hours=2)
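The fix threads reference_time through so fixture markers resolve against the demo session's clock rather than the wall clock. The parser itself is not part of this diff; the hypothetical stub below only illustrates a signature consistent with the fixed call sites, with the "now" marker and ISO-timestamp handling as assumptions.

from datetime import datetime
from typing import Optional

def resolve_time_marker(marker: Optional[str], reference_time: Optional[datetime] = None) -> Optional[datetime]:
    """Hypothetical sketch: resolve a fixture time marker against the demo reference clock."""
    if marker is None:
        return None
    base = reference_time or datetime.utcnow()
    if marker == "now":                    # assumed relative marker
        return base
    return datetime.fromisoformat(marker)  # assumed absolute ISO timestamps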

View File

@@ -6,7 +6,7 @@ Provides endpoints to trigger ML insight generation for:
- Price forecasting and timing recommendations
"""
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends, HTTPException, Request
from pydantic import BaseModel, Field
from typing import Optional, List
from uuid import UUID
@@ -108,6 +108,7 @@ class PriceForecastResponse(BaseModel):
async def trigger_supplier_analysis(
tenant_id: str,
request_data: SupplierAnalysisRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
@@ -142,8 +143,11 @@ async def trigger_supplier_analysis(
from app.core.config import settings
from sqlalchemy import select
# Get event publisher from app state
event_publisher = getattr(request.app.state, 'event_publisher', None)
# Initialize orchestrator and clients
orchestrator = SupplierInsightsOrchestrator()
orchestrator = SupplierInsightsOrchestrator(event_publisher=event_publisher)
suppliers_client = SuppliersServiceClient(settings)
# Get suppliers to analyze from suppliers service via API
@@ -319,6 +323,7 @@ async def trigger_supplier_analysis(
async def trigger_price_forecasting(
tenant_id: str,
request_data: PriceForecastRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
@@ -353,8 +358,11 @@ async def trigger_price_forecasting(
from app.core.config import settings
from sqlalchemy import select
# Get event publisher from app state
event_publisher = getattr(request.app.state, 'event_publisher', None)
# Initialize orchestrator and inventory client
orchestrator = PriceInsightsOrchestrator()
orchestrator = PriceInsightsOrchestrator(event_publisher=event_publisher)
inventory_client = InventoryServiceClient(settings)
# Get ingredients to forecast from inventory service via API
@@ -594,6 +602,7 @@ async def generate_price_insights_internal(
result = await trigger_price_forecasting(
tenant_id=tenant_id,
request_data=request_data,
request=request,
db=db
)

View File

@@ -107,6 +107,7 @@ class ProcurementService(StandardFastAPIService):
# Store in app state for internal API access
app.state.delivery_tracking_service = self.delivery_tracking_service
app.state.event_publisher = self.event_publisher
# Start overdue PO scheduler
if self.rabbitmq_client and self.rabbitmq_client.connected:

View File

@@ -14,6 +14,7 @@ import os
# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
from app.ml.price_forecaster import PriceForecaster
@@ -33,10 +34,12 @@ class PriceInsightsOrchestrator:
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000"
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.forecaster = PriceForecaster()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
async def forecast_and_post_insights(
self,
@@ -107,7 +110,17 @@ class PriceInsightsOrchestrator:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post for ingredient", ingredient_id=ingredient_id)
# Step 4: Return comprehensive results
# Step 4: Publish insight events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
ingredient_context = {'ingredient_id': ingredient_id}
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
ingredient_context=ingredient_context
)
# Step 5: Return comprehensive results
return {
'tenant_id': tenant_id,
'ingredient_id': ingredient_id,
@@ -261,6 +274,71 @@ class PriceInsightsOrchestrator:
'bulk_opportunity_count': bulk_opportunity_count
}
async def _publish_insight_events(self, tenant_id, insights, ingredient_context=None):
"""
Publish insight events to RabbitMQ for alert processing.
Args:
tenant_id: Tenant identifier
insights: List of created insights
ingredient_context: Additional context about the ingredient
"""
if not self.event_publisher:
logger.warning("No event publisher available for price insights")
return
for insight in insights:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
# Map priority to severity, with confidence as tiebreaker
if priority == 'critical' or (priority == 'high' and confidence >= 70):
severity = 'high'
elif priority == 'high' or (priority == 'medium' and confidence >= 80):
severity = 'medium'
else:
severity = 'low'
# Prepare the event data
event_data = {
'insight_id': insight.get('id'),
'type': insight.get('type'),
'title': insight.get('title'),
'description': insight.get('description'),
'category': insight.get('category'),
'priority': insight.get('priority'),
'confidence': confidence,
'recommendation': insight.get('recommendation_actions', []),
'impact_type': insight.get('impact_type'),
'impact_value': insight.get('impact_value'),
'ingredient_id': ingredient_context.get('ingredient_id') if ingredient_context else None,
'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
'source_service': 'procurement',
'source_model': 'price_forecaster'
}
try:
await self.event_publisher.publish_recommendation(
event_type='ai_price_forecast',
tenant_id=tenant_id,
severity=severity,
data=event_data
)
logger.info(
"Published price insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish price insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e)
)
def _generate_portfolio_summary_insight(
self,
tenant_id: str,

View File

@@ -14,6 +14,7 @@ import os
# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
from app.ml.supplier_performance_predictor import SupplierPerformancePredictor
@@ -28,16 +29,19 @@ class SupplierInsightsOrchestrator:
1. Analyze supplier performance from historical orders
2. Generate insights for procurement risk management
3. Post insights to AI Insights Service
4. Provide supplier comparison and recommendations
5. Track supplier reliability scores
4. Publish recommendation events to RabbitMQ
5. Provide supplier comparison and recommendations
6. Track supplier reliability scores
"""
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000"
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.predictor = SupplierPerformancePredictor()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
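As a wiring sketch (route shape and helper name are assumptions, not part of this commit), a FastAPI handler can reuse the publisher stored on app.state during service startup, mirroring the ML insights route shown later in this commit:
from fastapi import Request

def build_supplier_orchestrator(request: Request) -> SupplierInsightsOrchestrator:
    # Reuses the publisher stored on app.state at startup; when it is None the
    # orchestrator skips event publication and logs a warning instead.
    event_publisher = getattr(request.app.state, 'event_publisher', None)
    return SupplierInsightsOrchestrator(event_publisher=event_publisher)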
async def analyze_and_post_supplier_insights(
self,
@@ -105,7 +109,17 @@ class SupplierInsightsOrchestrator:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post for supplier", supplier_id=supplier_id)
# Step 4: Return comprehensive results
# Step 4: Publish insight events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
supplier_context = {'supplier_id': supplier_id}
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
supplier_context=supplier_context
)
# Step 5: Return comprehensive results
return {
'tenant_id': tenant_id,
'supplier_id': supplier_id,
@@ -159,6 +173,71 @@ class SupplierInsightsOrchestrator:
return enriched
async def _publish_insight_events(self, tenant_id, insights, supplier_context=None):
"""
Publish insight events to RabbitMQ for alert processing.
Args:
tenant_id: Tenant identifier
insights: List of created insights
supplier_context: Additional context about the supplier
"""
if not self.event_publisher:
logger.warning("No event publisher available for supplier insights")
return
for insight in insights:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
# Map priority to severity, with confidence as tiebreaker
if priority == 'critical' or (priority == 'high' and confidence >= 70):
severity = 'high'
elif priority == 'high' or (priority == 'medium' and confidence >= 80):
severity = 'medium'
else:
severity = 'low'
# Prepare the event data
event_data = {
'insight_id': insight.get('id'),
'type': insight.get('type'),
'title': insight.get('title'),
'description': insight.get('description'),
'category': insight.get('category'),
'priority': insight.get('priority'),
'confidence': confidence,
'recommendation': insight.get('recommendation_actions', []),
'impact_type': insight.get('impact_type'),
'impact_value': insight.get('impact_value'),
'supplier_id': supplier_context.get('supplier_id') if supplier_context else None,
'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
'source_service': 'procurement',
'source_model': 'supplier_performance_predictor'
}
try:
await self.event_publisher.publish_recommendation(
event_type='ai_supplier_recommendation',
tenant_id=tenant_id,
severity=severity,
data=event_data
)
logger.info(
"Published supplier insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish supplier insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e)
)
async def analyze_all_suppliers(
self,
tenant_id: str,

View File

@@ -358,13 +358,66 @@ async def clone_demo_data(
except KeyError:
process_stage_value = None
# Transform foreign key references (product_id, recipe_id, order_id, forecast_id)
transformed_product_id = None
if batch_data.get('product_id'):
try:
transformed_product_id = str(transform_id(batch_data['product_id'], virtual_uuid))
except Exception as e:
logger.warning("Failed to transform product_id",
product_id=batch_data.get('product_id'),
error=str(e))
transformed_recipe_id = None
if batch_data.get('recipe_id'):
try:
transformed_recipe_id = str(transform_id(batch_data['recipe_id'], virtual_uuid))
except Exception as e:
logger.warning("Failed to transform recipe_id",
recipe_id=batch_data.get('recipe_id'),
error=str(e))
transformed_order_id = None
if batch_data.get('order_id'):
try:
transformed_order_id = str(transform_id(batch_data['order_id'], virtual_uuid))
except Exception as e:
logger.warning("Failed to transform order_id",
order_id=batch_data.get('order_id'),
error=str(e))
transformed_forecast_id = None
if batch_data.get('forecast_id'):
try:
transformed_forecast_id = str(transform_id(batch_data['forecast_id'], virtual_uuid))
except Exception as e:
logger.warning("Failed to transform forecast_id",
forecast_id=batch_data.get('forecast_id'),
error=str(e))
# Transform equipment_used array
transformed_equipment = []
if batch_data.get('equipment_used'):
for equip_id in batch_data['equipment_used']:
try:
transformed_equipment.append(str(transform_id(equip_id, virtual_uuid)))
except Exception as e:
logger.warning("Failed to transform equipment_id",
equipment_id=equip_id,
error=str(e))
# staff_assigned contains user IDs - these should NOT be transformed
# because they reference actual user accounts which are NOT cloned
# The demo uses the same user accounts across all virtual tenants
staff_assigned = batch_data.get('staff_assigned', [])
new_batch = ProductionBatch(
id=str(transformed_id),
tenant_id=virtual_uuid,
batch_number=f"{session_id[:8]}-{batch_data.get('batch_number', f'BATCH-{uuid.uuid4().hex[:8].upper()}')}",
product_id=batch_data.get('product_id'),
product_id=transformed_product_id,
product_name=batch_data.get('product_name'),
recipe_id=batch_data.get('recipe_id'),
recipe_id=transformed_recipe_id,
planned_start_time=adjusted_planned_start,
planned_end_time=adjusted_planned_end,
planned_quantity=batch_data.get('planned_quantity'),
@@ -389,11 +442,11 @@ async def clone_demo_data(
waste_quantity=batch_data.get('waste_quantity'),
defect_quantity=batch_data.get('defect_quantity'),
waste_defect_type=batch_data.get('waste_defect_type'),
equipment_used=batch_data.get('equipment_used'),
staff_assigned=batch_data.get('staff_assigned'),
equipment_used=transformed_equipment,
staff_assigned=staff_assigned,
station_id=batch_data.get('station_id'),
order_id=batch_data.get('order_id'),
forecast_id=batch_data.get('forecast_id'),
order_id=transformed_order_id,
forecast_id=transformed_forecast_id,
is_rush_order=batch_data.get('is_rush_order', False),
is_special_recipe=batch_data.get('is_special_recipe', False),
is_ai_assisted=batch_data.get('is_ai_assisted', False),
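The four foreign-key transformations above repeat the same guard-and-log pattern. A minimal helper sketch (hypothetical, not part of this commit; assumes the transform_id function and structlog-style logger used above) could collapse the repetition:
def transform_optional_id(raw_id, virtual_uuid, field_name):
    # Returns the transformed id as a string, or None when the value is missing
    # or cannot be transformed.
    if not raw_id:
        return None
    try:
        return str(transform_id(raw_id, virtual_uuid))
    except Exception as e:
        logger.warning("Failed to transform id", field=field_name, value=str(raw_id), error=str(e))
        return None

# Usage sketch:
# transformed_product_id = transform_optional_id(batch_data.get('product_id'), virtual_uuid, 'product_id')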

View File

@@ -7,7 +7,7 @@ Provides endpoints to trigger ML insight generation for:
- Process efficiency analysis
"""
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends, HTTPException, Request
from pydantic import BaseModel, Field
from typing import Optional, List
from uuid import UUID
@@ -71,6 +71,7 @@ class YieldPredictionResponse(BaseModel):
async def trigger_yield_prediction(
tenant_id: str,
request_data: YieldPredictionRequest,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
@@ -81,10 +82,12 @@ async def trigger_yield_prediction(
2. Runs the YieldInsightsOrchestrator to predict yields
3. Generates insights about yield optimization opportunities
4. Posts insights to AI Insights Service
5. Publishes recommendation events to RabbitMQ
Args:
tenant_id: Tenant UUID
request_data: Prediction parameters
request: FastAPI request (for app state access)
db: Database session
Returns:
@@ -103,8 +106,13 @@ async def trigger_yield_prediction(
from shared.clients.recipes_client import RecipesServiceClient
from app.core.config import settings
# Get event publisher from app state (if available)
event_publisher = getattr(request.app.state, 'event_publisher', None)
# Initialize orchestrator and recipes client
orchestrator = YieldInsightsOrchestrator()
orchestrator = YieldInsightsOrchestrator(
event_publisher=event_publisher
)
recipes_client = RecipesServiceClient(settings)
# Get recipes to analyze from recipes service via API
@@ -186,12 +194,18 @@ async def trigger_yield_prediction(
continue # Skip batches without complete data
production_data.append({
'production_date': batch.actual_start_time,
'production_run_id': str(batch.id), # Required: unique identifier for each production run
'recipe_id': str(batch.recipe_id), # Required: recipe identifier
'started_at': batch.actual_start_time,
'completed_at': batch.actual_end_time, # Optional but useful for duration analysis
'batch_size': float(batch.planned_quantity), # Use planned_quantity as batch_size
'planned_quantity': float(batch.planned_quantity),
'actual_quantity': float(batch.actual_quantity),
'yield_percentage': yield_pct,
'worker_id': batch.notes or 'unknown', # Use notes field or default
'batch_number': batch.batch_number
'staff_assigned': batch.staff_assigned if batch.staff_assigned else ['unknown'],
'batch_number': batch.batch_number,
'equipment_id': batch.equipment_used[0] if batch.equipment_used and len(batch.equipment_used) > 0 else None,
'notes': batch.quality_notes # Optional quality notes
})
if not production_data:
@@ -202,6 +216,14 @@ async def trigger_yield_prediction(
production_history = pd.DataFrame(production_data)
# Debug: Log DataFrame columns and sample data
logger.debug(
"Production history DataFrame created",
recipe_id=recipe_id,
columns=list(production_history.columns),
sample_data=production_history.head(1).to_dict('records') if len(production_history) > 0 else None
)
# Run yield analysis
results = await orchestrator.analyze_and_post_insights(
tenant_id=tenant_id,
@@ -291,8 +313,6 @@ async def ml_insights_health():
# INTERNAL ENDPOINTS (for demo-session service)
# ================================================================
from fastapi import Request
# Create a separate router for internal endpoints to avoid the tenant prefix
internal_router = APIRouter(
tags=["ML Insights - Internal"]
@@ -347,6 +367,7 @@ async def generate_yield_insights_internal(
result = await trigger_yield_prediction(
tenant_id=tenant_id,
request_data=request_data,
request=request,
db=db
)

View File

@@ -142,6 +142,7 @@ class ProductionService(StandardFastAPIService):
app.state.production_alert_service = self.alert_service # Also store with this name for internal trigger
app.state.notification_service = self.notification_service # Notification service for state change events
app.state.production_scheduler = self.production_scheduler # Store scheduler for manual triggering
app.state.event_publisher = self.event_publisher # Store event publisher for ML insights
async def on_shutdown(self, app: FastAPI):
"""Custom shutdown logic for production service"""

View File

@@ -14,6 +14,7 @@ import os
# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher
from app.ml.yield_predictor import YieldPredictor
@@ -28,15 +29,18 @@ class YieldInsightsOrchestrator:
1. Predict yield for upcoming production run or analyze historical performance
2. Generate insights for yield optimization opportunities
3. Post insights to AI Insights Service
4. Provide yield predictions for production planning
4. Publish recommendation events to RabbitMQ
5. Provide yield predictions for production planning
"""
def __init__(
self,
ai_insights_base_url: str = "http://ai-insights-service:8000"
ai_insights_base_url: str = "http://ai-insights-service:8000",
event_publisher: Optional[UnifiedEventPublisher] = None
):
self.predictor = YieldPredictor()
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
self.event_publisher = event_publisher
async def predict_and_post_insights(
self,
@@ -54,7 +58,7 @@ class YieldInsightsOrchestrator:
recipe_id: Recipe identifier
production_history: Historical production runs
production_context: Upcoming production context:
- worker_id
- staff_assigned (list of staff IDs)
- planned_start_time
- batch_size
- planned_quantity
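For reference, a minimal production_context built from the fields listed above might look like the following (illustrative values only, not taken from the commit):
production_context = {
    'staff_assigned': ['worker_3'],
    'planned_start_time': '2025-12-16T06:00:00Z',
    'batch_size': 120,
    'planned_quantity': 120,
}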
@@ -109,6 +113,17 @@ class YieldInsightsOrchestrator:
successful=post_results['successful'],
failed=post_results['failed']
)
# Step 4: Publish recommendation events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
recipe_context = production_context.copy() if production_context else {}
recipe_context['recipe_id'] = recipe_id
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
recipe_context=recipe_context
)
else:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
logger.info("No insights to post for recipe", recipe_id=recipe_id)
@@ -193,6 +208,15 @@ class YieldInsightsOrchestrator:
total=post_results['total'],
successful=post_results['successful']
)
# Step 4: Publish recommendation events to RabbitMQ
created_insights = post_results.get('created_insights', [])
if created_insights:
await self._publish_insight_events(
tenant_id=tenant_id,
insights=created_insights,
recipe_context={'recipe_id': recipe_id}
)
else:
post_results = {'total': 0, 'successful': 0, 'failed': 0}
@@ -248,6 +272,83 @@ class YieldInsightsOrchestrator:
return enriched
async def _publish_insight_events(
self,
tenant_id: str,
insights: List[Dict[str, Any]],
recipe_context: Optional[Dict[str, Any]] = None
) -> None:
"""
Publish recommendation events to RabbitMQ for each insight.
Args:
tenant_id: Tenant identifier
insights: List of created insights (with insight_id from AI Insights Service)
recipe_context: Optional recipe context (name, id, etc.)
"""
if not self.event_publisher:
logger.warning("Event publisher not configured, skipping event publication")
return
for insight in insights:
try:
# Determine severity based on confidence and priority
confidence = insight.get('confidence', 0)
priority = insight.get('priority', 'medium')
if priority == 'urgent' or confidence >= 90:
severity = 'urgent'
elif priority == 'high' or confidence >= 70:
severity = 'high'
elif priority == 'medium' or confidence >= 50:
severity = 'medium'
else:
severity = 'low'
# Build event metadata
event_metadata = {
'insight_id': insight.get('id'), # From AI Insights Service response
'insight_type': insight.get('insight_type'),
'recipe_id': insight.get('metrics_json', {}).get('recipe_id'),
'recipe_name': recipe_context.get('recipe_name') if recipe_context else None,
'predicted_yield': insight.get('metrics_json', {}).get('predicted_yield'),
'confidence': confidence,
'recommendation': insight.get('recommendation'),
'impact_type': insight.get('impact_type'),
'impact_value': insight.get('impact_value'),
'source_service': 'production',
'source_model': 'yield_predictor'
}
# Remove None values
event_metadata = {k: v for k, v in event_metadata.items() if v is not None}
# Publish recommendation event
await self.event_publisher.publish_recommendation(
event_type='ai_yield_prediction',
tenant_id=tenant_id,
severity=severity,
data=event_metadata
)
logger.info(
"Published yield insight recommendation event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
insight_type=insight.get('insight_type'),
severity=severity
)
except Exception as e:
logger.error(
"Failed to publish insight event",
tenant_id=tenant_id,
insight_id=insight.get('id'),
error=str(e),
exc_info=True
)
# Don't raise - we don't want to fail the whole workflow if event publishing fails
async def analyze_all_recipes(
self,
tenant_id: str,

View File

@@ -62,14 +62,14 @@ class YieldPredictor:
- planned_quantity
- actual_quantity
- yield_percentage
- worker_id
- staff_assigned (list of staff IDs)
- started_at
- completed_at
- batch_size
- equipment_id (optional)
- notes (optional)
production_context: Upcoming production context:
- worker_id
- staff_assigned (list of staff IDs)
- planned_start_time
- batch_size
- equipment_id (optional)
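A one-row production_history sketch using the columns documented above (values are illustrative; any additional identifier columns the caller supplies, such as a run or recipe id, are omitted here):
import pandas as pd

production_history = pd.DataFrame([{
    'planned_quantity': 100.0,
    'actual_quantity': 96.5,
    'yield_percentage': 96.5,
    'staff_assigned': ['worker_1'],
    'started_at': pd.Timestamp('2025-12-01 06:00'),
    'completed_at': pd.Timestamp('2025-12-01 10:00'),
    'batch_size': 100,
    'equipment_id': 'mixer-2',
    'notes': None,
}])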
@@ -212,6 +212,9 @@ class YieldPredictor:
df['is_small_batch'] = (df['batch_size'] < df['batch_size'].quantile(0.25)).astype(int)
# Worker experience features (proxy: number of previous runs)
# Extract first worker from staff_assigned list
df['worker_id'] = df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
df = df.sort_values('started_at')
df['worker_run_count'] = df.groupby('worker_id').cumcount() + 1
df['worker_experience_level'] = pd.cut(
@@ -232,6 +235,10 @@ class YieldPredictor:
factors = {}
# Worker impact
# Extract worker_id from staff_assigned for analysis
if 'worker_id' not in feature_df.columns:
feature_df['worker_id'] = feature_df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
worker_yields = feature_df.groupby('worker_id')['yield_percentage'].agg(['mean', 'std', 'count'])
worker_yields = worker_yields[worker_yields['count'] >= 3] # Min 3 runs per worker
@@ -339,7 +346,10 @@ class YieldPredictor:
if 'duration_hours' in feature_df.columns:
feature_columns.append('duration_hours')
# Encode worker_id
# Encode worker_id (extracted from staff_assigned)
if 'worker_id' not in feature_df.columns:
feature_df['worker_id'] = feature_df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
worker_encoding = {worker: idx for idx, worker in enumerate(feature_df['worker_id'].unique())}
feature_df['worker_encoded'] = feature_df['worker_id'].map(worker_encoding)
feature_columns.append('worker_encoded')
@@ -420,11 +430,15 @@ class YieldPredictor:
) -> Dict[str, Any]:
"""Predict yield for upcoming production run."""
# Extract context
worker_id = production_context.get('worker_id')
staff_assigned = production_context.get('staff_assigned', [])
worker_id = staff_assigned[0] if isinstance(staff_assigned, list) and len(staff_assigned) > 0 else 'unknown'
planned_start = pd.to_datetime(production_context.get('planned_start_time'))
batch_size = production_context.get('batch_size')
# Get worker experience
if 'worker_id' not in feature_df.columns:
feature_df['worker_id'] = feature_df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
worker_runs = feature_df[feature_df['worker_id'] == worker_id]
worker_run_count = len(worker_runs) if len(worker_runs) > 0 else 1
@@ -578,7 +592,7 @@ class YieldPredictor:
'action': 'review_production_factors',
'params': {
'recipe_id': recipe_id,
'worker_id': production_context.get('worker_id')
'worker_id': worker_id
}
}]
})

View File

@@ -31,7 +31,7 @@ def stable_yield_history():
'planned_quantity': 100,
'actual_quantity': np.random.normal(97, 1.5), # 97% avg, low variance
'yield_percentage': np.random.normal(97, 1.5),
'worker_id': f'worker_{i % 3}', # 3 workers
'staff_assigned': [f'worker_{i % 3}'], # 3 workers
'started_at': run_date,
'completed_at': run_date + timedelta(hours=4),
'batch_size': np.random.randint(80, 120)