Initial commit - production deployment

851  services/procurement/app/ml/price_forecaster.py  Normal file
@@ -0,0 +1,851 @@
"""
Price Forecaster

Predicts supplier price changes for opportunistic buying recommendations and
identifies optimal timing for bulk purchases and price negotiation opportunities.
"""

import pandas as pd
import numpy as np
from typing import Dict, List, Any, Optional
import structlog
from datetime import datetime, timedelta
from sklearn.linear_model import LinearRegression
import warnings

warnings.filterwarnings('ignore')

logger = structlog.get_logger()


class PriceForecaster:
    """
    Forecasts ingredient and product prices for opportunistic procurement.

    Capabilities:
    1. Short-term price forecasting (1-4 weeks)
    2. Seasonal price pattern detection
    3. Price trend analysis
    4. Buy/wait recommendations
    5. Bulk purchase opportunity identification
    6. Price volatility assessment
    7. Supplier comparison for price optimization
    """

    def __init__(self):
        self.price_models = {}
        self.seasonal_patterns = {}
        self.volatility_scores = {}

    async def forecast_price(
        self,
        tenant_id: str,
        ingredient_id: str,
        price_history: pd.DataFrame,
        forecast_horizon_days: int = 30,
        min_history_days: int = 180
    ) -> Dict[str, Any]:
        """
        Forecast future prices and generate procurement recommendations.

        Args:
            tenant_id: Tenant identifier
            ingredient_id: Ingredient/product identifier
            price_history: Historical price data with columns:
                - date
                - price_per_unit
                - quantity_purchased (optional)
                - supplier_id (optional)
            forecast_horizon_days: Days to forecast ahead (default 30)
            min_history_days: Minimum days of history required (default 180)

        Returns:
            Dictionary with price forecast and insights
        """
        logger.info(
            "Forecasting prices",
            tenant_id=tenant_id,
            ingredient_id=ingredient_id,
            history_days=len(price_history),
            forecast_days=forecast_horizon_days
        )

        # Validate input
        if len(price_history) < min_history_days:
            logger.warning(
                "Insufficient price history",
                ingredient_id=ingredient_id,
                days=len(price_history),
                required=min_history_days
            )
            return self._insufficient_data_response(
                tenant_id, ingredient_id, price_history, min_history_days
            )

        # Prepare data
        price_history = price_history.copy()
        price_history['date'] = pd.to_datetime(price_history['date'])
        price_history = price_history.sort_values('date')

        # Calculate price statistics
        price_stats = self._calculate_price_statistics(price_history)

        # Detect seasonal patterns
        seasonal_analysis = self._detect_seasonal_patterns(price_history)

        # Detect trends
        trend_analysis = self._analyze_price_trends(price_history)

        # Forecast future prices
        forecast = self._generate_price_forecast(
            price_history,
            forecast_horizon_days,
            seasonal_analysis,
            trend_analysis
        )

        # Calculate volatility
        volatility = self._calculate_price_volatility(price_history)

        # Generate buy/wait recommendations
        recommendations = self._generate_procurement_recommendations(
            price_history,
            forecast,
            price_stats,
            volatility,
            trend_analysis
        )

        # Identify bulk purchase opportunities
        bulk_opportunities = self._identify_bulk_opportunities(
            forecast,
            price_stats,
            volatility
        )

        # Generate insights
        insights = self._generate_price_insights(
            tenant_id,
            ingredient_id,
            price_stats,
            forecast,
            recommendations,
            bulk_opportunities,
            trend_analysis,
            volatility
        )

        # Cache per-ingredient analysis for later lookups
        self.seasonal_patterns[ingredient_id] = seasonal_analysis
        self.volatility_scores[ingredient_id] = volatility

        logger.info(
            "Price forecasting complete",
            ingredient_id=ingredient_id,
            avg_forecast_price=forecast['mean_forecast_price'],
            recommendation=recommendations['action'],
            insights_generated=len(insights)
        )

        return {
            'tenant_id': tenant_id,
            'ingredient_id': ingredient_id,
            'forecasted_at': datetime.utcnow().isoformat(),
            'history_days': len(price_history),
            'forecast_horizon_days': forecast_horizon_days,
            'price_stats': price_stats,
            'seasonal_analysis': seasonal_analysis,
            'trend_analysis': trend_analysis,
            'forecast': forecast,
            'volatility': volatility,
            'recommendations': recommendations,
            'bulk_opportunities': bulk_opportunities,
            'insights': insights
        }

    def _calculate_price_statistics(
        self,
        price_history: pd.DataFrame
    ) -> Dict[str, float]:
        """
        Calculate comprehensive price statistics.

        Args:
            price_history: Historical price data

        Returns:
            Dictionary of price statistics
        """
        prices = price_history['price_per_unit'].values

        # Basic statistics
        current_price = float(prices[-1])
        mean_price = float(prices.mean())
        std_price = float(prices.std())
        cv_price = (std_price / mean_price) if mean_price > 0 else 0

        # Price range
        min_price = float(prices.min())
        max_price = float(prices.max())
        price_range_pct = ((max_price - min_price) / mean_price * 100) if mean_price > 0 else 0

        # Recent vs historical
        if len(prices) >= 60:
            recent_30d_mean = float(prices[-30:].mean())
            historical_mean = float(prices[:-30].mean())
            price_change_pct = ((recent_30d_mean - historical_mean) / historical_mean * 100) if historical_mean > 0 else 0
        else:
            recent_30d_mean = current_price
            price_change_pct = 0

        # Price momentum (last 7 days vs previous 7 days)
        if len(prices) >= 14:
            last_week = prices[-7:].mean()
            prev_week = prices[-14:-7].mean()
            momentum = ((last_week - prev_week) / prev_week * 100) if prev_week > 0 else 0
        else:
            momentum = 0

        return {
            'current_price': current_price,
            'mean_price': mean_price,
            'std_price': std_price,
            'cv_price': cv_price,
            'min_price': min_price,
            'max_price': max_price,
            'price_range_pct': price_range_pct,
            'recent_30d_mean': recent_30d_mean,
            'price_change_30d_pct': price_change_pct,
            'momentum_7d_pct': momentum,
            'data_points': len(prices)
        }
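    # Example: a last-week mean of $10.50 against a prior-week mean of $10.00
    # yields momentum_7d_pct = (10.50 - 10.00) / 10.00 * 100 = +5.0.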

    def _detect_seasonal_patterns(
        self,
        price_history: pd.DataFrame
    ) -> Dict[str, Any]:
        """
        Detect seasonal price patterns.

        Args:
            price_history: Historical price data

        Returns:
            Seasonal pattern analysis
        """
        # Extract month from date
        price_history = price_history.copy()
        price_history['month'] = price_history['date'].dt.month

        # Calculate average price per month
        monthly_avg = price_history.groupby('month')['price_per_unit'].agg(['mean', 'std', 'count'])

        overall_mean = price_history['price_per_unit'].mean()

        seasonal_patterns = {}
        has_seasonality = False

        month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                       'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

        for month in range(1, 13):
            if month in monthly_avg.index and monthly_avg.loc[month, 'count'] >= 3:
                month_mean = monthly_avg.loc[month, 'mean']
                deviation_pct = ((month_mean - overall_mean) / overall_mean * 100) if overall_mean > 0 else 0

                seasonal_patterns[month_names[month - 1]] = {
                    'month': month,
                    'avg_price': round(float(month_mean), 2),
                    'deviation_pct': round(float(deviation_pct), 2),
                    'sample_size': int(monthly_avg.loc[month, 'count'])
                }

                # Significant seasonality if any month deviates >10% from the overall mean
                if abs(deviation_pct) > 10:
                    has_seasonality = True

        return {
            'has_seasonality': has_seasonality,
            'monthly_patterns': seasonal_patterns,
            'overall_mean_price': round(float(overall_mean), 2)
        }
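    # Example: with an overall mean of $10.00 and at least three July
    # observations, a July average of $11.20 is a +12% deviation and flags
    # seasonality; a July average of $10.50 (+5%) does not.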

    def _analyze_price_trends(
        self,
        price_history: pd.DataFrame
    ) -> Dict[str, Any]:
        """
        Analyze price trends using linear regression.

        Args:
            price_history: Historical price data

        Returns:
            Trend analysis
        """
        # Create time index (days from start)
        price_history = price_history.copy()
        price_history['days_from_start'] = (
            price_history['date'] - price_history['date'].min()
        ).dt.days

        X = price_history['days_from_start'].values.reshape(-1, 1)
        y = price_history['price_per_unit'].values

        # Fit linear regression
        model = LinearRegression()
        model.fit(X, y)

        # Calculate trend
        slope = float(model.coef_[0])
        r_squared = float(model.score(X, y))

        # Trend direction and magnitude
        avg_price = y.mean()
        trend_pct_per_month = (slope * 30 / avg_price * 100) if avg_price > 0 else 0

        # Classify trend: within +/-2% per month counts as stable
        if abs(trend_pct_per_month) < 2:
            trend_direction = 'stable'
        elif trend_pct_per_month > 0:
            trend_direction = 'increasing'
        else:
            trend_direction = 'decreasing'

        # Recent trend (last 90 days)
        if len(price_history) >= 90:
            recent_data = price_history.tail(90).copy()
            recent_X = recent_data['days_from_start'].values.reshape(-1, 1)
            recent_y = recent_data['price_per_unit'].values

            recent_model = LinearRegression()
            recent_model.fit(recent_X, recent_y)

            recent_slope = float(recent_model.coef_[0])
            recent_trend_pct = (recent_slope * 30 / recent_y.mean() * 100) if recent_y.mean() > 0 else 0
        else:
            recent_trend_pct = trend_pct_per_month

        return {
            'trend_direction': trend_direction,
            'trend_pct_per_month': round(trend_pct_per_month, 2),
            'recent_trend_pct_per_month': round(recent_trend_pct, 2),
            'slope': round(slope, 4),
            'r_squared': round(r_squared, 3),
            'is_accelerating': abs(recent_trend_pct) > abs(trend_pct_per_month) * 1.5
        }
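    # Example: a fitted slope of 0.02 $/day on a $12.00 average price is
    # 0.02 * 30 / 12.00 * 100 = +5.0% per month, classified as 'increasing'.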

    def _generate_price_forecast(
        self,
        price_history: pd.DataFrame,
        forecast_days: int,
        seasonal_analysis: Dict[str, Any],
        trend_analysis: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Generate price forecast for specified horizon.

        Args:
            price_history: Historical price data
            forecast_days: Days to forecast
            seasonal_analysis: Seasonal patterns
            trend_analysis: Trend analysis

        Returns:
            Price forecast
        """
        current_price = price_history['price_per_unit'].iloc[-1]
        current_date = price_history['date'].iloc[-1]

        # Simple forecast: current price + trend + seasonal adjustment
        trend_slope = trend_analysis['slope']

        forecast_prices = []
        forecast_dates = []

        for day in range(1, forecast_days + 1):
            forecast_date = current_date + timedelta(days=day)
            forecast_dates.append(forecast_date)

            # Base forecast from trend
            base_forecast = current_price + (trend_slope * day)

            # Seasonal adjustment
            if seasonal_analysis['has_seasonality']:
                month_name = forecast_date.strftime('%b')
                if month_name in seasonal_analysis['monthly_patterns']:
                    month_deviation = seasonal_analysis['monthly_patterns'][month_name]['deviation_pct']
                    seasonal_adjustment = base_forecast * (month_deviation / 100)
                    base_forecast += seasonal_adjustment

            forecast_prices.append(base_forecast)

        forecast_prices = np.array(forecast_prices)

        # Calculate confidence intervals (±2 std)
        historical_std = price_history['price_per_unit'].std()
        lower_bound = forecast_prices - 2 * historical_std
        upper_bound = forecast_prices + 2 * historical_std

        return {
            'forecast_dates': [d.strftime('%Y-%m-%d') for d in forecast_dates],
            'forecast_prices': [round(float(p), 2) for p in forecast_prices],
            'lower_bound': [round(float(p), 2) for p in lower_bound],
            'upper_bound': [round(float(p), 2) for p in upper_bound],
            'mean_forecast_price': round(float(forecast_prices.mean()), 2),
            'min_forecast_price': round(float(forecast_prices.min()), 2),
            'max_forecast_price': round(float(forecast_prices.max()), 2),
            'confidence': self._calculate_forecast_confidence(price_history, trend_analysis)
        }
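    # Worked example: with current_price = 10.00, slope = 0.02 $/day and a +5%
    # July seasonal deviation, the day-30 July forecast is
    # (10.00 + 0.02 * 30) * 1.05 = 11.13.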

    def _calculate_forecast_confidence(
        self,
        price_history: pd.DataFrame,
        trend_analysis: Dict[str, Any]
    ) -> int:
        """Calculate confidence in price forecast (0-100)."""
        confidence = 50  # Base confidence

        # More data = higher confidence
        data_points = len(price_history)
        if data_points >= 365:
            confidence += 30
        elif data_points >= 180:
            confidence += 20
        else:
            confidence += 10

        # Strong trend = higher confidence
        r_squared = trend_analysis['r_squared']
        if r_squared > 0.7:
            confidence += 20
        elif r_squared > 0.5:
            confidence += 10

        # Low volatility = higher confidence
        cv = price_history['price_per_unit'].std() / price_history['price_per_unit'].mean()
        if cv < 0.1:
            confidence += 10
        elif cv < 0.2:
            confidence += 5

        return min(100, confidence)
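    # Example: 365+ days of history (+30), r_squared = 0.8 (+20) and CV = 0.08
    # (+10) gives 50 + 30 + 20 + 10 = 110, capped at 100.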

    def _calculate_price_volatility(
        self,
        price_history: pd.DataFrame
    ) -> Dict[str, Any]:
        """
        Calculate price volatility metrics.

        Args:
            price_history: Historical price data

        Returns:
            Volatility analysis
        """
        prices = price_history['price_per_unit'].values

        # Coefficient of variation
        cv = float(prices.std() / prices.mean()) if prices.mean() > 0 else 0

        # Price changes (day-to-day)
        price_changes = np.diff(prices)
        pct_changes = price_changes / prices[:-1] * 100

        # Volatility classification
        if cv < 0.1:
            volatility_level = 'low'
        elif cv < 0.2:
            volatility_level = 'medium'
        else:
            volatility_level = 'high'

        return {
            'coefficient_of_variation': round(cv, 3),
            'volatility_level': volatility_level,
            'avg_daily_change_pct': round(float(np.abs(pct_changes).mean()), 2),
            'max_daily_increase_pct': round(float(pct_changes.max()), 2),
            'max_daily_decrease_pct': round(float(pct_changes.min()), 2)
        }
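    # Example: prices with mean $10.00 and standard deviation $1.50 give
    # CV = 0.15, classified as 'medium' volatility.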

    def _generate_procurement_recommendations(
        self,
        price_history: pd.DataFrame,
        forecast: Dict[str, Any],
        price_stats: Dict[str, float],
        volatility: Dict[str, Any],
        trend_analysis: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Generate buy/wait recommendations based on forecast.

        Args:
            price_history: Historical data
            forecast: Price forecast
            price_stats: Price statistics
            volatility: Volatility analysis
            trend_analysis: Trend analysis

        Returns:
            Procurement recommendations
        """
        current_price = price_stats['current_price']
        forecast_mean = forecast['mean_forecast_price']
        forecast_min = forecast['min_forecast_price']
        forecast_days = len(forecast['forecast_prices'])

        # Calculate expected price change
        expected_change_pct = ((forecast_mean - current_price) / current_price * 100) if current_price > 0 else 0

        # Decision logic with i18n-friendly reasoning codes
        if expected_change_pct < -5:
            # Price expected to drop >5%
            action = 'wait'
            reasoning_data = {
                'type': 'decrease_expected',
                'parameters': {
                    'change_pct': round(abs(expected_change_pct), 1),
                    'forecast_days': forecast_days,
                    'current_price': round(current_price, 2),
                    'forecast_mean': round(forecast_mean, 2)
                }
            }
            urgency = 'low'

        elif expected_change_pct > 5:
            # Price expected to increase >5%
            action = 'buy_now'
            reasoning_data = {
                'type': 'increase_expected',
                'parameters': {
                    'change_pct': round(expected_change_pct, 1),
                    'forecast_days': forecast_days,
                    'current_price': round(current_price, 2),
                    'forecast_mean': round(forecast_mean, 2)
                }
            }
            urgency = 'high'

        elif volatility['volatility_level'] == 'high':
            # High volatility - wait for a dip
            action = 'wait_for_dip'
            reasoning_data = {
                'type': 'high_volatility',
                'parameters': {
                    'coefficient': round(volatility['coefficient_of_variation'], 2),
                    'volatility_level': volatility['volatility_level'],
                    'avg_daily_change_pct': round(volatility['avg_daily_change_pct'], 2)
                }
            }
            urgency = 'medium'

        elif current_price < price_stats['mean_price'] * 0.95:
            # Currently more than 5% below the historical average
            below_avg_pct = (price_stats['mean_price'] - current_price) / price_stats['mean_price'] * 100
            action = 'buy_now'
            reasoning_data = {
                'type': 'below_average',
                'parameters': {
                    'current_price': round(current_price, 2),
                    'mean_price': round(price_stats['mean_price'], 2),
                    'below_avg_pct': round(below_avg_pct, 1)
                }
            }
            urgency = 'medium'

        else:
            # Neutral
            action = 'normal_purchase'
            reasoning_data = {
                'type': 'stable',
                'parameters': {
                    'current_price': round(current_price, 2),
                    'forecast_mean': round(forecast_mean, 2),
                    'expected_change_pct': round(expected_change_pct, 2)
                }
            }
            urgency = 'low'

        # Optimal purchase timing: the cheapest day in the forecast window
        min_price_index = forecast['forecast_prices'].index(forecast_min)
        optimal_date = forecast['forecast_dates'][min_price_index]

        return {
            'action': action,
            'reasoning_data': reasoning_data,
            'urgency': urgency,
            'expected_price_change_pct': round(expected_change_pct, 2),
            'current_price': current_price,
            'forecast_mean_price': forecast_mean,
            'forecast_min_price': forecast_min,
            'optimal_purchase_date': optimal_date,
            'days_until_optimal': min_price_index + 1
        }
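    # Decision thresholds, in evaluation order: expected change < -5% -> 'wait';
    # > +5% -> 'buy_now'; high volatility -> 'wait_for_dip'; current price more
    # than 5% below the historical mean -> 'buy_now'; otherwise 'normal_purchase'.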

    def _identify_bulk_opportunities(
        self,
        forecast: Dict[str, Any],
        price_stats: Dict[str, float],
        volatility: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Identify bulk purchase opportunities.

        Args:
            forecast: Price forecast
            price_stats: Price statistics
            volatility: Volatility analysis

        Returns:
            Bulk opportunity analysis
        """
        current_price = price_stats['current_price']
        forecast_max = forecast['max_forecast_price']

        # Potential savings from a bulk buy at the current price
        if forecast_max > current_price:
            potential_savings_pct = (forecast_max - current_price) / current_price * 100

            if potential_savings_pct > 10:
                opportunity_level = 'high'
            elif potential_savings_pct > 5:
                opportunity_level = 'medium'
            else:
                opportunity_level = 'low'

            has_opportunity = potential_savings_pct > 5

        else:
            potential_savings_pct = 0
            opportunity_level = 'none'
            has_opportunity = False

        return {
            'has_bulk_opportunity': has_opportunity,
            'opportunity_level': opportunity_level,
            'potential_savings_pct': round(potential_savings_pct, 2),
            'recommended_bulk_quantity_months': 2 if has_opportunity and volatility['volatility_level'] != 'high' else 1
        }
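    # Example: a current price of $10.00 against a forecast peak of $11.00 is a
    # 10% potential saving ('medium'); two months of supply is only recommended
    # when volatility is not 'high'.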

    def _generate_price_insights(
        self,
        tenant_id: str,
        ingredient_id: str,
        price_stats: Dict[str, float],
        forecast: Dict[str, Any],
        recommendations: Dict[str, Any],
        bulk_opportunities: Dict[str, Any],
        trend_analysis: Dict[str, Any],
        volatility: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Generate actionable pricing insights.

        Returns:
            List of insights
        """
        insights = []

        # Insight 1: Buy now recommendation
        if recommendations['action'] == 'buy_now':
            insights.append({
                'type': 'recommendation',
                'priority': recommendations['urgency'],
                'category': 'procurement',
                'title': f'Buy Now: Price Increasing {recommendations["expected_price_change_pct"]:.1f}%',
                'reasoning_data': recommendations['reasoning_data'],
                'impact_type': 'cost_avoidance',
                'impact_value': abs(recommendations['expected_price_change_pct']),
                'impact_unit': 'percentage',
                'confidence': forecast['confidence'],
                'metrics_json': {
                    'ingredient_id': ingredient_id,
                    'current_price': price_stats['current_price'],
                    'forecast_price': forecast['mean_forecast_price'],
                    'expected_change_pct': recommendations['expected_price_change_pct'],
                    'optimal_date': recommendations['optimal_purchase_date']
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Purchase Now',
                        'action': 'create_purchase_order',
                        'params': {
                            'ingredient_id': ingredient_id,
                            'priority': 'high'
                        }
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'price_forecaster'
            })

        # Insight 2: Wait recommendation
        elif recommendations['action'] == 'wait':
            insights.append({
                'type': 'recommendation',
                'priority': 'medium',
                'category': 'procurement',
                'title': f'Wait to Buy: Price Decreasing {abs(recommendations["expected_price_change_pct"]):.1f}%',
                'reasoning_data': {
                    **recommendations['reasoning_data'],
                    'optimal_purchase_date': recommendations['optimal_purchase_date'],
                    'days_until_optimal': recommendations['days_until_optimal']
                },
                'impact_type': 'cost_savings',
                'impact_value': abs(recommendations['expected_price_change_pct']),
                'impact_unit': 'percentage',
                'confidence': forecast['confidence'],
                'metrics_json': {
                    'ingredient_id': ingredient_id,
                    'current_price': price_stats['current_price'],
                    'forecast_min_price': forecast['min_forecast_price'],
                    'optimal_date': recommendations['optimal_purchase_date'],
                    'days_until_optimal': recommendations['days_until_optimal']
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Delay Purchase',
                        'action': 'delay_purchase_order',
                        'params': {
                            'ingredient_id': ingredient_id,
                            'delay_days': recommendations['days_until_optimal']
                        }
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'price_forecaster'
            })

        # Insight 3: Bulk opportunity
        if bulk_opportunities['has_bulk_opportunity']:
            insights.append({
                'type': 'optimization',
                'priority': bulk_opportunities['opportunity_level'],
                'category': 'procurement',
                'title': f'Bulk Buy Opportunity: Save {bulk_opportunities["potential_savings_pct"]:.1f}%',
                'description': f'Current price is favorable. Purchasing {bulk_opportunities["recommended_bulk_quantity_months"]} months\' supply now could save {bulk_opportunities["potential_savings_pct"]:.1f}% vs future prices.',
                'impact_type': 'cost_savings',
                'impact_value': bulk_opportunities['potential_savings_pct'],
                'impact_unit': 'percentage',
                'confidence': forecast['confidence'],
                'metrics_json': {
                    'ingredient_id': ingredient_id,
                    'current_price': price_stats['current_price'],
                    'forecast_max_price': forecast['max_forecast_price'],
                    'savings_pct': bulk_opportunities['potential_savings_pct'],
                    'recommended_months_supply': bulk_opportunities['recommended_bulk_quantity_months']
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Create Bulk Order',
                        'action': 'create_bulk_purchase_order',
                        'params': {
                            'ingredient_id': ingredient_id,
                            'months_supply': bulk_opportunities['recommended_bulk_quantity_months']
                        }
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'price_forecaster'
            })

        # Insight 4: High volatility warning
        if volatility['volatility_level'] == 'high':
            insights.append({
                'type': 'alert',
                'priority': 'medium',
                'category': 'procurement',
                'title': f'High Price Volatility: CV={volatility["coefficient_of_variation"]:.2f}',
                'description': f'Ingredient {ingredient_id} shows high price volatility with {volatility["avg_daily_change_pct"]:.1f}% average daily change. Consider alternative suppliers or hedging strategies.',
                'impact_type': 'risk_warning',
                'impact_value': volatility['coefficient_of_variation'],
                'impact_unit': 'cv_score',
                'confidence': 90,
                'metrics_json': {
                    'ingredient_id': ingredient_id,
                    'volatility_level': volatility['volatility_level'],
                    'cv': volatility['coefficient_of_variation'],
                    'avg_daily_change_pct': volatility['avg_daily_change_pct']
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Find Alternative Suppliers',
                        'action': 'search_alternative_suppliers',
                        'params': {'ingredient_id': ingredient_id}
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'price_forecaster'
            })

        # Insight 5: Strong price trend
        if abs(trend_analysis['trend_pct_per_month']) > 5:
            direction = 'increasing' if trend_analysis['trend_pct_per_month'] > 0 else 'decreasing'
            insights.append({
                'type': 'insight',
                'priority': 'medium',
                'category': 'procurement',
                'title': f'Strong Price Trend: {direction.title()} {abs(trend_analysis["trend_pct_per_month"]):.1f}%/month',
                'description': f'Ingredient {ingredient_id} prices are {direction} at {abs(trend_analysis["trend_pct_per_month"]):.1f}% per month. Plan procurement strategy accordingly.',
                'impact_type': 'trend_warning',
                'impact_value': abs(trend_analysis['trend_pct_per_month']),
                'impact_unit': 'pct_per_month',
                'confidence': int(trend_analysis['r_squared'] * 100),
                'metrics_json': {
                    'ingredient_id': ingredient_id,
                    'trend_direction': trend_analysis['trend_direction'],
                    'trend_pct_per_month': trend_analysis['trend_pct_per_month'],
                    'r_squared': trend_analysis['r_squared']
                },
                'actionable': False,
                'source_service': 'procurement',
                'source_model': 'price_forecaster'
            })

        return insights

    def _insufficient_data_response(
        self,
        tenant_id: str,
        ingredient_id: str,
        price_history: pd.DataFrame,
        min_history_days: int = 180
    ) -> Dict[str, Any]:
        """Return a placeholder response when insufficient data is available."""
        return {
            'tenant_id': tenant_id,
            'ingredient_id': ingredient_id,
            'forecasted_at': datetime.utcnow().isoformat(),
            'history_days': len(price_history),
            'forecast_horizon_days': 0,
            'price_stats': {},
            'seasonal_analysis': {'has_seasonality': False},
            'trend_analysis': {},
            'forecast': {},
            'volatility': {},
            'recommendations': {
                'action': 'insufficient_data',
                'reasoning_data': {
                    'type': 'insufficient_data',
                    'parameters': {
                        'history_days': len(price_history),
                        'min_required_days': min_history_days
                    }
                },
                'urgency': 'low'
            },
            'bulk_opportunities': {'has_bulk_opportunity': False},
            'insights': []
        }

    def get_seasonal_patterns(self, ingredient_id: str) -> Optional[Dict[str, Any]]:
        """Get cached seasonal patterns for an ingredient."""
        return self.seasonal_patterns.get(ingredient_id)

    def get_volatility_score(self, ingredient_id: str) -> Optional[Dict[str, Any]]:
        """Get cached volatility score for an ingredient."""
        return self.volatility_scores.get(ingredient_id)
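

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the service): build
    # a synthetic daily price series and run a single forecast end to end. The
    # tenant and ingredient identifiers below are made up for the demo.
    import asyncio

    async def _demo() -> None:
        rng = np.random.default_rng(42)
        dates = pd.date_range("2024-01-01", periods=365, freq="D")
        # Mild upward drift plus noise around a $10 base price
        prices = 10.0 + 0.005 * np.arange(365) + rng.normal(0, 0.3, 365)
        history = pd.DataFrame({"date": dates, "price_per_unit": prices})

        forecaster = PriceForecaster()
        result = await forecaster.forecast_price(
            tenant_id="demo-tenant",
            ingredient_id="flour-001",
            price_history=history,
        )
        print(result["recommendations"]["action"],
              result["forecast"]["mean_forecast_price"])

    asyncio.run(_demo())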

449  services/procurement/app/ml/price_insights_orchestrator.py  Normal file
@@ -0,0 +1,449 @@
"""
Price Insights Orchestrator

Coordinates price forecasting and insight posting.
"""

import pandas as pd
from typing import Dict, List, Any, Optional
import structlog
from datetime import datetime
from uuid import UUID
import sys
import os

# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher

from app.ml.price_forecaster import PriceForecaster

logger = structlog.get_logger()


class PriceInsightsOrchestrator:
    """
    Orchestrates the price forecasting and insight generation workflow.

    Workflow:
    1. Forecast prices from historical data
    2. Generate buy/wait/bulk recommendations
    3. Post insights to AI Insights Service
    4. Publish insight events to RabbitMQ
    5. Provide price forecasts for procurement planning
    """

    def __init__(
        self,
        ai_insights_base_url: str = "http://ai-insights-service:8000",
        event_publisher: Optional[UnifiedEventPublisher] = None
    ):
        self.forecaster = PriceForecaster()
        self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
        self.event_publisher = event_publisher

    async def forecast_and_post_insights(
        self,
        tenant_id: str,
        ingredient_id: str,
        price_history: pd.DataFrame,
        forecast_horizon_days: int = 30,
        min_history_days: int = 180
    ) -> Dict[str, Any]:
        """
        Complete workflow: forecast prices and post insights.

        Args:
            tenant_id: Tenant identifier
            ingredient_id: Ingredient identifier
            price_history: Historical price data
            forecast_horizon_days: Days to forecast ahead
            min_history_days: Minimum days of history required

        Returns:
            Workflow results with forecast and posted insights
        """
        logger.info(
            "Starting price forecasting workflow",
            tenant_id=tenant_id,
            ingredient_id=ingredient_id,
            history_days=len(price_history)
        )

        # Step 1: Forecast prices
        forecast_results = await self.forecaster.forecast_price(
            tenant_id=tenant_id,
            ingredient_id=ingredient_id,
            price_history=price_history,
            forecast_horizon_days=forecast_horizon_days,
            min_history_days=min_history_days
        )

        logger.info(
            "Price forecasting complete",
            ingredient_id=ingredient_id,
            recommendation=forecast_results.get('recommendations', {}).get('action'),
            insights_generated=len(forecast_results.get('insights', []))
        )

        # Step 2: Enrich insights with tenant_id and ingredient context
        enriched_insights = self._enrich_insights(
            forecast_results.get('insights', []),
            tenant_id,
            ingredient_id
        )

        # Step 3: Post insights to AI Insights Service
        if enriched_insights:
            post_results = await self.ai_insights_client.create_insights_bulk(
                tenant_id=UUID(tenant_id),
                insights=enriched_insights
            )

            logger.info(
                "Price insights posted to AI Insights Service",
                ingredient_id=ingredient_id,
                total=post_results['total'],
                successful=post_results['successful'],
                failed=post_results['failed']
            )
        else:
            post_results = {'total': 0, 'successful': 0, 'failed': 0}
            logger.info("No insights to post for ingredient", ingredient_id=ingredient_id)

        # Step 4: Publish insight events to RabbitMQ
        created_insights = post_results.get('created_insights', [])
        if created_insights:
            ingredient_context = {'ingredient_id': ingredient_id}
            await self._publish_insight_events(
                tenant_id=tenant_id,
                insights=created_insights,
                ingredient_context=ingredient_context
            )

        # Step 5: Return comprehensive results
        return {
            'tenant_id': tenant_id,
            'ingredient_id': ingredient_id,
            'forecasted_at': forecast_results['forecasted_at'],
            'history_days': forecast_results['history_days'],
            'forecast': forecast_results.get('forecast', {}),
            'recommendation': forecast_results.get('recommendations', {}),
            'bulk_opportunity': forecast_results.get('bulk_opportunities', {}),
            'insights_generated': len(enriched_insights),
            'insights_posted': post_results['successful'],
            'insights_failed': post_results['failed'],
            'created_insights': post_results.get('created_insights', [])
        }

    def _enrich_insights(
        self,
        insights: List[Dict[str, Any]],
        tenant_id: str,
        ingredient_id: str
    ) -> List[Dict[str, Any]]:
        """
        Enrich insights with required fields for the AI Insights Service.

        Args:
            insights: Raw insights from the forecaster
            tenant_id: Tenant identifier
            ingredient_id: Ingredient identifier

        Returns:
            Enriched insights ready for posting
        """
        enriched = []

        for insight in insights:
            # Add required tenant_id
            enriched_insight = insight.copy()
            enriched_insight['tenant_id'] = tenant_id

            # Add ingredient context to metrics
            if 'metrics_json' not in enriched_insight:
                enriched_insight['metrics_json'] = {}

            enriched_insight['metrics_json']['ingredient_id'] = ingredient_id

            # Add source metadata
            enriched_insight['source_service'] = 'procurement'
            enriched_insight['source_model'] = 'price_forecaster'
            enriched_insight['detected_at'] = datetime.utcnow().isoformat()

            enriched.append(enriched_insight)

        return enriched

    async def forecast_all_ingredients(
        self,
        tenant_id: str,
        ingredients_data: Dict[str, pd.DataFrame],
        forecast_horizon_days: int = 30,
        min_history_days: int = 180
    ) -> Dict[str, Any]:
        """
        Forecast prices for all ingredients for a tenant.

        Args:
            tenant_id: Tenant identifier
            ingredients_data: Dict of {ingredient_id: price_history DataFrame}
            forecast_horizon_days: Days to forecast
            min_history_days: Minimum history required

        Returns:
            Comprehensive forecasting results
        """
        logger.info(
            "Forecasting prices for all ingredients",
            tenant_id=tenant_id,
            ingredients=len(ingredients_data)
        )

        all_results = []
        total_insights_posted = 0
        buy_now_count = 0
        wait_count = 0
        bulk_opportunity_count = 0

        # Forecast each ingredient
        for ingredient_id, price_history in ingredients_data.items():
            try:
                results = await self.forecast_and_post_insights(
                    tenant_id=tenant_id,
                    ingredient_id=ingredient_id,
                    price_history=price_history,
                    forecast_horizon_days=forecast_horizon_days,
                    min_history_days=min_history_days
                )

                all_results.append(results)
                total_insights_posted += results['insights_posted']

                # Count recommendations
                action = results['recommendation'].get('action')
                if action == 'buy_now':
                    buy_now_count += 1
                elif action in ['wait', 'wait_for_dip']:
                    wait_count += 1

                if results['bulk_opportunity'].get('has_bulk_opportunity'):
                    bulk_opportunity_count += 1

            except Exception as e:
                logger.error(
                    "Error forecasting ingredient",
                    ingredient_id=ingredient_id,
                    error=str(e)
                )

        # Generate summary insight
        if buy_now_count > 0 or bulk_opportunity_count > 0:
            summary_insight = self._generate_portfolio_summary_insight(
                tenant_id, all_results, buy_now_count, wait_count, bulk_opportunity_count
            )

            if summary_insight:
                enriched_summary = self._enrich_insights(
                    [summary_insight], tenant_id, 'all_ingredients'
                )

                post_results = await self.ai_insights_client.create_insights_bulk(
                    tenant_id=UUID(tenant_id),
                    insights=enriched_summary
                )

                total_insights_posted += post_results['successful']

        logger.info(
            "All ingredients forecasting complete",
            tenant_id=tenant_id,
            ingredients_forecasted=len(all_results),
            total_insights_posted=total_insights_posted,
            buy_now_recommendations=buy_now_count,
            bulk_opportunities=bulk_opportunity_count
        )

        return {
            'tenant_id': tenant_id,
            'forecasted_at': datetime.utcnow().isoformat(),
            'ingredients_forecasted': len(all_results),
            'ingredient_results': all_results,
            'total_insights_posted': total_insights_posted,
            'buy_now_count': buy_now_count,
            'wait_count': wait_count,
            'bulk_opportunity_count': bulk_opportunity_count
        }

    async def _publish_insight_events(
        self,
        tenant_id: str,
        insights: List[Dict[str, Any]],
        ingredient_context: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Publish insight events to RabbitMQ for alert processing.

        Args:
            tenant_id: Tenant identifier
            insights: List of created insights
            ingredient_context: Additional context about the ingredient
        """
        if not self.event_publisher:
            logger.warning("No event publisher available for price insights")
            return

        for insight in insights:
            # Determine severity based on confidence and priority
            confidence = insight.get('confidence', 0)
            priority = insight.get('priority', 'medium')

            # Map priority to severity, with confidence as tiebreaker
            if priority == 'critical' or (priority == 'high' and confidence >= 70):
                severity = 'high'
            elif priority == 'high' or (priority == 'medium' and confidence >= 80):
                severity = 'medium'
            else:
                severity = 'low'

            # Prepare the event data
            event_data = {
                'insight_id': insight.get('id'),
                'type': insight.get('type'),
                'title': insight.get('title'),
                'description': insight.get('description'),
                'category': insight.get('category'),
                'priority': insight.get('priority'),
                'confidence': confidence,
                'recommendation': insight.get('recommendation_actions', []),
                'impact_type': insight.get('impact_type'),
                'impact_value': insight.get('impact_value'),
                'ingredient_id': ingredient_context.get('ingredient_id') if ingredient_context else None,
                'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
                'source_service': 'procurement',
                'source_model': 'price_forecaster'
            }

            try:
                await self.event_publisher.publish_recommendation(
                    event_type='ai_price_forecast',
                    tenant_id=tenant_id,
                    severity=severity,
                    data=event_data
                )
                logger.info(
                    "Published price insight event",
                    tenant_id=tenant_id,
                    insight_id=insight.get('id'),
                    severity=severity
                )
            except Exception as e:
                logger.error(
                    "Failed to publish price insight event",
                    tenant_id=tenant_id,
                    insight_id=insight.get('id'),
                    error=str(e)
                )

    def _generate_portfolio_summary_insight(
        self,
        tenant_id: str,
        all_results: List[Dict[str, Any]],
        buy_now_count: int,
        wait_count: int,
        bulk_opportunity_count: int
    ) -> Optional[Dict[str, Any]]:
        """
        Generate a portfolio-level summary insight.

        Args:
            tenant_id: Tenant identifier
            all_results: All ingredient forecast results
            buy_now_count: Number of buy-now recommendations
            wait_count: Number of wait recommendations
            bulk_opportunity_count: Number of bulk opportunities

        Returns:
            Summary insight or None
        """
        if buy_now_count == 0 and bulk_opportunity_count == 0:
            return None

        # Calculate potential savings from bulk opportunities
        total_potential_savings = 0
        for result in all_results:
            bulk_opp = result.get('bulk_opportunity', {})
            if bulk_opp.get('has_bulk_opportunity'):
                # Estimate savings (simplified)
                savings_pct = bulk_opp.get('potential_savings_pct', 0)
                total_potential_savings += savings_pct

        avg_potential_savings = total_potential_savings / max(1, bulk_opportunity_count)

        description_parts = []
        if buy_now_count > 0:
            description_parts.append(f'{buy_now_count} ingredients show price increases - purchase soon')
        if bulk_opportunity_count > 0:
            description_parts.append(f'{bulk_opportunity_count} ingredients have bulk buying opportunities (avg {avg_potential_savings:.1f}% savings)')

        return {
            'type': 'recommendation',
            'priority': 'high' if buy_now_count > 2 else 'medium',
            'category': 'procurement',
            'title': f'Procurement Timing Opportunities: {buy_now_count + bulk_opportunity_count} Items',
            'description': 'Price forecast analysis identified procurement timing opportunities. ' + '. '.join(description_parts) + '.',
            'impact_type': 'cost_optimization',
            'impact_value': avg_potential_savings if bulk_opportunity_count > 0 else buy_now_count,
            'impact_unit': 'percentage' if bulk_opportunity_count > 0 else 'items',
            'confidence': 75,
            'metrics_json': {
                'ingredients_analyzed': len(all_results),
                'buy_now_count': buy_now_count,
                'wait_count': wait_count,
                'bulk_opportunity_count': bulk_opportunity_count,
                'avg_potential_savings_pct': round(avg_potential_savings, 2)
            },
            'actionable': True,
            'recommendation_actions': [
                {
                    'label': 'Review Price Forecasts',
                    'action': 'review_price_forecasts',
                    'params': {'tenant_id': tenant_id}
                },
                {
                    'label': 'Create Optimized Orders',
                    'action': 'create_optimized_purchase_orders',
                    'params': {'tenant_id': tenant_id}
                }
            ],
            'source_service': 'procurement',
            'source_model': 'price_forecaster'
        }

    async def get_price_forecast(
        self,
        ingredient_id: str
    ) -> Optional[Dict[str, Any]]:
        """
        Get the cached seasonal patterns from the most recent forecast for an ingredient.

        Args:
            ingredient_id: Ingredient identifier

        Returns:
            Seasonal patterns or None if not forecasted
        """
        return self.forecaster.get_seasonal_patterns(ingredient_id)

    async def get_volatility_assessment(
        self,
        ingredient_id: str
    ) -> Optional[Dict[str, Any]]:
        """
        Get cached volatility assessment for an ingredient.

        Args:
            ingredient_id: Ingredient identifier

        Returns:
            Volatility assessment or None if not assessed
        """
        return self.forecaster.get_volatility_score(ingredient_id)

    async def close(self):
        """Close HTTP client connections."""
        await self.ai_insights_client.close()
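

# Usage sketch (illustrative): wiring the orchestrator into a batch job.
# `load_price_histories` is a hypothetical data-access helper returning
# {ingredient_id: DataFrame}; the AI Insights Service must be reachable.
#
#     orchestrator = PriceInsightsOrchestrator(event_publisher=publisher)
#     try:
#         results = await orchestrator.forecast_all_ingredients(
#             tenant_id=str(tenant_uuid),
#             ingredients_data=await load_price_histories(tenant_uuid),
#         )
#         print(results['buy_now_count'], results['bulk_opportunity_count'])
#     finally:
#         await orchestrator.close()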

399  services/procurement/app/ml/supplier_insights_orchestrator.py  Normal file
@@ -0,0 +1,399 @@
"""
Supplier Insights Orchestrator

Coordinates supplier performance analysis and insight posting.
"""

import pandas as pd
from typing import Dict, List, Any, Optional
import structlog
from datetime import datetime
from uuid import UUID
import sys
import os

# Add shared clients to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from shared.clients.ai_insights_client import AIInsightsClient
from shared.messaging import UnifiedEventPublisher

from app.ml.supplier_performance_predictor import SupplierPerformancePredictor

logger = structlog.get_logger()


class SupplierInsightsOrchestrator:
    """
    Orchestrates the supplier performance analysis and insight generation workflow.

    Workflow:
    1. Analyze supplier performance from historical orders
    2. Generate insights for procurement risk management
    3. Post insights to AI Insights Service
    4. Publish recommendation events to RabbitMQ
    5. Provide supplier comparison and recommendations
    6. Track supplier reliability scores
    """

    def __init__(
        self,
        ai_insights_base_url: str = "http://ai-insights-service:8000",
        event_publisher: Optional[UnifiedEventPublisher] = None
    ):
        self.predictor = SupplierPerformancePredictor()
        self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
        self.event_publisher = event_publisher

    async def analyze_and_post_supplier_insights(
        self,
        tenant_id: str,
        supplier_id: str,
        order_history: pd.DataFrame,
        min_orders: int = 10
    ) -> Dict[str, Any]:
        """
        Complete workflow: analyze a supplier and post insights.

        Args:
            tenant_id: Tenant identifier
            supplier_id: Supplier identifier
            order_history: Historical order data
            min_orders: Minimum orders for analysis

        Returns:
            Workflow results with analysis and posted insights
        """
        logger.info(
            "Starting supplier performance analysis workflow",
            tenant_id=tenant_id,
            supplier_id=supplier_id,
            orders=len(order_history)
        )

        # Step 1: Analyze supplier performance
        analysis_results = await self.predictor.analyze_supplier_performance(
            tenant_id=tenant_id,
            supplier_id=supplier_id,
            order_history=order_history,
            min_orders=min_orders
        )

        logger.info(
            "Supplier analysis complete",
            supplier_id=supplier_id,
            reliability_score=analysis_results.get('reliability_score'),
            insights_generated=len(analysis_results.get('insights', []))
        )

        # Step 2: Enrich insights with tenant_id and supplier context
        enriched_insights = self._enrich_insights(
            analysis_results.get('insights', []),
            tenant_id,
            supplier_id
        )

        # Step 3: Post insights to AI Insights Service
        if enriched_insights:
            post_results = await self.ai_insights_client.create_insights_bulk(
                tenant_id=UUID(tenant_id),
                insights=enriched_insights
            )

            logger.info(
                "Supplier insights posted to AI Insights Service",
                supplier_id=supplier_id,
                total=post_results['total'],
                successful=post_results['successful'],
                failed=post_results['failed']
            )
        else:
            post_results = {'total': 0, 'successful': 0, 'failed': 0}
            logger.info("No insights to post for supplier", supplier_id=supplier_id)

        # Step 4: Publish insight events to RabbitMQ
        created_insights = post_results.get('created_insights', [])
        if created_insights:
            supplier_context = {'supplier_id': supplier_id}
            await self._publish_insight_events(
                tenant_id=tenant_id,
                insights=created_insights,
                supplier_context=supplier_context
            )

        # Step 5: Return comprehensive results
        return {
            'tenant_id': tenant_id,
            'supplier_id': supplier_id,
            'analyzed_at': analysis_results['analyzed_at'],
            'orders_analyzed': analysis_results['orders_analyzed'],
            'reliability_score': analysis_results.get('reliability_score'),
            'risk_assessment': analysis_results.get('risk_assessment', {}),
            'predictions': analysis_results.get('predictions', {}),
            'insights_generated': len(enriched_insights),
            'insights_posted': post_results['successful'],
            'insights_failed': post_results['failed'],
            'created_insights': post_results.get('created_insights', [])
        }

    def _enrich_insights(
        self,
        insights: List[Dict[str, Any]],
        tenant_id: str,
        supplier_id: str
    ) -> List[Dict[str, Any]]:
        """
        Enrich insights with required fields for the AI Insights Service.

        Args:
            insights: Raw insights from the predictor
            tenant_id: Tenant identifier
            supplier_id: Supplier identifier

        Returns:
            Enriched insights ready for posting
        """
        enriched = []

        for insight in insights:
            # Add required tenant_id
            enriched_insight = insight.copy()
            enriched_insight['tenant_id'] = tenant_id

            # Add supplier context to metrics
            if 'metrics_json' not in enriched_insight:
                enriched_insight['metrics_json'] = {}

            enriched_insight['metrics_json']['supplier_id'] = supplier_id

            # Add source metadata
            enriched_insight['source_service'] = 'procurement'
            enriched_insight['source_model'] = 'supplier_performance_predictor'
            enriched_insight['detected_at'] = datetime.utcnow().isoformat()

            enriched.append(enriched_insight)

        return enriched

    async def _publish_insight_events(
        self,
        tenant_id: str,
        insights: List[Dict[str, Any]],
        supplier_context: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Publish insight events to RabbitMQ for alert processing.

        Args:
            tenant_id: Tenant identifier
            insights: List of created insights
            supplier_context: Additional context about the supplier
        """
        if not self.event_publisher:
            logger.warning("No event publisher available for supplier insights")
            return

        for insight in insights:
            # Determine severity based on confidence and priority
            confidence = insight.get('confidence', 0)
            priority = insight.get('priority', 'medium')

            # Map priority to severity, with confidence as tiebreaker
            if priority == 'critical' or (priority == 'high' and confidence >= 70):
                severity = 'high'
            elif priority == 'high' or (priority == 'medium' and confidence >= 80):
                severity = 'medium'
            else:
                severity = 'low'

            # Prepare the event data
            event_data = {
                'insight_id': insight.get('id'),
                'type': insight.get('type'),
                'title': insight.get('title'),
                'description': insight.get('description'),
                'category': insight.get('category'),
                'priority': insight.get('priority'),
                'confidence': confidence,
                'recommendation': insight.get('recommendation_actions', []),
                'impact_type': insight.get('impact_type'),
                'impact_value': insight.get('impact_value'),
                'supplier_id': supplier_context.get('supplier_id') if supplier_context else None,
                'timestamp': insight.get('detected_at', datetime.utcnow().isoformat()),
                'source_service': 'procurement',
                'source_model': 'supplier_performance_predictor'
            }

            try:
                await self.event_publisher.publish_recommendation(
                    event_type='ai_supplier_recommendation',
                    tenant_id=tenant_id,
                    severity=severity,
                    data=event_data
                )
                logger.info(
                    "Published supplier insight event",
                    tenant_id=tenant_id,
                    insight_id=insight.get('id'),
                    severity=severity
                )
            except Exception as e:
                logger.error(
                    "Failed to publish supplier insight event",
                    tenant_id=tenant_id,
                    insight_id=insight.get('id'),
                    error=str(e)
                )

    async def analyze_all_suppliers(
        self,
        tenant_id: str,
        suppliers_data: Dict[str, pd.DataFrame],
        min_orders: int = 10
    ) -> Dict[str, Any]:
        """
        Analyze all suppliers for a tenant and generate comparative insights.

        Args:
            tenant_id: Tenant identifier
            suppliers_data: Dict of {supplier_id: order_history DataFrame}
            min_orders: Minimum orders for analysis

        Returns:
            Comprehensive analysis with supplier comparison
        """
        logger.info(
            "Analyzing all suppliers for tenant",
            tenant_id=tenant_id,
            suppliers=len(suppliers_data)
        )

        all_results = []
        total_insights_posted = 0

        # Analyze each supplier
        for supplier_id, order_history in suppliers_data.items():
            try:
                results = await self.analyze_and_post_supplier_insights(
                    tenant_id=tenant_id,
                    supplier_id=supplier_id,
                    order_history=order_history,
                    min_orders=min_orders
                )

                all_results.append(results)
                total_insights_posted += results['insights_posted']

            except Exception as e:
                logger.error(
                    "Error analyzing supplier",
                    supplier_id=supplier_id,
                    error=str(e)
                )

        # Compare suppliers
        comparison = self.predictor.compare_suppliers(
            [r for r in all_results if r.get('reliability_score') is not None]
        )

        # Generate comparative insights if needed
        comparative_insights = self._generate_comparative_insights(
            tenant_id, comparison
        )

        if comparative_insights:
            enriched_comparative = self._enrich_insights(
                comparative_insights, tenant_id, 'all_suppliers'
            )

            post_results = await self.ai_insights_client.create_insights_bulk(
                tenant_id=UUID(tenant_id),
                insights=enriched_comparative
            )

            total_insights_posted += post_results['successful']

        logger.info(
            "All suppliers analysis complete",
            tenant_id=tenant_id,
            suppliers_analyzed=len(all_results),
            total_insights_posted=total_insights_posted
        )

        return {
            'tenant_id': tenant_id,
            'analyzed_at': datetime.utcnow().isoformat(),
            'suppliers_analyzed': len(all_results),
            'supplier_results': all_results,
            'comparison': comparison,
            'total_insights_posted': total_insights_posted
        }

    def _generate_comparative_insights(
        self,
        tenant_id: str,
        comparison: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Generate insights from supplier comparison.

        Args:
            tenant_id: Tenant identifier
            comparison: Supplier comparison results

        Returns:
            List of comparative insights
        """
        insights = []

        if 'recommendations' in comparison and comparison['recommendations']:
            for rec in comparison['recommendations']:
                if 'URGENT' in rec['recommendation']:
                    priority = 'critical'
                elif 'high-risk' in rec.get('reason', '').lower():
                    priority = 'high'
                else:
                    priority = 'medium'

                insights.append({
                    'type': 'recommendation',
                    'priority': priority,
                    'category': 'procurement',
                    'title': 'Supplier Comparison: Action Required',
                    'description': rec['recommendation'],
                    'impact_type': 'cost_optimization',
                    'impact_value': 0,
                    'impact_unit': 'recommendation',
                    'confidence': 85,
                    'metrics_json': {
                        'comparison_type': 'multi_supplier',
                        'suppliers_compared': comparison['suppliers_compared'],
                        'top_supplier': comparison.get('top_supplier'),
                        'top_score': comparison.get('top_supplier_score'),
                        'reason': rec.get('reason', '')
                    },
                    'actionable': True,
                    'recommendation_actions': [
                        {
                            'label': 'Review Supplier Portfolio',
                            'action': 'review_supplier_portfolio',
                            'params': {'tenant_id': tenant_id}
                        }
                    ],
                    'source_service': 'procurement',
                    'source_model': 'supplier_performance_predictor'
                })

        return insights
|
||||
|
||||
async def get_supplier_risk_score(
|
||||
self,
|
||||
supplier_id: str
|
||||
) -> Optional[int]:
|
||||
"""
|
||||
Get cached reliability score for a supplier.
|
||||
|
||||
Args:
|
||||
supplier_id: Supplier identifier
|
||||
|
||||
Returns:
|
||||
Reliability score (0-100) or None if not analyzed
|
||||
"""
|
||||
return self.predictor.get_supplier_reliability_score(supplier_id)
|
||||
|
||||
async def close(self):
|
||||
"""Close HTTP client connections."""
|
||||
await self.ai_insights_client.close()
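
# Usage sketch (illustrative only, not part of the service wiring): assumes an
# already-constructed instance of the orchestrator class above, here named
# `orchestrator`, plus per-supplier order-history DataFrames shaped as
# documented in analyze_supplier_performance. Note tenant_id must be a valid
# UUID string, since create_insights_bulk receives UUID(tenant_id).
#
#     suppliers_data = {
#         'SUP-001': sup_001_orders_df,
#         'SUP-002': sup_002_orders_df,
#     }
#     results = await orchestrator.analyze_all_suppliers(
#         tenant_id='<tenant-uuid>',
#         suppliers_data=suppliers_data,
#         min_orders=10,
#     )
#     best = results['comparison'].get('top_supplier')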
701
services/procurement/app/ml/supplier_performance_predictor.py
Normal file
@@ -0,0 +1,701 @@
"""
|
||||
Supplier Performance Predictor
|
||||
Predicts supplier reliability, delivery delays, and quality issues
|
||||
Generates insights for procurement risk management
|
||||
"""
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from typing import Dict, List, Any, Optional, Tuple
|
||||
import structlog
|
||||
from datetime import datetime, timedelta
|
||||
from collections import defaultdict
|
||||
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
|
||||
from sklearn.preprocessing import StandardScaler
|
||||
import warnings
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class SupplierPerformancePredictor:
|
||||
"""
|
||||
Predicts supplier performance metrics for procurement risk management.
|
||||
|
||||
Capabilities:
|
||||
1. Delivery delay probability prediction
|
||||
2. Quality issue likelihood scoring
|
||||
3. Supplier reliability scoring (0-100)
|
||||
4. Alternative supplier recommendations
|
||||
5. Procurement risk assessment
|
||||
6. Insight generation for high-risk suppliers
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.delay_model = None
|
||||
self.quality_model = None
|
||||
self.reliability_scores = {}
|
||||
self.scaler = StandardScaler()
|
||||
self.feature_columns = []
|
||||
|
||||
    async def analyze_supplier_performance(
        self,
        tenant_id: str,
        supplier_id: str,
        order_history: pd.DataFrame,
        min_orders: int = 10
    ) -> Dict[str, Any]:
        """
        Analyze historical supplier performance and generate insights.

        Args:
            tenant_id: Tenant identifier
            supplier_id: Supplier identifier
            order_history: Historical orders with columns:
                - order_date
                - expected_delivery_date
                - actual_delivery_date
                - order_quantity
                - received_quantity
                - quality_issues (bool)
                - quality_score (0-100)
                - order_value
            min_orders: Minimum orders required for analysis

        Returns:
            Dictionary with performance metrics and insights
        """
        logger.info(
            "Analyzing supplier performance",
            tenant_id=tenant_id,
            supplier_id=supplier_id,
            orders=len(order_history)
        )

        if len(order_history) < min_orders:
            logger.warning(
                "Insufficient order history",
                supplier_id=supplier_id,
                orders=len(order_history),
                required=min_orders
            )
            return self._insufficient_data_response(tenant_id, supplier_id)

        # Calculate performance metrics
        metrics = self._calculate_performance_metrics(order_history)

        # Calculate reliability score
        reliability_score = self._calculate_reliability_score(metrics)

        # Predict future performance
        predictions = self._predict_future_performance(order_history, metrics)

        # Assess procurement risk
        risk_assessment = self._assess_procurement_risk(
            metrics, reliability_score, predictions
        )

        # Generate insights
        insights = self._generate_supplier_insights(
            tenant_id, supplier_id, metrics, reliability_score,
            risk_assessment, predictions
        )

        # Store reliability score
        self.reliability_scores[supplier_id] = reliability_score

        logger.info(
            "Supplier performance analysis complete",
            supplier_id=supplier_id,
            reliability_score=reliability_score,
            insights_generated=len(insights)
        )

        return {
            'tenant_id': tenant_id,
            'supplier_id': supplier_id,
            'analyzed_at': datetime.utcnow().isoformat(),
            'orders_analyzed': len(order_history),
            'metrics': metrics,
            'reliability_score': reliability_score,
            'predictions': predictions,
            'risk_assessment': risk_assessment,
            'insights': insights
        }

    def _calculate_performance_metrics(
        self,
        order_history: pd.DataFrame
    ) -> Dict[str, Any]:
        """
        Calculate comprehensive supplier performance metrics.

        Args:
            order_history: Historical order data

        Returns:
            Dictionary of performance metrics
        """
        # Work on a copy so the caller's DataFrame is not mutated
        order_history = order_history.copy()

        # Ensure datetime columns
        order_history['order_date'] = pd.to_datetime(order_history['order_date'])
        order_history['expected_delivery_date'] = pd.to_datetime(order_history['expected_delivery_date'])
        order_history['actual_delivery_date'] = pd.to_datetime(order_history['actual_delivery_date'])

        # Calculate delivery delays
        order_history['delivery_delay_days'] = (
            order_history['actual_delivery_date'] - order_history['expected_delivery_date']
        ).dt.days

        order_history['is_delayed'] = order_history['delivery_delay_days'] > 0
        order_history['is_early'] = order_history['delivery_delay_days'] < 0

        # Calculate quantity accuracy
        order_history['quantity_accuracy'] = (
            order_history['received_quantity'] / order_history['order_quantity']
        )

        order_history['is_short_delivery'] = order_history['quantity_accuracy'] < 1.0
        order_history['is_over_delivery'] = order_history['quantity_accuracy'] > 1.0

        metrics = {
            # Delivery metrics
            'total_orders': int(len(order_history)),
            'on_time_orders': int((~order_history['is_delayed']).sum()),
            'delayed_orders': int(order_history['is_delayed'].sum()),
            'on_time_rate': float((~order_history['is_delayed']).mean() * 100),
            'avg_delivery_delay_days': float(order_history[order_history['is_delayed']]['delivery_delay_days'].mean()) if order_history['is_delayed'].any() else 0.0,
            'max_delivery_delay_days': int(order_history['delivery_delay_days'].max()),
            'delivery_delay_std': float(order_history['delivery_delay_days'].std()),

            # Quantity accuracy metrics
            'avg_quantity_accuracy': float(order_history['quantity_accuracy'].mean() * 100),
            'short_deliveries': int(order_history['is_short_delivery'].sum()),
            'short_delivery_rate': float(order_history['is_short_delivery'].mean() * 100),

            # Quality metrics
            'quality_issues': int(order_history['quality_issues'].sum()) if 'quality_issues' in order_history.columns else 0,
            'quality_issue_rate': float(order_history['quality_issues'].mean() * 100) if 'quality_issues' in order_history.columns else 0.0,
            'avg_quality_score': float(order_history['quality_score'].mean()) if 'quality_score' in order_history.columns else 100.0,

            # Consistency metrics (lower variance = higher consistency; clamped at 0
            # so a highly erratic supplier cannot produce a negative score)
            'delivery_consistency': float(max(0.0, 100 - order_history['delivery_delay_days'].std() * 10)),
            'quantity_consistency': float(max(0.0, 100 - (order_history['quantity_accuracy'].std() * 100))),

            # Recent trend (last 30 days vs overall)
            'recent_on_time_rate': self._calculate_recent_trend(order_history, 'is_delayed', days=30),

            # Cost metrics
            'total_order_value': float(order_history['order_value'].sum()) if 'order_value' in order_history.columns else 0.0,
            'avg_order_value': float(order_history['order_value'].mean()) if 'order_value' in order_history.columns else 0.0
        }

        # Ensure all metrics are valid (no NaN)
        for key, value in metrics.items():
            if isinstance(value, float) and np.isnan(value):
                metrics[key] = 0.0

        return metrics
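
    # Illustrative example (assumed numbers, not real data): two orders, one
    # delivered 2 days late and 5% short, one a day early and complete, yield
    # roughly:
    #   total_orders=2, delayed_orders=1, on_time_rate=50.0,
    #   avg_delivery_delay_days=2.0 (mean over delayed orders only),
    #   avg_quantity_accuracy=97.5, short_delivery_rate=50.0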

    def _calculate_recent_trend(
        self,
        order_history: pd.DataFrame,
        metric_column: str,
        days: int = 30
    ) -> float:
        """Calculate recent trend for a metric."""
        cutoff_date = datetime.utcnow() - timedelta(days=days)
        recent_orders = order_history[order_history['order_date'] >= cutoff_date]

        if len(recent_orders) < 3:
            return 0.0  # Not enough recent data

        if metric_column == 'is_delayed':
            return float((~recent_orders['is_delayed']).mean() * 100)
        else:
            return float(recent_orders[metric_column].mean() * 100)

    def _calculate_reliability_score(
        self,
        metrics: Dict[str, Any]
    ) -> int:
        """
        Calculate overall supplier reliability score (0-100).

        Factors:
        - On-time delivery rate (40%)
        - Quantity accuracy (20%)
        - Quality score (25%)
        - Consistency (15%)
        """
        # On-time delivery score (40 points)
        on_time_score = metrics['on_time_rate'] * 0.40

        # Quantity accuracy score (20 points)
        quantity_score = min(100, metrics['avg_quantity_accuracy']) * 0.20

        # Quality score (25 points)
        quality_score = metrics['avg_quality_score'] * 0.25

        # Consistency score (15 points)
        # Average of delivery and quantity consistency
        consistency_score = (
            (metrics['delivery_consistency'] + metrics['quantity_consistency']) / 2
        ) * 0.15

        total_score = on_time_score + quantity_score + quality_score + consistency_score

        # Penalties
        # Severe penalty for high quality issue rate
        if metrics['quality_issue_rate'] > 10:
            total_score *= 0.8  # 20% penalty

        # Penalty for high short delivery rate
        if metrics['short_delivery_rate'] > 15:
            total_score *= 0.9  # 10% penalty

        return int(round(max(0, min(100, total_score))))
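
    # Worked example of the weighting (assumed inputs): on_time_rate=90,
    # avg_quantity_accuracy=98, avg_quality_score=92, delivery_consistency=85,
    # quantity_consistency=75:
    #   90*0.40 + 98*0.20 + 92*0.25 + ((85+75)/2)*0.15
    #   = 36.0 + 19.6 + 23.0 + 12.0 = 90.6 -> rounds to 91
    # A quality_issue_rate above 10 would then apply the 20% penalty:
    #   90.6 * 0.8 = 72.48 -> 72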

    def _predict_future_performance(
        self,
        order_history: pd.DataFrame,
        metrics: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Predict future supplier performance based on trends.

        Args:
            order_history: Historical order data
            metrics: Calculated performance metrics

        Returns:
            Dictionary of predictions
        """
        # Simple trend-based predictions
        # For production, could use ML models trained on multi-supplier data

        predictions = {
            'next_order_delay_probability': 0.0,
            'next_order_quality_issue_probability': 0.0,
            'predicted_delivery_days': 0,
            'confidence': 0
        }

        # Delay probability based on historical rate and recent trend
        historical_delay_rate = metrics['delayed_orders'] / max(1, metrics['total_orders'])

        if metrics['recent_on_time_rate'] > 0:
            # Weight recent performance higher than the long-run average
            recent_delay_rate = 1 - (metrics['recent_on_time_rate'] / 100)
            predicted_delay_prob = (historical_delay_rate * 0.3) + (recent_delay_rate * 0.7)
        else:
            # No usable recent data (recent_on_time_rate defaults to 0.0):
            # fall back to the historical rate alone
            predicted_delay_prob = historical_delay_rate

        predictions['next_order_delay_probability'] = float(min(1.0, max(0.0, predicted_delay_prob)))

        # Quality issue probability
        if metrics['quality_issues'] > 0:
            quality_issue_prob = metrics['quality_issue_rate'] / 100
            predictions['next_order_quality_issue_probability'] = float(quality_issue_prob)

        # Predicted delivery days (expected delay)
        if metrics['avg_delivery_delay_days'] > 0:
            predictions['predicted_delivery_days'] = int(round(metrics['avg_delivery_delay_days']))

        # Confidence based on data quantity and recency
        if metrics['total_orders'] >= 50:
            predictions['confidence'] = 90
        elif metrics['total_orders'] >= 30:
            predictions['confidence'] = 80
        elif metrics['total_orders'] >= 20:
            predictions['confidence'] = 70
        else:
            predictions['confidence'] = 60

        return predictions
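
    # Example of the blend (assumed numbers): a historical delay rate of 0.20
    # and a recent on-time rate of 85% (recent delay rate 0.15) give
    #   0.20 * 0.3 + 0.15 * 0.7 = 0.06 + 0.105 = 0.165 (~17% delay probability)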

    def _assess_procurement_risk(
        self,
        metrics: Dict[str, Any],
        reliability_score: int,
        predictions: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Assess overall procurement risk for this supplier.

        Risk levels: low, medium, high, critical
        """
        risk_factors = []
        risk_score = 0  # 0-100, higher = more risky

        # Low reliability
        if reliability_score < 60:
            risk_factors.append('Low reliability score')
            risk_score += 30
        elif reliability_score < 75:
            risk_factors.append('Medium reliability score')
            risk_score += 15

        # High delay probability
        if predictions['next_order_delay_probability'] > 0.5:
            risk_factors.append('High delay probability')
            risk_score += 25
        elif predictions['next_order_delay_probability'] > 0.3:
            risk_factors.append('Moderate delay probability')
            risk_score += 15

        # Quality issues
        if metrics['quality_issue_rate'] > 15:
            risk_factors.append('High quality issue rate')
            risk_score += 25
        elif metrics['quality_issue_rate'] > 5:
            risk_factors.append('Moderate quality issue rate')
            risk_score += 10

        # Quantity accuracy issues
        if metrics['short_delivery_rate'] > 20:
            risk_factors.append('Frequent short deliveries')
            risk_score += 15
        elif metrics['short_delivery_rate'] > 10:
            risk_factors.append('Occasional short deliveries')
            risk_score += 8

        # Low consistency
        if metrics['delivery_consistency'] < 60:
            risk_factors.append('Inconsistent delivery timing')
            risk_score += 10

        # Determine risk level
        if risk_score >= 70:
            risk_level = 'critical'
        elif risk_score >= 50:
            risk_level = 'high'
        elif risk_score >= 30:
            risk_level = 'medium'
        else:
            risk_level = 'low'

        return {
            'risk_level': risk_level,
            'risk_score': min(100, risk_score),
            'risk_factors': risk_factors,
            'recommendation': self._get_risk_recommendation(risk_level, risk_factors)
        }
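
    # Example scoring (assumed metrics): reliability 58 (+30), delay
    # probability 0.45 (+15), quality_issue_rate 8% (+10) -> risk_score 55,
    # which maps to the 'high' band (50-69)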

    def _get_risk_recommendation(
        self,
        risk_level: str,
        risk_factors: List[str]
    ) -> str:
        """Generate risk mitigation recommendation."""
        if risk_level == 'critical':
            return 'URGENT: Consider switching to an alternative supplier. Current supplier poses significant operational risk.'
        elif risk_level == 'high':
            return 'HIGH PRIORITY: Increase safety stock and have a backup supplier ready. Monitor closely.'
        elif risk_level == 'medium':
            return 'MONITOR: Keep standard safety stock. Review performance quarterly.'
        else:
            return 'LOW RISK: Supplier performing well. Maintain current relationship.'

    def _generate_supplier_insights(
        self,
        tenant_id: str,
        supplier_id: str,
        metrics: Dict[str, Any],
        reliability_score: int,
        risk_assessment: Dict[str, Any],
        predictions: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Generate actionable insights for the procurement team.

        Args:
            tenant_id: Tenant ID
            supplier_id: Supplier ID
            metrics: Performance metrics
            reliability_score: Overall reliability (0-100)
            risk_assessment: Risk assessment results
            predictions: Future performance predictions

        Returns:
            List of insight dictionaries
        """
        insights = []

        # Insight 1: Low reliability alert
        if reliability_score < 60:
            insights.append({
                'type': 'alert',
                'priority': 'critical' if reliability_score < 50 else 'high',
                'category': 'procurement',
                'title': f'Low Supplier Reliability: {reliability_score}/100',
                'description': f'Supplier {supplier_id} has a low reliability score of {reliability_score}. On-time rate: {metrics["on_time_rate"]:.1f}%, Quality: {metrics["avg_quality_score"]:.1f}. Consider alternative suppliers.',
                'impact_type': 'operational_risk',
                'impact_value': 100 - reliability_score,
                'impact_unit': 'risk_points',
                'confidence': 85,
                'metrics_json': {
                    'supplier_id': supplier_id,
                    'reliability_score': reliability_score,
                    'on_time_rate': round(metrics['on_time_rate'], 2),
                    'quality_score': round(metrics['avg_quality_score'], 2),
                    'quality_issue_rate': round(metrics['quality_issue_rate'], 2),
                    'delayed_orders': metrics['delayed_orders'],
                    'total_orders': metrics['total_orders']
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Find Alternative Supplier',
                        'action': 'search_alternative_suppliers',
                        'params': {'current_supplier_id': supplier_id}
                    },
                    {
                        'label': 'Increase Safety Stock',
                        'action': 'adjust_safety_stock',
                        'params': {'supplier_id': supplier_id, 'multiplier': 1.5}
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'supplier_performance_predictor'
            })

        # Insight 2: High delay probability
        if predictions['next_order_delay_probability'] > 0.4:
            delay_prob_pct = predictions['next_order_delay_probability'] * 100
            insights.append({
                'type': 'prediction',
                'priority': 'high' if delay_prob_pct > 60 else 'medium',
                'category': 'procurement',
                'title': f'High Delay Risk: {delay_prob_pct:.0f}% Probability',
                'description': f'Supplier {supplier_id} has a {delay_prob_pct:.0f}% probability of delaying the next order. Expected delay: {predictions["predicted_delivery_days"]} days. Plan accordingly.',
                'impact_type': 'operational_risk',
                'impact_value': delay_prob_pct,
                'impact_unit': 'probability_percent',
                'confidence': predictions['confidence'],
                'metrics_json': {
                    'supplier_id': supplier_id,
                    'delay_probability': round(delay_prob_pct, 2),
                    'predicted_delay_days': predictions['predicted_delivery_days'],
                    'historical_delay_rate': round(metrics['delayed_orders'] / max(1, metrics['total_orders']) * 100, 2),
                    'avg_delay_days': round(metrics['avg_delivery_delay_days'], 2)
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Order Earlier',
                        'action': 'adjust_order_lead_time',
                        'params': {
                            'supplier_id': supplier_id,
                            'additional_days': predictions['predicted_delivery_days'] + 2
                        }
                    },
                    {
                        'label': 'Increase Safety Stock',
                        'action': 'adjust_safety_stock',
                        'params': {'supplier_id': supplier_id, 'multiplier': 1.3}
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'supplier_performance_predictor'
            })

        # Insight 3: Quality issues
        if metrics['quality_issue_rate'] > 10:
            insights.append({
                'type': 'alert',
                'priority': 'high',
                'category': 'procurement',
                'title': f'Quality Issues: {metrics["quality_issue_rate"]:.1f}% of Orders',
                'description': f'Supplier {supplier_id} has quality issues in {metrics["quality_issue_rate"]:.1f}% of orders ({metrics["quality_issues"]} of {metrics["total_orders"]}). This impacts product quality and customer satisfaction.',
                'impact_type': 'quality_risk',
                'impact_value': metrics['quality_issue_rate'],
                'impact_unit': 'percentage',
                'confidence': 90,
                'metrics_json': {
                    'supplier_id': supplier_id,
                    'quality_issue_rate': round(metrics['quality_issue_rate'], 2),
                    'quality_issues': metrics['quality_issues'],
                    'avg_quality_score': round(metrics['avg_quality_score'], 2)
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Review Supplier Quality',
                        'action': 'schedule_supplier_review',
                        'params': {'supplier_id': supplier_id, 'reason': 'quality_issues'}
                    },
                    {
                        'label': 'Increase Inspection',
                        'action': 'increase_quality_checks',
                        'params': {'supplier_id': supplier_id}
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'supplier_performance_predictor'
            })

        # Insight 4: Excellent performance (positive insight)
        if reliability_score >= 90:
            insights.append({
                'type': 'insight',
                'priority': 'low',
                'category': 'procurement',
                'title': f'Excellent Supplier Performance: {reliability_score}/100',
                'description': f'Supplier {supplier_id} demonstrates excellent performance with a reliability score of {reliability_score}. On-time: {metrics["on_time_rate"]:.1f}%, Quality: {metrics["avg_quality_score"]:.1f}. Consider expanding the partnership.',
                'impact_type': 'positive_performance',
                'impact_value': reliability_score,
                'impact_unit': 'score',
                'confidence': 90,
                'metrics_json': {
                    'supplier_id': supplier_id,
                    'reliability_score': reliability_score,
                    'on_time_rate': round(metrics['on_time_rate'], 2),
                    'quality_score': round(metrics['avg_quality_score'], 2)
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Increase Order Volume',
                        'action': 'adjust_supplier_allocation',
                        'params': {'supplier_id': supplier_id, 'increase_pct': 20}
                    },
                    {
                        'label': 'Negotiate Better Terms',
                        'action': 'initiate_negotiation',
                        'params': {'supplier_id': supplier_id, 'reason': 'volume_increase'}
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'supplier_performance_predictor'
            })

        # Insight 5: Performance decline
        if metrics['recent_on_time_rate'] > 0 and metrics['recent_on_time_rate'] < metrics['on_time_rate'] - 15:
            insights.append({
                'type': 'alert',
                'priority': 'medium',
                'category': 'procurement',
                'title': 'Supplier Performance Decline Detected',
                'description': f'Supplier {supplier_id} recent performance ({metrics["recent_on_time_rate"]:.1f}% on-time) is significantly worse than its historical average ({metrics["on_time_rate"]:.1f}%). Investigate potential issues.',
                'impact_type': 'performance_decline',
                'impact_value': metrics['on_time_rate'] - metrics['recent_on_time_rate'],
                'impact_unit': 'percentage_points',
                'confidence': 75,
                'metrics_json': {
                    'supplier_id': supplier_id,
                    'recent_on_time_rate': round(metrics['recent_on_time_rate'], 2),
                    'historical_on_time_rate': round(metrics['on_time_rate'], 2),
                    'decline': round(metrics['on_time_rate'] - metrics['recent_on_time_rate'], 2)
                },
                'actionable': True,
                'recommendation_actions': [
                    {
                        'label': 'Contact Supplier',
                        'action': 'schedule_supplier_meeting',
                        'params': {'supplier_id': supplier_id, 'reason': 'performance_decline'}
                    },
                    {
                        'label': 'Monitor Closely',
                        'action': 'increase_monitoring_frequency',
                        'params': {'supplier_id': supplier_id}
                    }
                ],
                'source_service': 'procurement',
                'source_model': 'supplier_performance_predictor'
            })

        logger.info(
            "Generated supplier insights",
            supplier_id=supplier_id,
            insights=len(insights)
        )

        return insights

    def _insufficient_data_response(
        self,
        tenant_id: str,
        supplier_id: str
    ) -> Dict[str, Any]:
        """Return response when insufficient data is available."""
        return {
            'tenant_id': tenant_id,
            'supplier_id': supplier_id,
            'analyzed_at': datetime.utcnow().isoformat(),
            'orders_analyzed': 0,
            'metrics': {},
            'reliability_score': None,
            'predictions': {},
            'risk_assessment': {
                'risk_level': 'unknown',
                'risk_score': None,
                'risk_factors': ['Insufficient historical data'],
                'recommendation': 'Collect more order history before assessing supplier performance.'
            },
            'insights': []
        }

    def compare_suppliers(
        self,
        suppliers_analysis: List[Dict[str, Any]],
        product_category: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Compare multiple suppliers and provide recommendations.

        Args:
            suppliers_analysis: List of supplier analysis results
            product_category: Optional product category filter

        Returns:
            Comparison report with recommendations
        """
        if not suppliers_analysis:
            return {'error': 'No suppliers to compare'}

        # Sort by reliability score (treat missing or None scores as 0 so
        # sorting never compares None with int)
        ranked_suppliers = sorted(
            suppliers_analysis,
            key=lambda x: x.get('reliability_score') or 0,
            reverse=True
        )

        comparison = {
            'analyzed_at': datetime.utcnow().isoformat(),
            'suppliers_compared': len(ranked_suppliers),
            'product_category': product_category,
            'top_supplier': ranked_suppliers[0]['supplier_id'],
            'top_supplier_score': ranked_suppliers[0]['reliability_score'],
            'bottom_supplier': ranked_suppliers[-1]['supplier_id'],
            'bottom_supplier_score': ranked_suppliers[-1]['reliability_score'],
            'ranked_suppliers': [
                {
                    'supplier_id': s['supplier_id'],
                    'reliability_score': s['reliability_score'],
                    'risk_level': s['risk_assessment']['risk_level']
                }
                for s in ranked_suppliers
            ],
            'recommendations': []
        }

        # Generate comparison insights
        if len(ranked_suppliers) >= 2:
            score_gap = ranked_suppliers[0]['reliability_score'] - ranked_suppliers[-1]['reliability_score']

            if score_gap > 30:
                comparison['recommendations'].append({
                    'recommendation': f'Consider consolidating orders with top supplier {ranked_suppliers[0]["supplier_id"]} (score: {ranked_suppliers[0]["reliability_score"]})',
                    'reason': f'Significant performance gap ({score_gap} points) from lowest performer'
                })

        # Check for high-risk suppliers
        high_risk = [s for s in ranked_suppliers if s['risk_assessment']['risk_level'] in ['high', 'critical']]
        if high_risk:
            comparison['recommendations'].append({
                'recommendation': f'URGENT: Replace {len(high_risk)} high-risk supplier(s)',
                'reason': 'Significant operational risk from unreliable suppliers',
                'affected_suppliers': [s['supplier_id'] for s in high_risk]
            })

        return comparison

    def get_supplier_reliability_score(self, supplier_id: str) -> Optional[int]:
        """Get cached reliability score for a supplier."""
        return self.reliability_scores.get(supplier_id)
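
# Usage sketch (illustrative; variable names other than the class and its
# methods are assumptions). Expects an order-history DataFrame with the
# columns listed in analyze_supplier_performance:
#
#     predictor = SupplierPerformancePredictor()
#     result = await predictor.analyze_supplier_performance(
#         tenant_id='<tenant-uuid>',
#         supplier_id='SUP-001',
#         order_history=orders_df,
#         min_orders=10,
#     )
#     print(result['reliability_score'], result['risk_assessment']['risk_level'])
#
#     # Rank several analyzed suppliers against each other:
#     report = predictor.compare_suppliers([result, other_result])
#     print(report['top_supplier'], report['recommendations'])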