# ================================================================
# services/suppliers/app/services/performance_service.py
# ================================================================
"""
Supplier Performance Tracking Service
Comprehensive supplier performance calculation, tracking, and analytics
"""
from datetime import datetime, timedelta, timezone
from typing import List, Optional, Dict, Any, Tuple
from uuid import UUID
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func, and_, or_, desc, asc, cast, Float
from sqlalchemy.orm import selectinload
import structlog
from decimal import Decimal
from app.models.suppliers import (
    Supplier, PurchaseOrder, Delivery, SupplierQualityReview,
    PurchaseOrderStatus, DeliveryStatus, QualityRating, DeliveryRating
)
from app.models.performance import (
    SupplierPerformanceMetric, SupplierScorecard, SupplierAlert,
    PerformanceMetricType, PerformancePeriod, AlertType, AlertSeverity,
    AlertStatus
)
from app.schemas.performance import (
    PerformanceMetricCreate, ScorecardCreate, AlertCreate,
    PerformanceDashboardSummary, SupplierPerformanceInsights,
    PerformanceAnalytics, BusinessModelInsights
)
from app.core.config import settings
from shared.database.transactions import transactional

logger = structlog.get_logger()


class PerformanceTrackingService:
    """Service for tracking and calculating supplier performance metrics"""

    def __init__(self):
        self.logger = logger.bind(service="performance_tracking")

    @transactional
    async def calculate_supplier_performance(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        tenant_id: UUID,
        period: PerformancePeriod,
        period_start: datetime,
        period_end: datetime
    ) -> Optional[SupplierPerformanceMetric]:
        """Calculate comprehensive performance metrics for a supplier"""
        try:
            self.logger.info("Calculating supplier performance",
                             supplier_id=str(supplier_id),
                             period=period.value,
                             period_start=period_start.isoformat(),
                             period_end=period_end.isoformat())

            # Get base data for calculations
            orders_data = await self._get_orders_data(db, supplier_id, tenant_id, period_start, period_end)
            deliveries_data = await self._get_deliveries_data(db, supplier_id, tenant_id, period_start, period_end)
            quality_data = await self._get_quality_data(db, supplier_id, tenant_id, period_start, period_end)

            # Calculate delivery performance
            delivery_performance = await self._calculate_delivery_performance(
                orders_data, deliveries_data
            )

            # Calculate quality performance
            quality_performance = await self._calculate_quality_performance(
                deliveries_data, quality_data
            )

            # Calculate cost performance
            cost_performance = await self._calculate_cost_performance(
                orders_data, deliveries_data
            )

            # Calculate service performance
            service_performance = await self._calculate_service_performance(
                orders_data, quality_data
            )

            # Calculate overall performance (weighted average)
            overall_performance = (
                delivery_performance * 0.30 +
                quality_performance * 0.30 +
                cost_performance * 0.20 +
                service_performance * 0.20
            )
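            # Illustrative arithmetic (hypothetical scores, not from live data):
            # delivery 90, quality 80, cost 75, service 70 combine to
            # 90*0.30 + 80*0.30 + 75*0.20 + 70*0.20 = 80.0 overall.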

            # Create performance metrics for each category
            performance_metrics = []
            metrics_to_create = [
                (PerformanceMetricType.DELIVERY_PERFORMANCE, delivery_performance),
                (PerformanceMetricType.QUALITY_SCORE, quality_performance),
                (PerformanceMetricType.PRICE_COMPETITIVENESS, cost_performance),
                (PerformanceMetricType.COMMUNICATION_RATING, service_performance)
            ]

            for metric_type, value in metrics_to_create:
                # Get previous period value for trend calculation
                previous_value = await self._get_previous_period_value(
                    db, supplier_id, tenant_id, metric_type, period, period_start
                )

                # Calculate trend
                trend_direction, trend_percentage = self._calculate_trend(value, previous_value)

                # Prepare detailed metrics data
                metrics_data = await self._prepare_detailed_metrics(
                    metric_type, orders_data, deliveries_data, quality_data
                )

                # Create performance metric
                metric_create = PerformanceMetricCreate(
                    supplier_id=supplier_id,
                    metric_type=metric_type,
                    period=period,
                    period_start=period_start,
                    period_end=period_end,
                    metric_value=value,
                    target_value=self._get_target_value(metric_type),
                    total_orders=orders_data.get('total_orders', 0),
                    total_deliveries=deliveries_data.get('total_deliveries', 0),
                    on_time_deliveries=deliveries_data.get('on_time_deliveries', 0),
                    late_deliveries=deliveries_data.get('late_deliveries', 0),
                    quality_issues=quality_data.get('quality_issues', 0),
                    total_amount=orders_data.get('total_amount', Decimal('0')),
                    metrics_data=metrics_data
                )

                performance_metric = SupplierPerformanceMetric(
                    tenant_id=tenant_id,
                    supplier_id=supplier_id,
                    metric_type=metric_create.metric_type,
                    period=metric_create.period,
                    period_start=metric_create.period_start,
                    period_end=metric_create.period_end,
                    metric_value=metric_create.metric_value,
                    target_value=metric_create.target_value,
                    previous_value=previous_value,
                    total_orders=metric_create.total_orders,
                    total_deliveries=metric_create.total_deliveries,
                    on_time_deliveries=metric_create.on_time_deliveries,
                    late_deliveries=metric_create.late_deliveries,
                    quality_issues=metric_create.quality_issues,
                    total_amount=metric_create.total_amount,
                    metrics_data=metric_create.metrics_data,
                    trend_direction=trend_direction,
                    trend_percentage=trend_percentage,
                    calculated_at=datetime.now(timezone.utc)
                )
                db.add(performance_metric)
                performance_metrics.append(performance_metric)

            await db.flush()

            # Update supplier's overall performance ratings
            await self._update_supplier_ratings(db, supplier_id, overall_performance, quality_performance)

            self.logger.info("Supplier performance calculated successfully",
                             supplier_id=str(supplier_id),
                             overall_performance=overall_performance)

            # Return the first created metric (delivery performance), or None if nothing was calculated
            return performance_metrics[0] if performance_metrics else None
        except Exception as e:
            self.logger.error("Error calculating supplier performance",
                              supplier_id=str(supplier_id),
                              error=str(e))
            raise

    async def _get_orders_data(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        tenant_id: UUID,
        period_start: datetime,
        period_end: datetime
    ) -> Dict[str, Any]:
        """Get orders data for performance calculation"""
        query = select(
            func.count(PurchaseOrder.id).label('total_orders'),
            func.sum(PurchaseOrder.total_amount).label('total_amount'),
            func.avg(PurchaseOrder.total_amount).label('avg_order_value'),
            # Aggregate FILTER clause: count only completed orders
            func.count(PurchaseOrder.id).filter(
                PurchaseOrder.status == PurchaseOrderStatus.COMPLETED
            ).label('completed_orders')
        ).where(
            and_(
                PurchaseOrder.supplier_id == supplier_id,
                PurchaseOrder.tenant_id == tenant_id,
                PurchaseOrder.order_date >= period_start,
                PurchaseOrder.order_date <= period_end
            )
        )
        result = await db.execute(query)
        row = result.first()

        return {
            'total_orders': row.total_orders or 0,
            'total_amount': row.total_amount or Decimal('0'),
            'avg_order_value': row.avg_order_value or Decimal('0'),
            'completed_orders': row.completed_orders or 0
        }

    async def _get_deliveries_data(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        tenant_id: UUID,
        period_start: datetime,
        period_end: datetime
    ) -> Dict[str, Any]:
        """Get deliveries data for performance calculation"""
        # Get delivery statistics
        query = select(
            func.count(Delivery.id).label('total_deliveries'),
            func.count(Delivery.id).filter(
                and_(
                    Delivery.actual_arrival <= Delivery.scheduled_date,
                    Delivery.status == DeliveryStatus.DELIVERED
                )
            ).label('on_time_deliveries'),
            func.count(Delivery.id).filter(
                and_(
                    Delivery.actual_arrival > Delivery.scheduled_date,
                    Delivery.status == DeliveryStatus.DELIVERED
                )
            ).label('late_deliveries'),
            func.avg(
                func.extract('epoch', Delivery.actual_arrival - Delivery.scheduled_date) / 3600
            ).label('avg_delay_hours')
        ).where(
            and_(
                Delivery.supplier_id == supplier_id,
                Delivery.tenant_id == tenant_id,
                Delivery.scheduled_date >= period_start,
                Delivery.scheduled_date <= period_end,
                Delivery.status.in_([DeliveryStatus.DELIVERED, DeliveryStatus.PARTIALLY_DELIVERED])
            )
        )
        result = await db.execute(query)
        row = result.first()

        return {
            'total_deliveries': row.total_deliveries or 0,
            'on_time_deliveries': row.on_time_deliveries or 0,
            'late_deliveries': row.late_deliveries or 0,
            'avg_delay_hours': row.avg_delay_hours or 0
        }

    async def _get_quality_data(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        tenant_id: UUID,
        period_start: datetime,
        period_end: datetime
    ) -> Dict[str, Any]:
        """Get quality data for performance calculation"""
        query = select(
            func.count(SupplierQualityReview.id).label('total_reviews'),
            func.avg(
                cast(SupplierQualityReview.quality_rating, Float)
            ).label('avg_quality_rating'),
            func.avg(
                cast(SupplierQualityReview.delivery_rating, Float)
            ).label('avg_delivery_rating'),
            func.avg(SupplierQualityReview.communication_rating).label('avg_communication_rating'),
            func.count(SupplierQualityReview.id).filter(
                SupplierQualityReview.quality_issues.isnot(None)
            ).label('quality_issues')
        ).where(
            and_(
                SupplierQualityReview.supplier_id == supplier_id,
                SupplierQualityReview.tenant_id == tenant_id,
                SupplierQualityReview.review_date >= period_start,
                SupplierQualityReview.review_date <= period_end
            )
        )
        result = await db.execute(query)
        row = result.first()

        return {
            'total_reviews': row.total_reviews or 0,
            'avg_quality_rating': row.avg_quality_rating or 0,
            'avg_delivery_rating': row.avg_delivery_rating or 0,
            'avg_communication_rating': row.avg_communication_rating or 0,
            'quality_issues': row.quality_issues or 0
        }

    async def _calculate_delivery_performance(
        self,
        orders_data: Dict[str, Any],
        deliveries_data: Dict[str, Any]
    ) -> float:
        """Calculate delivery performance score (0-100)"""
        total_deliveries = deliveries_data.get('total_deliveries', 0)
        if total_deliveries == 0:
            return 0.0

        on_time_deliveries = deliveries_data.get('on_time_deliveries', 0)
        on_time_rate = (on_time_deliveries / total_deliveries) * 100

        # Apply penalty for average delay (clamped at zero so early deliveries
        # do not turn the penalty into a bonus)
        avg_delay_hours = max(deliveries_data.get('avg_delay_hours', 0), 0)
        delay_penalty = min(avg_delay_hours * 2, 20)  # Max 20 point penalty
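        # Illustrative example (hypothetical numbers): 17 of 20 deliveries on
        # time (85%) with a 4-hour average delay gives 85 - min(4*2, 20) = 77.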
        performance_score = max(on_time_rate - delay_penalty, 0)
        return min(performance_score, 100.0)

    async def _calculate_quality_performance(
        self,
        deliveries_data: Dict[str, Any],
        quality_data: Dict[str, Any]
    ) -> float:
        """Calculate quality performance score (0-100)"""
        total_reviews = quality_data.get('total_reviews', 0)
        if total_reviews == 0:
            return 50.0  # Default score when no reviews

        # Base quality score from ratings
        avg_quality_rating = quality_data.get('avg_quality_rating', 0)
        base_score = (avg_quality_rating / 5.0) * 100

        # Apply penalty for quality issues
        quality_issues = quality_data.get('quality_issues', 0)
        issue_penalty = min(quality_issues * 5, 30)  # Max 30 point penalty
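        # Illustrative example (hypothetical numbers): an average rating of
        # 4.2/5 gives a base of 84; two flagged issues subtract 10, so 74.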
        performance_score = max(base_score - issue_penalty, 0)
        return min(performance_score, 100.0)

    async def _calculate_cost_performance(
        self,
        orders_data: Dict[str, Any],
        deliveries_data: Dict[str, Any]
    ) -> float:
        """Calculate cost performance score (0-100)"""
        # For now, return a baseline score.
        # In future, implement price comparison with market rates.
        return 75.0

    async def _calculate_service_performance(
        self,
        orders_data: Dict[str, Any],
        quality_data: Dict[str, Any]
    ) -> float:
        """Calculate service performance score (0-100)"""
        total_reviews = quality_data.get('total_reviews', 0)
        if total_reviews == 0:
            return 50.0  # Default score when no reviews

        avg_communication_rating = quality_data.get('avg_communication_rating', 0)
        return (avg_communication_rating / 5.0) * 100

    def _calculate_trend(self, current_value: float, previous_value: Optional[float]) -> Tuple[Optional[str], Optional[float]]:
        """Calculate performance trend"""
        if previous_value is None or previous_value == 0:
            return None, None

        change_percentage = ((current_value - previous_value) / previous_value) * 100
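        # Illustrative example: a move from 70 to 77 is +10%, reported as
        # ("improving", 10.0); anything within +/-2% counts as "stable".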
        if abs(change_percentage) < 2:  # Less than 2% change considered stable
            trend_direction = "stable"
        elif change_percentage > 0:
            trend_direction = "improving"
        else:
            trend_direction = "declining"

        return trend_direction, change_percentage

    async def _get_previous_period_value(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        tenant_id: UUID,
        metric_type: PerformanceMetricType,
        period: PerformancePeriod,
        current_period_start: datetime
    ) -> Optional[float]:
        """Get the previous period's value for trend calculation"""
        # Calculate previous period dates
        if period == PerformancePeriod.DAILY:
            previous_start = current_period_start - timedelta(days=1)
            previous_end = current_period_start
        elif period == PerformancePeriod.WEEKLY:
            previous_start = current_period_start - timedelta(weeks=1)
            previous_end = current_period_start
        elif period == PerformancePeriod.MONTHLY:
            previous_start = current_period_start - timedelta(days=30)
            previous_end = current_period_start
        elif period == PerformancePeriod.QUARTERLY:
            previous_start = current_period_start - timedelta(days=90)
            previous_end = current_period_start
        else:  # YEARLY
            previous_start = current_period_start - timedelta(days=365)
            previous_end = current_period_start
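        # Note: monthly, quarterly and yearly windows use fixed-length
        # approximations (30/90/365 days) rather than calendar boundaries.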

        query = select(SupplierPerformanceMetric.metric_value).where(
            and_(
                SupplierPerformanceMetric.supplier_id == supplier_id,
                SupplierPerformanceMetric.tenant_id == tenant_id,
                SupplierPerformanceMetric.metric_type == metric_type,
                SupplierPerformanceMetric.period == period,
                SupplierPerformanceMetric.period_start >= previous_start,
                SupplierPerformanceMetric.period_start < previous_end
            )
        ).order_by(desc(SupplierPerformanceMetric.period_start)).limit(1)
        result = await db.execute(query)
        row = result.first()
        return row[0] if row else None

    def _get_target_value(self, metric_type: PerformanceMetricType) -> float:
        """Get target value for metric type"""
        targets = {
            PerformanceMetricType.DELIVERY_PERFORMANCE: settings.GOOD_DELIVERY_RATE,
            PerformanceMetricType.QUALITY_SCORE: settings.GOOD_QUALITY_RATE,
            PerformanceMetricType.PRICE_COMPETITIVENESS: 80.0,
            PerformanceMetricType.COMMUNICATION_RATING: 80.0,
            PerformanceMetricType.ORDER_ACCURACY: 95.0,
            PerformanceMetricType.RESPONSE_TIME: 90.0,
            PerformanceMetricType.COMPLIANCE_SCORE: 95.0,
            PerformanceMetricType.FINANCIAL_STABILITY: 85.0
        }
        return targets.get(metric_type, 80.0)

    async def _prepare_detailed_metrics(
        self,
        metric_type: PerformanceMetricType,
        orders_data: Dict[str, Any],
        deliveries_data: Dict[str, Any],
        quality_data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Prepare detailed metrics breakdown"""
        if metric_type == PerformanceMetricType.DELIVERY_PERFORMANCE:
            return {
                "on_time_rate": (deliveries_data.get('on_time_deliveries', 0) /
                                 max(deliveries_data.get('total_deliveries', 1), 1)) * 100,
                "avg_delay_hours": deliveries_data.get('avg_delay_hours', 0),
                "late_delivery_count": deliveries_data.get('late_deliveries', 0)
            }
        elif metric_type == PerformanceMetricType.QUALITY_SCORE:
            return {
                "avg_quality_rating": quality_data.get('avg_quality_rating', 0),
                "quality_issues_count": quality_data.get('quality_issues', 0),
                "total_reviews": quality_data.get('total_reviews', 0)
            }
        else:
            return {}

    async def _update_supplier_ratings(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        overall_performance: float,
        quality_performance: float
    ) -> None:
        """Update supplier's overall ratings"""
        stmt = select(Supplier).where(Supplier.id == supplier_id)
        result = await db.execute(stmt)
        supplier = result.scalar_one_or_none()

        if supplier:
            supplier.quality_rating = quality_performance / 20  # Convert 0-100 score to 0-5 scale
            supplier.delivery_rating = overall_performance / 20  # Convert 0-100 score to 0-5 scale
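            # Illustrative example: a quality score of 84 becomes a 4.2 rating.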
            db.add(supplier)


class AlertService:
    """Service for managing supplier alerts"""

    def __init__(self):
        self.logger = logger.bind(service="alert_service")

    @transactional
    async def evaluate_performance_alerts(
        self,
        db: AsyncSession,
        tenant_id: UUID,
        supplier_id: Optional[UUID] = None
    ) -> List[SupplierAlert]:
        """Evaluate and create performance-based alerts"""
        try:
            alerts_created = []

            # Get suppliers to evaluate
            if supplier_id:
                supplier_filter = and_(Supplier.id == supplier_id, Supplier.tenant_id == tenant_id)
            else:
                supplier_filter = and_(Supplier.tenant_id == tenant_id, Supplier.status == "active")

            stmt = select(Supplier).where(supplier_filter)
            result = await db.execute(stmt)
            suppliers = result.scalars().all()

            for supplier in suppliers:
                # Get recent performance metrics
                recent_metrics = await self._get_recent_performance_metrics(db, supplier.id, tenant_id)

                # Evaluate delivery performance alerts
                delivery_alerts = await self._evaluate_delivery_alerts(db, supplier, recent_metrics)
                alerts_created.extend(delivery_alerts)

                # Evaluate quality alerts
                quality_alerts = await self._evaluate_quality_alerts(db, supplier, recent_metrics)
                alerts_created.extend(quality_alerts)

                # Evaluate cost variance alerts
                cost_alerts = await self._evaluate_cost_alerts(db, supplier, recent_metrics)
                alerts_created.extend(cost_alerts)

            return alerts_created
        except Exception as e:
            self.logger.error("Error evaluating performance alerts", error=str(e))
            raise

    async def _get_recent_performance_metrics(
        self,
        db: AsyncSession,
        supplier_id: UUID,
        tenant_id: UUID
    ) -> Dict[PerformanceMetricType, SupplierPerformanceMetric]:
        """Get recent performance metrics for a supplier"""
        query = select(SupplierPerformanceMetric).where(
            and_(
                SupplierPerformanceMetric.supplier_id == supplier_id,
                SupplierPerformanceMetric.tenant_id == tenant_id,
                SupplierPerformanceMetric.calculated_at >= datetime.now(timezone.utc) - timedelta(days=7)
            )
        ).order_by(desc(SupplierPerformanceMetric.calculated_at))
        result = await db.execute(query)
        metrics = result.scalars().all()

        # Return the most recent metric for each type
        metrics_dict = {}
        for metric in metrics:
            if metric.metric_type not in metrics_dict:
                metrics_dict[metric.metric_type] = metric
        return metrics_dict

    async def _evaluate_delivery_alerts(
        self,
        db: AsyncSession,
        supplier: Supplier,
        metrics: Dict[PerformanceMetricType, SupplierPerformanceMetric]
    ) -> List[SupplierAlert]:
        """Evaluate delivery performance alerts"""
        alerts = []
        delivery_metric = metrics.get(PerformanceMetricType.DELIVERY_PERFORMANCE)
        if not delivery_metric:
            return alerts

        # Poor delivery performance alert
        if delivery_metric.metric_value < settings.POOR_DELIVERY_RATE:
            severity = AlertSeverity.CRITICAL if delivery_metric.metric_value < 70 else AlertSeverity.HIGH
            alert = SupplierAlert(
                tenant_id=supplier.tenant_id,
                supplier_id=supplier.id,
                alert_type=AlertType.POOR_QUALITY,
                severity=severity,
                title=f"Poor Delivery Performance - {supplier.name}",
                message=f"Delivery performance has dropped to {delivery_metric.metric_value:.1f}%",
                description=f"Supplier {supplier.name} delivery performance is below the acceptable threshold",
                trigger_value=delivery_metric.metric_value,
                threshold_value=settings.POOR_DELIVERY_RATE,
                metric_type=PerformanceMetricType.DELIVERY_PERFORMANCE,
                performance_metric_id=delivery_metric.id,
                priority_score=90 if severity == AlertSeverity.CRITICAL else 70,
                business_impact="high" if severity == AlertSeverity.CRITICAL else "medium",
                recommended_actions=[
                    {"action": "Review delivery processes with supplier"},
                    {"action": "Request delivery improvement plan"},
                    {"action": "Consider alternative suppliers"}
                ]
            )
            db.add(alert)
            alerts.append(alert)
        return alerts

    async def _evaluate_quality_alerts(
        self,
        db: AsyncSession,
        supplier: Supplier,
        metrics: Dict[PerformanceMetricType, SupplierPerformanceMetric]
    ) -> List[SupplierAlert]:
        """Evaluate quality performance alerts"""
        alerts = []
        quality_metric = metrics.get(PerformanceMetricType.QUALITY_SCORE)
        if not quality_metric:
            return alerts

        # Poor quality performance alert
        if quality_metric.metric_value < settings.POOR_QUALITY_RATE:
            severity = AlertSeverity.CRITICAL if quality_metric.metric_value < 70 else AlertSeverity.HIGH
            alert = SupplierAlert(
                tenant_id=supplier.tenant_id,
                supplier_id=supplier.id,
                alert_type=AlertType.POOR_QUALITY,
                severity=severity,
                title=f"Poor Quality Performance - {supplier.name}",
                message=f"Quality performance has dropped to {quality_metric.metric_value:.1f}%",
                description=f"Supplier {supplier.name} quality performance is below the acceptable threshold",
                trigger_value=quality_metric.metric_value,
                threshold_value=settings.POOR_QUALITY_RATE,
                metric_type=PerformanceMetricType.QUALITY_SCORE,
                performance_metric_id=quality_metric.id,
                priority_score=95 if severity == AlertSeverity.CRITICAL else 75,
                business_impact="high" if severity == AlertSeverity.CRITICAL else "medium",
                recommended_actions=[
                    {"action": "Conduct quality audit with supplier"},
                    {"action": "Request quality improvement plan"},
                    {"action": "Increase incoming inspection frequency"}
                ]
            )
            db.add(alert)
            alerts.append(alert)
        return alerts

    async def _evaluate_cost_alerts(
        self,
        db: AsyncSession,
        supplier: Supplier,
        metrics: Dict[PerformanceMetricType, SupplierPerformanceMetric]
    ) -> List[SupplierAlert]:
        """Evaluate cost variance alerts based on historical pricing"""
        alerts = []
        try:
            from shared.clients.procurement_client import ProcurementServiceClient
            from shared.config.base import get_settings
            from collections import defaultdict

            # Configuration thresholds
            WARNING_THRESHOLD = Decimal('0.10')   # 10% variance
            CRITICAL_THRESHOLD = Decimal('0.20')  # 20% variance
            SAVINGS_THRESHOLD = Decimal('0.10')   # 10% decrease
            MIN_SAMPLE_SIZE = 3
            LOOKBACK_DAYS = 30

            config = get_settings()
            procurement_client = ProcurementServiceClient(config, "suppliers")

            # Get purchase orders for this supplier from the last 60 days
            # (30-day historical baseline + 30-day comparison window)
            date_to = datetime.now(timezone.utc).date()
            date_from = date_to - timedelta(days=LOOKBACK_DAYS * 2)
            purchase_orders = await procurement_client.get_purchase_orders_by_supplier(
                tenant_id=str(supplier.tenant_id),
                supplier_id=str(supplier.id),
                date_from=date_from,
                date_to=date_to,
                status=None  # Get all statuses
            )

            if not purchase_orders or len(purchase_orders) < MIN_SAMPLE_SIZE:
                self.logger.debug("Insufficient purchase order history for cost variance analysis",
                                  supplier_id=str(supplier.id),
                                  po_count=len(purchase_orders) if purchase_orders else 0)
                return alerts

            # Group items by ingredient/product and calculate price statistics
            ingredient_prices = defaultdict(list)
            cutoff_date = date_to - timedelta(days=LOOKBACK_DAYS)

            for po in purchase_orders:
                po_date = datetime.fromisoformat(po.get('created_at').replace('Z', '+00:00')).date() if po.get('created_at') else None
                if not po_date:
                    continue

                # Process items in the PO
                for item in po.get('items', []):
                    ingredient_id = item.get('ingredient_id')
                    ingredient_name = item.get('ingredient_name') or item.get('product_name', 'Unknown')
                    unit_price = Decimal(str(item.get('unit_price', 0)))
                    if not ingredient_id or unit_price <= 0:
                        continue

                    # Categorize as historical (for baseline) or recent (for comparison)
                    is_recent = po_date >= cutoff_date
                    ingredient_prices[ingredient_id].append({
                        'price': unit_price,
                        'date': po_date,
                        'name': ingredient_name,
                        'is_recent': is_recent
                    })

            # Analyze each ingredient for cost variance
            for ingredient_id, price_history in ingredient_prices.items():
                if len(price_history) < MIN_SAMPLE_SIZE:
                    continue

                # Split into historical baseline and recent prices
                historical_prices = [p['price'] for p in price_history if not p['is_recent']]
                recent_prices = [p['price'] for p in price_history if p['is_recent']]
                if not historical_prices or not recent_prices:
                    continue

                # Calculate averages
                avg_historical = sum(historical_prices) / len(historical_prices)
                avg_recent = sum(recent_prices) / len(recent_prices)
                if avg_historical == 0:
                    continue

                # Calculate variance as a signed fraction of the historical average
                variance = (avg_recent - avg_historical) / avg_historical
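                # Illustrative example (hypothetical prices): a rise from an
                # average of $2.00 to $2.30 gives variance 0.15, which falls in
                # the warning band (>= 0.10 and < 0.20).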
                ingredient_name = price_history[0]['name']

                # Generate alerts based on variance
                if variance >= CRITICAL_THRESHOLD:
                    # Critical price increase alert
                    alert = SupplierAlert(
                        tenant_id=supplier.tenant_id,
                        supplier_id=supplier.id,
                        alert_type=AlertType.cost_variance,
                        severity=AlertSeverity.critical,
                        status=AlertStatus.active,
                        title=f"Critical Price Increase: {ingredient_name}",
                        description=(
                            f"Significant price increase detected for {ingredient_name}. "
                            f"Average price increased from ${avg_historical:.2f} to ${avg_recent:.2f} "
                            f"({variance * 100:.1f}% increase) over the last {LOOKBACK_DAYS} days."
                        ),
                        affected_products=ingredient_name,
                        detection_date=datetime.now(timezone.utc),
                        metadata={
                            "ingredient_id": str(ingredient_id),
                            "ingredient_name": ingredient_name,
                            "avg_historical_price": float(avg_historical),
                            "avg_recent_price": float(avg_recent),
                            "variance_percent": float(variance * 100),
                            "historical_sample_size": len(historical_prices),
                            "recent_sample_size": len(recent_prices),
                            "lookback_days": LOOKBACK_DAYS
                        },
                        recommended_actions=[
                            {"action": "Contact supplier to negotiate pricing"},
                            {"action": "Request explanation for price increase"},
                            {"action": "Evaluate alternative suppliers for this ingredient"},
                            {"action": "Review contract terms and pricing agreements"}
                        ]
                    )
                    db.add(alert)
                    alerts.append(alert)
                elif variance >= WARNING_THRESHOLD:
                    # Warning price increase alert
                    alert = SupplierAlert(
                        tenant_id=supplier.tenant_id,
                        supplier_id=supplier.id,
                        alert_type=AlertType.cost_variance,
                        severity=AlertSeverity.warning,
                        status=AlertStatus.active,
                        title=f"Price Increase Detected: {ingredient_name}",
                        description=(
                            f"Moderate price increase detected for {ingredient_name}. "
                            f"Average price increased from ${avg_historical:.2f} to ${avg_recent:.2f} "
                            f"({variance * 100:.1f}% increase) over the last {LOOKBACK_DAYS} days."
                        ),
                        affected_products=ingredient_name,
                        detection_date=datetime.now(timezone.utc),
                        metadata={
                            "ingredient_id": str(ingredient_id),
                            "ingredient_name": ingredient_name,
                            "avg_historical_price": float(avg_historical),
                            "avg_recent_price": float(avg_recent),
                            "variance_percent": float(variance * 100),
                            "historical_sample_size": len(historical_prices),
                            "recent_sample_size": len(recent_prices),
                            "lookback_days": LOOKBACK_DAYS
                        },
                        recommended_actions=[
                            {"action": "Monitor pricing trend over next few orders"},
                            {"action": "Contact supplier to discuss pricing"},
                            {"action": "Review market prices for this ingredient"}
                        ]
                    )
                    db.add(alert)
                    alerts.append(alert)
                elif variance <= -SAVINGS_THRESHOLD:
                    # Cost savings opportunity alert
                    alert = SupplierAlert(
                        tenant_id=supplier.tenant_id,
                        supplier_id=supplier.id,
                        alert_type=AlertType.cost_variance,
                        severity=AlertSeverity.info,
                        status=AlertStatus.active,
                        title=f"Cost Savings Opportunity: {ingredient_name}",
                        description=(
                            f"Favorable price decrease detected for {ingredient_name}. "
                            f"Average price decreased from ${avg_historical:.2f} to ${avg_recent:.2f} "
                            f"({abs(variance) * 100:.1f}% decrease) over the last {LOOKBACK_DAYS} days. "
                            f"Consider increasing order volumes to capitalize on lower pricing."
                        ),
                        affected_products=ingredient_name,
                        detection_date=datetime.now(timezone.utc),
                        metadata={
                            "ingredient_id": str(ingredient_id),
                            "ingredient_name": ingredient_name,
                            "avg_historical_price": float(avg_historical),
                            "avg_recent_price": float(avg_recent),
                            "variance_percent": float(variance * 100),
                            "historical_sample_size": len(historical_prices),
                            "recent_sample_size": len(recent_prices),
                            "lookback_days": LOOKBACK_DAYS
                        },
                        recommended_actions=[
                            {"action": "Consider increasing order quantities"},
                            {"action": "Negotiate long-term pricing lock at current rates"},
                            {"action": "Update forecast to account for favorable pricing"}
                        ]
                    )
                    db.add(alert)
                    alerts.append(alert)

            if alerts:
                self.logger.info("Cost variance alerts generated",
                                 supplier_id=str(supplier.id),
                                 alert_count=len(alerts))
        except Exception as e:
            self.logger.error("Error evaluating cost variance alerts",
                              supplier_id=str(supplier.id),
                              error=str(e),
                              exc_info=True)
        return alerts
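

# ----------------------------------------------------------------
# Illustrative usage sketch (hypothetical wiring, not part of this module's
# API): `async_session_factory` is assumed to be provided by the service's
# database setup and is not defined here.
# ----------------------------------------------------------------
# async def run_monthly_performance(supplier_id: UUID, tenant_id: UUID) -> None:
#     period_end = datetime.now(timezone.utc)
#     period_start = period_end - timedelta(days=30)
#     async with async_session_factory() as db:
#         tracker = PerformanceTrackingService()
#         await tracker.calculate_supplier_performance(
#             db, supplier_id, tenant_id,
#             PerformancePeriod.MONTHLY, period_start, period_end,
#         )
#         await AlertService().evaluate_performance_alerts(db, tenant_id, supplier_id)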