Add more services

Urtzi Alfaro
2025-08-21 20:28:14 +02:00
parent d6fd53e461
commit c6dd6fd1de
85 changed files with 17842 additions and 1828 deletions

View File

@@ -0,0 +1,14 @@
# ================================================================
# services/production/app/services/__init__.py
# ================================================================
"""
Business logic services
"""
from .production_service import ProductionService
from .production_alert_service import ProductionAlertService

__all__ = [
    "ProductionService",
    "ProductionAlertService"
]
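
For orientation, here is a minimal wiring sketch (not part of the commit) showing how the two exported services might be constructed together. The constructor signatures come from the files below; the build_services helper, and the idea of sharing one database manager and settings object, are illustrative assumptions.

# Hypothetical wiring helper; every name other than the two service classes is an assumption.
from app.services import ProductionService, ProductionAlertService

def build_services(database_manager, config):
    """Construct both services against the same database manager and service settings."""
    production_service = ProductionService(database_manager, config)
    alert_service = ProductionAlertService(database_manager, config)
    return production_service, alert_service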

View File

@@ -0,0 +1,435 @@
"""
Production Alert Service
Business logic for production alerts and notifications
"""
from typing import Optional, List, Dict, Any
from datetime import datetime, date, timedelta
from uuid import UUID
import structlog
from shared.database.transactions import transactional
from shared.notifications.alert_integration import AlertIntegration
from shared.config.base import BaseServiceSettings
from app.repositories.production_alert_repository import ProductionAlertRepository
from app.repositories.production_batch_repository import ProductionBatchRepository
from app.repositories.production_capacity_repository import ProductionCapacityRepository
from app.models.production import ProductionAlert, AlertSeverity, ProductionStatus
from app.schemas.production import ProductionAlertCreate
logger = structlog.get_logger()
class ProductionAlertService:
"""Production alert service with comprehensive monitoring"""
def __init__(self, database_manager, config: BaseServiceSettings):
self.database_manager = database_manager
self.config = config
self.alert_integration = AlertIntegration()
@transactional
async def check_production_capacity_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
"""Monitor production capacity and generate alerts"""
alerts = []
try:
async with self.database_manager.get_session() as session:
batch_repo = ProductionBatchRepository(session)
capacity_repo = ProductionCapacityRepository(session)
alert_repo = ProductionAlertRepository(session)
today = date.today()
# Check capacity exceeded alert
todays_batches = await batch_repo.get_batches_by_date_range(
str(tenant_id), today, today
)
# Calculate total planned hours for today
total_planned_hours = sum(
batch.planned_duration_minutes / 60
for batch in todays_batches
if batch.status != ProductionStatus.CANCELLED
)
# Get available capacity
available_capacity = await capacity_repo.get_capacity_utilization_summary(
str(tenant_id), today, today
)
total_capacity = available_capacity.get("total_capacity_units", 8.0)
if total_planned_hours > total_capacity:
excess_hours = total_planned_hours - total_capacity
alert_data = ProductionAlertCreate(
alert_type="production_capacity_exceeded",
severity=AlertSeverity.HIGH,
title="Capacidad de Producción Excedida",
message=f"🔥 Capacidad excedida: {excess_hours:.1f}h extra necesarias para completar la producción de hoy",
recommended_actions=[
"reschedule_batches",
"outsource_production",
"adjust_menu",
"extend_working_hours"
],
impact_level="high",
estimated_time_impact_minutes=int(excess_hours * 60),
alert_data={
"excess_hours": excess_hours,
"total_planned_hours": total_planned_hours,
"available_capacity_hours": total_capacity,
"affected_batches": len(todays_batches)
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Check production delay alert
current_time = datetime.utcnow()
cutoff_time = current_time + timedelta(hours=4) # 4 hours ahead
urgent_batches = await batch_repo.get_urgent_batches(str(tenant_id), 4)
delayed_batches = [
batch for batch in urgent_batches
if batch.planned_start_time <= current_time and batch.status == ProductionStatus.PENDING
]
for batch in delayed_batches:
delay_minutes = int((current_time - batch.planned_start_time).total_seconds() / 60)
if delay_minutes > self.config.PRODUCTION_DELAY_THRESHOLD_MINUTES:
alert_data = ProductionAlertCreate(
alert_type="production_delay",
severity=AlertSeverity.HIGH,
title="Retraso en Producción",
message=f"⏰ Retraso: {batch.product_name} debía haber comenzado hace {delay_minutes} minutos",
batch_id=batch.id,
recommended_actions=[
"start_production_immediately",
"notify_staff",
"prepare_alternatives",
"update_customers"
],
impact_level="high",
estimated_time_impact_minutes=delay_minutes,
alert_data={
"batch_number": batch.batch_number,
"product_name": batch.product_name,
"planned_start_time": batch.planned_start_time.isoformat(),
"delay_minutes": delay_minutes,
"affects_opening": delay_minutes > 120 # 2 hours
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Check cost spike alert
high_cost_batches = [
batch for batch in todays_batches
if batch.estimated_cost and batch.estimated_cost > 100 # Threshold
]
if high_cost_batches:
total_high_cost = sum(batch.estimated_cost for batch in high_cost_batches)
alert_data = ProductionAlertCreate(
alert_type="production_cost_spike",
severity=AlertSeverity.MEDIUM,
title="Costos de Producción Elevados",
message=f"💰 Costos altos detectados: {len(high_cost_batches)} lotes con costo total de {total_high_cost:.2f}",
recommended_actions=[
"review_ingredient_costs",
"optimize_recipe",
"negotiate_supplier_prices",
"adjust_menu_pricing"
],
impact_level="medium",
estimated_cost_impact=total_high_cost,
alert_data={
"high_cost_batches": len(high_cost_batches),
"total_cost": total_high_cost,
"average_cost": total_high_cost / len(high_cost_batches),
"affected_products": [batch.product_name for batch in high_cost_batches]
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Send alerts using notification service
await self._send_alerts(tenant_id, alerts)
return alerts
except Exception as e:
logger.error("Error checking production capacity alerts",
error=str(e), tenant_id=str(tenant_id))
return []
@transactional
async def check_quality_control_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
"""Monitor quality control issues and generate alerts"""
alerts = []
try:
async with self.database_manager.get_session() as session:
alert_repo = ProductionAlertRepository(session)
batch_repo = ProductionBatchRepository(session)
# Check for batches with low yield
last_week = date.today() - timedelta(days=7)
recent_batches = await batch_repo.get_batches_by_date_range(
str(tenant_id), last_week, date.today(), ProductionStatus.COMPLETED
)
low_yield_batches = [
batch for batch in recent_batches
if batch.yield_percentage and batch.yield_percentage < self.config.LOW_YIELD_ALERT_THRESHOLD * 100
]
if low_yield_batches:
avg_yield = sum(batch.yield_percentage for batch in low_yield_batches) / len(low_yield_batches)
alert_data = ProductionAlertCreate(
alert_type="low_yield_detected",
severity=AlertSeverity.MEDIUM,
title="Rendimiento Bajo Detectado",
message=f"📉 Rendimiento bajo: {len(low_yield_batches)} lotes con rendimiento promedio {avg_yield:.1f}%",
recommended_actions=[
"review_recipes",
"check_ingredient_quality",
"training_staff",
"equipment_calibration"
],
impact_level="medium",
alert_data={
"low_yield_batches": len(low_yield_batches),
"average_yield": avg_yield,
"threshold": self.config.LOW_YIELD_ALERT_THRESHOLD * 100,
"affected_products": list(set(batch.product_name for batch in low_yield_batches))
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Check for recurring quality issues
quality_issues = [
batch for batch in recent_batches
if batch.quality_score and batch.quality_score < self.config.QUALITY_SCORE_THRESHOLD
]
if len(quality_issues) >= 3: # 3 or more quality issues in a week
avg_quality = sum(batch.quality_score for batch in quality_issues) / len(quality_issues)
alert_data = ProductionAlertCreate(
alert_type="recurring_quality_issues",
severity=AlertSeverity.HIGH,
title="Problemas de Calidad Recurrentes",
message=f"⚠️ Problemas de calidad: {len(quality_issues)} lotes con calidad promedio {avg_quality:.1f}/10",
recommended_actions=[
"quality_audit",
"staff_retraining",
"equipment_maintenance",
"supplier_review"
],
impact_level="high",
alert_data={
"quality_issues_count": len(quality_issues),
"average_quality_score": avg_quality,
"threshold": self.config.QUALITY_SCORE_THRESHOLD,
"trend": "declining"
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Send alerts
await self._send_alerts(tenant_id, alerts)
return alerts
except Exception as e:
logger.error("Error checking quality control alerts",
error=str(e), tenant_id=str(tenant_id))
return []
@transactional
async def check_equipment_maintenance_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
"""Monitor equipment status and generate maintenance alerts"""
alerts = []
try:
async with self.database_manager.get_session() as session:
capacity_repo = ProductionCapacityRepository(session)
alert_repo = ProductionAlertRepository(session)
# Get equipment that needs maintenance
today = date.today()
equipment_capacity = await capacity_repo.get_multi(
filters={
"tenant_id": str(tenant_id),
"resource_type": "equipment",
"date": today
}
)
for equipment in equipment_capacity:
# Check if maintenance is overdue
if equipment.last_maintenance_date:
days_since_maintenance = (today - equipment.last_maintenance_date.date()).days
if days_since_maintenance > 30: # 30 days threshold
alert_data = ProductionAlertCreate(
alert_type="equipment_maintenance_overdue",
severity=AlertSeverity.MEDIUM,
title="Mantenimiento de Equipo Vencido",
message=f"🔧 Mantenimiento vencido: {equipment.resource_name} - {days_since_maintenance} días sin mantenimiento",
recommended_actions=[
"schedule_maintenance",
"equipment_inspection",
"backup_equipment_ready"
],
impact_level="medium",
alert_data={
"equipment_id": equipment.resource_id,
"equipment_name": equipment.resource_name,
"days_since_maintenance": days_since_maintenance,
"last_maintenance": equipment.last_maintenance_date.isoformat() if equipment.last_maintenance_date else None
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Check equipment efficiency
if equipment.efficiency_rating and equipment.efficiency_rating < 0.8: # 80% threshold
alert_data = ProductionAlertCreate(
alert_type="equipment_efficiency_low",
severity=AlertSeverity.MEDIUM,
title="Eficiencia de Equipo Baja",
message=f"📊 Eficiencia baja: {equipment.resource_name} operando al {equipment.efficiency_rating*100:.1f}%",
recommended_actions=[
"equipment_calibration",
"maintenance_check",
"replace_parts"
],
impact_level="medium",
alert_data={
"equipment_id": equipment.resource_id,
"equipment_name": equipment.resource_name,
"efficiency_rating": equipment.efficiency_rating,
"threshold": 0.8
}
)
alert = await alert_repo.create_alert({
**alert_data.model_dump(),
"tenant_id": tenant_id
})
alerts.append(alert)
# Send alerts
await self._send_alerts(tenant_id, alerts)
return alerts
except Exception as e:
logger.error("Error checking equipment maintenance alerts",
error=str(e), tenant_id=str(tenant_id))
return []
async def _send_alerts(self, tenant_id: UUID, alerts: List[ProductionAlert]):
"""Send alerts using notification service with proper urgency handling"""
try:
for alert in alerts:
# Determine delivery channels based on severity
channels = self._get_channels_by_severity(alert.severity)
# Send notification using alert integration
await self.alert_integration.send_alert(
tenant_id=str(tenant_id),
message=alert.message,
alert_type=alert.alert_type,
severity=alert.severity.value,
channels=channels,
data={
"actions": alert.recommended_actions or [],
"alert_id": str(alert.id)
}
)
logger.info("Sent production alert notification",
alert_id=str(alert.id),
alert_type=alert.alert_type,
severity=alert.severity.value,
channels=channels)
except Exception as e:
logger.error("Error sending alert notifications",
error=str(e), tenant_id=str(tenant_id))
def _get_channels_by_severity(self, severity: AlertSeverity) -> List[str]:
"""Map severity to delivery channels following user-centric analysis"""
if severity == AlertSeverity.CRITICAL:
return ["whatsapp", "email", "dashboard", "sms"]
elif severity == AlertSeverity.HIGH:
return ["whatsapp", "email", "dashboard"]
elif severity == AlertSeverity.MEDIUM:
return ["email", "dashboard"]
else:
return ["dashboard"]
@transactional
async def get_active_alerts(self, tenant_id: UUID) -> List[ProductionAlert]:
"""Get all active production alerts for a tenant"""
try:
async with self.database_manager.get_session() as session:
alert_repo = ProductionAlertRepository(session)
return await alert_repo.get_active_alerts(str(tenant_id))
except Exception as e:
logger.error("Error getting active alerts",
error=str(e), tenant_id=str(tenant_id))
return []
@transactional
async def acknowledge_alert(
self,
tenant_id: UUID,
alert_id: UUID,
acknowledged_by: str
) -> ProductionAlert:
"""Acknowledge a production alert"""
try:
async with self.database_manager.get_session() as session:
alert_repo = ProductionAlertRepository(session)
return await alert_repo.acknowledge_alert(alert_id, acknowledged_by)
except Exception as e:
logger.error("Error acknowledging alert",
error=str(e), alert_id=str(alert_id), tenant_id=str(tenant_id))
raise
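
A sketch of how these checks might be driven on a schedule (not part of the commit): the polling loop, interval, and tenant list are assumptions, while the three check methods and their signatures are taken from the class above, and each check already catches its own errors and returns an empty list on failure.

import asyncio
from typing import List
from uuid import UUID

from app.services import ProductionAlertService

async def run_alert_checks(
    alert_service: ProductionAlertService,
    tenant_ids: List[UUID],
    interval_seconds: int = 900,
) -> None:
    """Run every production alert check for each tenant on a fixed interval."""
    while True:
        for tenant_id in tenant_ids:
            # Each check creates, stores, and sends its own alerts.
            await alert_service.check_production_capacity_alerts(tenant_id)
            await alert_service.check_quality_control_alerts(tenant_id)
            await alert_service.check_equipment_maintenance_alerts(tenant_id)
        await asyncio.sleep(interval_seconds)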

View File

@@ -0,0 +1,403 @@
"""
Production Service
Main business logic for production operations
"""
from typing import Optional, List, Dict, Any
from datetime import datetime, date, timedelta
from uuid import UUID
import structlog
from shared.database.transactions import transactional
from shared.clients import get_inventory_client, get_sales_client
from shared.clients.orders_client import OrdersServiceClient
from shared.clients.recipes_client import RecipesServiceClient
from shared.config.base import BaseServiceSettings
from app.repositories.production_batch_repository import ProductionBatchRepository
from app.repositories.production_schedule_repository import ProductionScheduleRepository
from app.repositories.production_capacity_repository import ProductionCapacityRepository
from app.repositories.quality_check_repository import QualityCheckRepository
from app.models.production import ProductionBatch, ProductionStatus, ProductionPriority
from app.schemas.production import (
ProductionBatchCreate, ProductionBatchUpdate, ProductionBatchStatusUpdate,
DailyProductionRequirements, ProductionDashboardSummary, ProductionMetrics
)
logger = structlog.get_logger()
class ProductionService:
"""Main production service with business logic"""
def __init__(self, database_manager, config: BaseServiceSettings):
self.database_manager = database_manager
self.config = config
# Initialize shared clients
self.inventory_client = get_inventory_client(config, "production")
self.orders_client = OrdersServiceClient(config)
self.recipes_client = RecipesServiceClient(config)
self.sales_client = get_sales_client(config, "production")
@transactional
async def calculate_daily_requirements(
self,
tenant_id: UUID,
target_date: date
) -> DailyProductionRequirements:
"""Calculate production requirements using shared client pattern"""
try:
# 1. Get demand requirements from Orders Service
demand_data = await self.orders_client.get_demand_requirements(
str(tenant_id),
target_date.isoformat()
)
# 2. Get current stock levels from Inventory Service
stock_levels = await self.inventory_client.get_stock_levels(str(tenant_id))
# 3. Get recipe requirements from Recipes Service
recipe_data = await self.recipes_client.get_recipe_requirements(str(tenant_id))
# 4. Get capacity information
async with self.database_manager.get_session() as session:
capacity_repo = ProductionCapacityRepository(session)
available_capacity = await self._calculate_available_capacity(
capacity_repo, tenant_id, target_date
)
# 5. Apply production planning business logic
production_plan = await self._calculate_production_plan(
tenant_id, target_date, demand_data, stock_levels, recipe_data, available_capacity
)
return production_plan
except Exception as e:
logger.error("Error calculating daily production requirements",
error=str(e), tenant_id=str(tenant_id), date=target_date.isoformat())
raise
@transactional
async def create_production_batch(
self,
tenant_id: UUID,
batch_data: ProductionBatchCreate
) -> ProductionBatch:
"""Create a new production batch"""
try:
async with self.database_manager.get_session() as session:
batch_repo = ProductionBatchRepository(session)
# Prepare batch data
batch_dict = batch_data.model_dump()
batch_dict["tenant_id"] = tenant_id
# Validate recipe exists if provided
if batch_data.recipe_id:
recipe_details = await self.recipes_client.get_recipe_by_id(
str(tenant_id), str(batch_data.recipe_id)
)
if not recipe_details:
raise ValueError(f"Recipe {batch_data.recipe_id} not found")
# Check ingredient availability
if batch_data.recipe_id:
ingredient_requirements = await self.recipes_client.calculate_ingredients_for_quantity(
str(tenant_id), str(batch_data.recipe_id), batch_data.planned_quantity
)
if ingredient_requirements:
availability_check = await self.inventory_client.check_availability(
str(tenant_id), ingredient_requirements.get("requirements", [])
)
if not availability_check or not availability_check.get("all_available", True):
logger.warning("Insufficient ingredients for batch",
batch_data=batch_dict, availability=availability_check)
# Create the batch
batch = await batch_repo.create_batch(batch_dict)
logger.info("Production batch created",
batch_id=str(batch.id), tenant_id=str(tenant_id))
return batch
except Exception as e:
logger.error("Error creating production batch",
error=str(e), tenant_id=str(tenant_id))
raise
@transactional
async def update_batch_status(
self,
tenant_id: UUID,
batch_id: UUID,
status_update: ProductionBatchStatusUpdate
) -> ProductionBatch:
"""Update production batch status"""
try:
async with self.database_manager.get_session() as session:
batch_repo = ProductionBatchRepository(session)
# Update batch status
batch = await batch_repo.update_batch_status(
batch_id,
status_update.status,
status_update.actual_quantity,
status_update.notes
)
# Update inventory if batch is completed
if status_update.status == ProductionStatus.COMPLETED and status_update.actual_quantity:
await self._update_inventory_on_completion(
tenant_id, batch, status_update.actual_quantity
)
logger.info("Updated batch status",
batch_id=str(batch_id),
new_status=status_update.status.value,
tenant_id=str(tenant_id))
return batch
except Exception as e:
logger.error("Error updating batch status",
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
raise
@transactional
async def get_dashboard_summary(self, tenant_id: UUID) -> ProductionDashboardSummary:
"""Get production dashboard summary data"""
try:
async with self.database_manager.get_session() as session:
batch_repo = ProductionBatchRepository(session)
# Get active batches
active_batches = await batch_repo.get_active_batches(str(tenant_id))
# Get today's production plan
today = date.today()
todays_batches = await batch_repo.get_batches_by_date_range(
str(tenant_id), today, today
)
# Calculate metrics
todays_plan = [
{
"product_name": batch.product_name,
"planned_quantity": batch.planned_quantity,
"status": batch.status.value,
"completion_time": batch.planned_end_time.isoformat() if batch.planned_end_time else None
}
for batch in todays_batches
]
# Get metrics for last 7 days
week_ago = today - timedelta(days=7)
weekly_metrics = await batch_repo.get_production_metrics(
str(tenant_id), week_ago, today
)
return ProductionDashboardSummary(
active_batches=len(active_batches),
todays_production_plan=todays_plan,
capacity_utilization=85.0, # TODO: Calculate from actual capacity data
current_alerts=0, # TODO: Get from alerts
on_time_completion_rate=weekly_metrics.get("on_time_completion_rate", 0),
average_quality_score=8.5, # TODO: Get from quality checks
total_output_today=sum(b.actual_quantity or 0 for b in todays_batches),
efficiency_percentage=weekly_metrics.get("average_yield_percentage", 0)
)
except Exception as e:
logger.error("Error getting dashboard summary",
error=str(e), tenant_id=str(tenant_id))
raise
@transactional
async def get_production_requirements(
self,
tenant_id: UUID,
target_date: Optional[date] = None
) -> Dict[str, Any]:
"""Get production requirements for procurement planning"""
try:
if not target_date:
target_date = date.today()
# Get planned batches for the date
async with self.database_manager.get_session() as session:
batch_repo = ProductionBatchRepository(session)
planned_batches = await batch_repo.get_batches_by_date_range(
str(tenant_id), target_date, target_date, ProductionStatus.PENDING
)
# Calculate ingredient requirements
total_requirements = {}
for batch in planned_batches:
if batch.recipe_id:
requirements = await self.recipes_client.calculate_ingredients_for_quantity(
str(tenant_id), str(batch.recipe_id), batch.planned_quantity
)
if requirements and "requirements" in requirements:
for req in requirements["requirements"]:
ingredient_id = req.get("ingredient_id")
quantity = req.get("quantity", 0)
if ingredient_id in total_requirements:
total_requirements[ingredient_id]["quantity"] += quantity
else:
total_requirements[ingredient_id] = {
"ingredient_id": ingredient_id,
"ingredient_name": req.get("ingredient_name"),
"quantity": quantity,
"unit": req.get("unit"),
"priority": "medium"
}
return {
"date": target_date.isoformat(),
"total_batches": len(planned_batches),
"ingredient_requirements": list(total_requirements.values()),
"estimated_start_time": "06:00:00",
"estimated_duration_hours": sum(b.planned_duration_minutes for b in planned_batches) / 60
}
except Exception as e:
logger.error("Error getting production requirements",
error=str(e), tenant_id=str(tenant_id))
raise
async def _calculate_production_plan(
self,
tenant_id: UUID,
target_date: date,
demand_data: Optional[Dict[str, Any]],
stock_levels: Optional[Dict[str, Any]],
recipe_data: Optional[Dict[str, Any]],
available_capacity: Dict[str, Any]
) -> DailyProductionRequirements:
"""Apply production planning business logic"""
# Default production plan structure
production_plan = []
total_capacity_needed = 0.0
urgent_items = 0
if demand_data and "demand_items" in demand_data:
for item in demand_data["demand_items"]:
product_id = item.get("product_id")
demand_quantity = item.get("quantity", 0)
current_stock = 0
# Find current stock for this product
if stock_levels and "stock_levels" in stock_levels:
for stock in stock_levels["stock_levels"]:
if stock.get("product_id") == product_id:
current_stock = stock.get("available_quantity", 0)
break
# Calculate production need
production_needed = max(0, demand_quantity - current_stock)
if production_needed > 0:
# Determine urgency
urgency = "high" if demand_quantity > current_stock * 2 else "medium"
if urgency == "high":
urgent_items += 1
# Estimate capacity needed (simplified)
estimated_time_hours = production_needed * 0.5 # 30 minutes per unit
total_capacity_needed += estimated_time_hours
production_plan.append({
"product_id": product_id,
"product_name": item.get("product_name", f"Product {product_id}"),
"current_inventory": current_stock,
"demand_forecast": demand_quantity,
"pre_orders": item.get("pre_orders", 0),
"recommended_production": production_needed,
"urgency": urgency
})
return DailyProductionRequirements(
date=target_date,
production_plan=production_plan,
total_capacity_needed=total_capacity_needed,
available_capacity=available_capacity.get("total_hours", 8.0),
capacity_gap=max(0, total_capacity_needed - available_capacity.get("total_hours", 8.0)),
urgent_items=urgent_items,
recommended_schedule=None
)
async def _calculate_available_capacity(
self,
capacity_repo: ProductionCapacityRepository,
tenant_id: UUID,
target_date: date
) -> Dict[str, Any]:
"""Calculate available production capacity for a date"""
try:
# Get capacity entries for the date
equipment_capacity = await capacity_repo.get_available_capacity(
str(tenant_id), "equipment", target_date, 0
)
staff_capacity = await capacity_repo.get_available_capacity(
str(tenant_id), "staff", target_date, 0
)
# Calculate total available hours (simplified)
total_equipment_hours = sum(c.remaining_capacity_units for c in equipment_capacity)
total_staff_hours = sum(c.remaining_capacity_units for c in staff_capacity)
# Capacity is limited by the minimum of equipment or staff
effective_hours = min(total_equipment_hours, total_staff_hours) if total_staff_hours > 0 else total_equipment_hours
return {
"total_hours": effective_hours,
"equipment_hours": total_equipment_hours,
"staff_hours": total_staff_hours,
"utilization_percentage": 0 # To be calculated
}
except Exception as e:
logger.error("Error calculating available capacity", error=str(e))
# Return default capacity if calculation fails
return {
"total_hours": 8.0,
"equipment_hours": 8.0,
"staff_hours": 8.0,
"utilization_percentage": 0
}
async def _update_inventory_on_completion(
self,
tenant_id: UUID,
batch: ProductionBatch,
actual_quantity: float
):
"""Update inventory when a batch is completed"""
try:
# Add the produced quantity to inventory
update_result = await self.inventory_client.update_stock_level(
str(tenant_id),
str(batch.product_id),
actual_quantity,
f"Production batch {batch.batch_number} completed"
)
logger.info("Updated inventory after production completion",
batch_id=str(batch.id),
product_id=str(batch.product_id),
quantity_added=actual_quantity,
update_result=update_result)
except Exception as e:
logger.error("Error updating inventory on batch completion",
error=str(e), batch_id=str(batch.id))
# Don't raise - inventory update failure shouldn't prevent batch completion
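
A usage sketch of the batch lifecycle this service implements (plan, create, complete), not part of the commit: the method names and argument order come from the class above, while the ProductionBatchCreate / ProductionBatchStatusUpdate field names and sample values are inferred from attributes referenced in the code and may not match the real schemas exactly.

from datetime import date
from uuid import UUID

from app.models.production import ProductionStatus
from app.schemas.production import ProductionBatchCreate, ProductionBatchStatusUpdate
from app.services import ProductionService

async def plan_and_complete_batch(service: ProductionService, tenant_id: UUID, recipe_id: UUID):
    # 1. Calculate what needs to be produced today.
    requirements = await service.calculate_daily_requirements(tenant_id, date.today())

    # 2. Create a batch for one recommended item (field names are assumptions).
    batch = await service.create_production_batch(
        tenant_id,
        ProductionBatchCreate(
            recipe_id=recipe_id,
            product_name="Pan de masa madre",
            planned_quantity=50,
        ),
    )

    # 3. Mark it completed; on completion the service also pushes the
    #    produced quantity to the inventory service.
    updated = await service.update_batch_status(
        tenant_id,
        batch.id,
        ProductionBatchStatusUpdate(
            status=ProductionStatus.COMPLETED,
            actual_quantity=48,
            notes="Completed with minor losses",
        ),
    )
    return requirements, updated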