Improve the frontend 3
@@ -231,31 +231,40 @@ async def clone_demo_data(
             base_tenant=str(base_uuid)
         )
 
-        # Calculate date offset to make production recent
-        if base_batches:
-            max_date = max(batch.planned_start_time for batch in base_batches if batch.planned_start_time)
-            today = datetime.now(timezone.utc)
-            date_offset = today - max_date
-        else:
-            date_offset = timedelta(days=0)
-
         for batch in base_batches:
             new_batch_id = uuid.uuid4()
             batch_id_map[batch.id] = new_batch_id
 
+            # Adjust dates relative to session creation time
+            adjusted_planned_start = adjust_date_for_demo(
+                batch.planned_start_time, session_time, BASE_REFERENCE_DATE
+            ) if batch.planned_start_time else None
+            adjusted_planned_end = adjust_date_for_demo(
+                batch.planned_end_time, session_time, BASE_REFERENCE_DATE
+            ) if batch.planned_end_time else None
+            adjusted_actual_start = adjust_date_for_demo(
+                batch.actual_start_time, session_time, BASE_REFERENCE_DATE
+            ) if batch.actual_start_time else None
+            adjusted_actual_end = adjust_date_for_demo(
+                batch.actual_end_time, session_time, BASE_REFERENCE_DATE
+            ) if batch.actual_end_time else None
+            adjusted_completed = adjust_date_for_demo(
+                batch.completed_at, session_time, BASE_REFERENCE_DATE
+            ) if batch.completed_at else None
+
             new_batch = ProductionBatch(
                 id=new_batch_id,
                 tenant_id=virtual_uuid,
                 batch_number=f"BATCH-{uuid.uuid4().hex[:8].upper()}",  # New batch number
                 product_id=batch.product_id,  # Keep product reference
                 product_name=batch.product_name,
                 recipe_id=batch.recipe_id,  # Keep recipe reference
-                planned_start_time=batch.planned_start_time + date_offset if batch.planned_start_time else None,
-                planned_end_time=batch.planned_end_time + date_offset if batch.planned_end_time else None,
+                planned_start_time=adjusted_planned_start,
+                planned_end_time=adjusted_planned_end,
                 planned_quantity=batch.planned_quantity,
                 planned_duration_minutes=batch.planned_duration_minutes,
-                actual_start_time=batch.actual_start_time + date_offset if batch.actual_start_time else None,
-                actual_end_time=batch.actual_end_time + date_offset if batch.actual_end_time else None,
+                actual_start_time=adjusted_actual_start,
+                actual_end_time=adjusted_actual_end,
                 actual_quantity=batch.actual_quantity,
                 actual_duration_minutes=batch.actual_duration_minutes,
                 status=batch.status,
@@ -284,9 +293,9 @@ async def clone_demo_data(
                 quality_notes=batch.quality_notes,
                 delay_reason=batch.delay_reason,
                 cancellation_reason=batch.cancellation_reason,
-                created_at=datetime.now(timezone.utc),
-                updated_at=datetime.now(timezone.utc),
-                completed_at=batch.completed_at + date_offset if batch.completed_at else None
+                created_at=session_time,
+                updated_at=session_time,
+                completed_at=adjusted_completed
             )
             db.add(new_batch)
             stats["production_batches"] += 1
@@ -310,6 +319,11 @@ async def clone_demo_data(
             new_batch_id = batch_id_map.get(check.batch_id, check.batch_id)
             new_template_id = template_id_map.get(check.template_id, check.template_id) if check.template_id else None
 
+            # Adjust check time relative to session creation time
+            adjusted_check_time = adjust_date_for_demo(
+                check.check_time, session_time, BASE_REFERENCE_DATE
+            ) if check.check_time else None
+
             new_check = QualityCheck(
                 id=uuid.uuid4(),
                 tenant_id=virtual_uuid,
@@ -317,7 +331,7 @@ async def clone_demo_data(
                 template_id=new_template_id,
                 check_type=check.check_type,
                 process_stage=check.process_stage,
-                check_time=check.check_time + date_offset,
+                check_time=adjusted_check_time,
                 checker_id=check.checker_id,
                 quality_score=check.quality_score,
                 pass_fail=check.pass_fail,
@@ -340,8 +354,8 @@ async def clone_demo_data(
                 check_notes=check.check_notes,
                 photos_urls=check.photos_urls,
                 certificate_url=check.certificate_url,
-                created_at=datetime.now(timezone.utc),
-                updated_at=datetime.now(timezone.utc)
+                created_at=session_time,
+                updated_at=session_time
             )
             db.add(new_check)
             stats["quality_checks"] += 1
@@ -359,12 +373,26 @@ async def clone_demo_data(
         )
 
         for schedule in base_schedules:
+            # Adjust schedule dates relative to session creation time
+            adjusted_schedule_date = adjust_date_for_demo(
+                schedule.schedule_date, session_time, BASE_REFERENCE_DATE
+            ) if schedule.schedule_date else None
+            adjusted_shift_start = adjust_date_for_demo(
+                schedule.shift_start, session_time, BASE_REFERENCE_DATE
+            ) if schedule.shift_start else None
+            adjusted_shift_end = adjust_date_for_demo(
+                schedule.shift_end, session_time, BASE_REFERENCE_DATE
+            ) if schedule.shift_end else None
+            adjusted_finalized = adjust_date_for_demo(
+                schedule.finalized_at, session_time, BASE_REFERENCE_DATE
+            ) if schedule.finalized_at else None
+
             new_schedule = ProductionSchedule(
                 id=uuid.uuid4(),
                 tenant_id=virtual_uuid,
-                schedule_date=schedule.schedule_date + date_offset,
-                shift_start=schedule.shift_start + date_offset,
-                shift_end=schedule.shift_end + date_offset,
+                schedule_date=adjusted_schedule_date,
+                shift_start=adjusted_shift_start,
+                shift_end=adjusted_shift_end,
                 total_capacity_hours=schedule.total_capacity_hours,
                 planned_capacity_hours=schedule.planned_capacity_hours,
                 actual_capacity_hours=schedule.actual_capacity_hours,
@@ -383,9 +411,9 @@ async def clone_demo_data(
                 on_time_completion_rate=schedule.on_time_completion_rate,
                 schedule_notes=schedule.schedule_notes,
                 schedule_adjustments=schedule.schedule_adjustments,
-                created_at=datetime.now(timezone.utc),
-                updated_at=datetime.now(timezone.utc),
-                finalized_at=schedule.finalized_at + date_offset if schedule.finalized_at else None
+                created_at=session_time,
+                updated_at=session_time,
+                finalized_at=adjusted_finalized
             )
             db.add(new_schedule)
             stats["production_schedules"] += 1
@@ -397,15 +425,29 @@ async def clone_demo_data(
         base_capacity = result.scalars().all()
 
         for capacity in base_capacity:
+            # Adjust capacity dates relative to session creation time
+            adjusted_date = adjust_date_for_demo(
+                capacity.date, session_time, BASE_REFERENCE_DATE
+            ) if capacity.date else None
+            adjusted_start_time = adjust_date_for_demo(
+                capacity.start_time, session_time, BASE_REFERENCE_DATE
+            ) if capacity.start_time else None
+            adjusted_end_time = adjust_date_for_demo(
+                capacity.end_time, session_time, BASE_REFERENCE_DATE
+            ) if capacity.end_time else None
+            adjusted_last_maintenance = adjust_date_for_demo(
+                capacity.last_maintenance_date, session_time, BASE_REFERENCE_DATE
+            ) if capacity.last_maintenance_date else None
+
             new_capacity = ProductionCapacity(
                 id=uuid.uuid4(),
                 tenant_id=virtual_uuid,
                 resource_type=capacity.resource_type,
                 resource_id=capacity.resource_id,
                 resource_name=capacity.resource_name,
-                date=capacity.date + date_offset,
-                start_time=capacity.start_time + date_offset,
-                end_time=capacity.end_time + date_offset,
+                date=adjusted_date,
+                start_time=adjusted_start_time,
+                end_time=adjusted_end_time,
                 total_capacity_units=capacity.total_capacity_units,
                 allocated_capacity_units=capacity.allocated_capacity_units,
                 remaining_capacity_units=capacity.remaining_capacity_units,
@@ -419,11 +461,11 @@ async def clone_demo_data(
                 cleanup_time_minutes=capacity.cleanup_time_minutes,
                 efficiency_rating=capacity.efficiency_rating,
                 maintenance_status=capacity.maintenance_status,
-                last_maintenance_date=capacity.last_maintenance_date + date_offset if capacity.last_maintenance_date else None,
+                last_maintenance_date=adjusted_last_maintenance,
                 notes=capacity.notes,
                 restrictions=capacity.restrictions,
-                created_at=datetime.now(timezone.utc),
-                updated_at=datetime.now(timezone.utc)
+                created_at=session_time,
+                updated_at=session_time
             )
             db.add(new_capacity)
             stats["production_capacity"] += 1
@@ -437,8 +479,8 @@ async def clone_demo_data(
         stats["alerts_generated"] = 0
 
         # Calculate total from non-alert stats
-        total_records = (stats["equipment"] + stats["batches"] + stats["schedules"] +
-                         stats["quality_templates"] + stats["quality_checks"] +
+        total_records = (stats["equipment"] + stats["production_batches"] + stats["production_schedules"] +
+                         stats["quality_check_templates"] + stats["quality_checks"] +
                          stats["production_capacity"])
         duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
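
Throughout these hunks the per-clone date_offset arithmetic is replaced by calls of the form adjust_date_for_demo(value, session_time, BASE_REFERENCE_DATE). The helper itself is defined outside this diff; a minimal sketch consistent with the call sites (an assumption, not the committed implementation) is:

from datetime import datetime

def adjust_date_for_demo(value: datetime, session_time: datetime,
                         base_reference: datetime) -> datetime:
    # Hypothetical sketch: shift each base-dataset timestamp by the delta
    # between the demo session's creation time and the fixed reference date
    # the base data was authored against, preserving relative spacing.
    return value + (session_time - base_reference)

Anchoring to a fixed BASE_REFERENCE_DATE makes every clone deterministic for a given session_time, unlike the old offset derived from max(planned_start_time).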
services/production/app/api/orchestrator.py (new file, 240 lines)
@@ -0,0 +1,240 @@
# ================================================================
# services/production/app/api/orchestrator.py
# ================================================================
"""
Production Orchestrator API - Endpoints for orchestrated production scheduling
Called by the Orchestrator Service to generate production schedules from forecast data
"""

from fastapi import APIRouter, Depends, HTTPException, Path
from typing import Optional, Dict, Any, List
from datetime import date
from uuid import UUID
from pydantic import BaseModel, Field
import structlog

from shared.routing import RouteBuilder
from app.services.production_service import ProductionService
from app.schemas.production import ProductionScheduleResponse
from app.core.config import settings

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-orchestrator"])


def get_production_service() -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    return ProductionService(database_manager, settings)


# ================================================================
# REQUEST/RESPONSE SCHEMAS
# ================================================================

class GenerateScheduleRequest(BaseModel):
    """
    Request to generate production schedule (called by Orchestrator)

    The Orchestrator calls Forecasting Service first, then passes forecast data here.
    Production Service uses this data to determine what to produce.

    NEW: Accepts cached data snapshots from Orchestrator to eliminate duplicate API calls.
    """
    forecast_data: Dict[str, Any] = Field(..., description="Forecast data from Forecasting Service")
    target_date: Optional[date] = Field(None, description="Target production date")
    planning_horizon_days: int = Field(default=1, ge=1, le=7, description="Planning horizon in days")

    # NEW: Cached data from Orchestrator
    inventory_data: Optional[Dict[str, Any]] = Field(None, description="Cached inventory snapshot from Orchestrator")
    recipes_data: Optional[Dict[str, Any]] = Field(None, description="Cached recipes snapshot from Orchestrator")

    class Config:
        json_schema_extra = {
            "example": {
                "forecast_data": {
                    "forecasts": [
                        {
                            "product_id": "uuid-here",
                            "predicted_demand": 100.0,
                            "confidence_score": 0.85
                        }
                    ],
                    "forecast_id": "uuid-here",
                    "generated_at": "2025-01-30T10:00:00Z"
                },
                "target_date": "2025-01-31",
                "planning_horizon_days": 1
            }
        }


class GenerateScheduleResponse(BaseModel):
    """Response from generate_schedule endpoint"""
    success: bool
    message: str
    schedule_id: Optional[UUID] = None
    schedule_number: Optional[str] = None
    batches_created: int = 0
    total_planned_quantity: float = 0.0
    warnings: List[str] = []
    errors: List[str] = []

    class Config:
        json_schema_extra = {
            "example": {
                "success": True,
                "message": "Production schedule generated successfully",
                "schedule_id": "uuid-here",
                "schedule_number": "PROD-2025-01-30-001",
                "batches_created": 5,
                "total_planned_quantity": 500.0,
                "warnings": [],
                "errors": []
            }
        }


# ================================================================
# ORCHESTRATOR ENTRY POINT
# ================================================================

@router.post(
    route_builder.build_nested_resource_route("", None, "generate-schedule"),
    response_model=GenerateScheduleResponse
)
async def generate_production_schedule(
    tenant_id: UUID = Path(...),
    request_data: GenerateScheduleRequest = ...,
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Generate production schedule from forecast data (called by Orchestrator)

    This is the main entry point for orchestrated production planning.
    The Orchestrator calls Forecasting Service first, then passes forecast data here.

    Flow:
    1. Receive forecast data from orchestrator
    2. Parse forecast to extract product demands
    3. Check inventory levels for each product
    4. Calculate production quantities needed
    5. Create production schedule and batches
    6. Return schedule summary

    Args:
        tenant_id: Tenant UUID
        request_data: Schedule generation request with forecast data

    Returns:
        GenerateScheduleResponse with schedule details and created batches
    """
    try:
        logger.info("Generate production schedule endpoint called",
                    tenant_id=str(tenant_id),
                    has_forecast_data=bool(request_data.forecast_data))

        target_date = request_data.target_date or date.today()
        forecast_data = request_data.forecast_data

        # Parse forecast data from orchestrator
        forecasts = _parse_forecast_data(forecast_data)

        if not forecasts:
            return GenerateScheduleResponse(
                success=False,
                message="No forecast data provided",
                errors=["Forecast data is empty or invalid"]
            )

        # Generate production schedule using the service (with cached data if available)
        result = await production_service.generate_production_schedule_from_forecast(
            tenant_id=tenant_id,
            target_date=target_date,
            forecasts=forecasts,
            planning_horizon_days=request_data.planning_horizon_days,
            inventory_data=request_data.inventory_data,  # NEW: Pass cached inventory
            recipes_data=request_data.recipes_data  # NEW: Pass cached recipes
        )

        logger.info("Production schedule generated successfully",
                    tenant_id=str(tenant_id),
                    schedule_id=str(result.get('schedule_id')) if result.get('schedule_id') else None,
                    batches_created=result.get('batches_created', 0))

        return GenerateScheduleResponse(
            success=True,
            message="Production schedule generated successfully",
            schedule_id=result.get('schedule_id'),
            schedule_number=result.get('schedule_number'),
            batches_created=result.get('batches_created', 0),
            total_planned_quantity=result.get('total_planned_quantity', 0.0),
            warnings=result.get('warnings', []),
            errors=[]
        )

    except Exception as e:
        logger.error("Error generating production schedule",
                     error=str(e), tenant_id=str(tenant_id))
        return GenerateScheduleResponse(
            success=False,
            message="Failed to generate production schedule",
            errors=[str(e)]
        )


# ================================================================
# HELPER FUNCTIONS
# ================================================================

def _parse_forecast_data(forecast_data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Parse forecast data received from orchestrator

    Expected format from Forecasting Service via Orchestrator:
    {
        "forecasts": [
            {
                "product_id": "uuid",
                "inventory_product_id": "uuid",  # Alternative field name
                "predicted_demand": 100.0,
                "predicted_value": 100.0,  # Alternative field name
                "confidence_score": 0.85,
                ...
            }
        ],
        "forecast_id": "uuid",
        "generated_at": "2025-01-30T10:00:00Z"
    }
    """
    forecasts = []

    forecast_list = forecast_data.get('forecasts', [])
    for forecast_item in forecast_list:
        # Extract product ID (try multiple field names)
        product_id = (
            forecast_item.get('product_id') or
            forecast_item.get('inventory_product_id') or
            forecast_item.get('item_id')
        )

        # Extract predicted demand (try multiple field names)
        predicted_demand = (
            forecast_item.get('predicted_demand') or
            forecast_item.get('predicted_value') or
            forecast_item.get('demand') or
            0
        )

        if product_id and predicted_demand > 0:
            forecasts.append({
                'product_id': product_id,
                'predicted_demand': float(predicted_demand),
                'confidence_score': forecast_item.get('confidence_score', 0.8),
                'lower_bound': forecast_item.get('lower_bound', 0),
                'upper_bound': forecast_item.get('upper_bound', 0),
                'forecast_id': forecast_data.get('forecast_id'),
            })

    return forecasts
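
As a usage sketch, the Orchestrator would POST a GenerateScheduleRequest to this router. The concrete path comes from route_builder.build_nested_resource_route, which is not shown in this diff, so the host, port, and tenant-scoped URL shape below are assumptions for illustration only:

import httpx

# Hypothetical call from the Orchestrator side; URL and UUIDs are made up.
payload = {
    "forecast_data": {
        "forecasts": [
            {"product_id": "11111111-1111-1111-1111-111111111111",
             "predicted_demand": 100.0,
             "confidence_score": 0.85}
        ],
        "forecast_id": "22222222-2222-2222-2222-222222222222",
        "generated_at": "2025-01-30T10:00:00Z"
    },
    "target_date": "2025-01-31",
    "planning_horizon_days": 1
}

resp = httpx.post(
    "http://production:8000/api/v1/tenants/"
    "33333333-3333-3333-3333-333333333333/production/generate-schedule",
    json=payload,
    timeout=30.0
)
print(resp.json()["batches_created"])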
@@ -12,7 +12,6 @@ from sqlalchemy import text
 from app.core.config import settings
 from app.core.database import database_manager
 from app.services.production_alert_service import ProductionAlertService
-from app.services.production_scheduler_service import ProductionSchedulerService
 from shared.service_base import StandardFastAPIService
 
 # Import standardized routers
@@ -24,7 +23,8 @@ from app.api import (
     analytics,
     quality_templates,
     equipment,
-    internal_demo
+    internal_demo,
+    orchestrator  # NEW: Orchestrator integration endpoint
 )
@@ -60,7 +60,7 @@ class ProductionService(StandardFastAPIService):
         ]
 
         self.alert_service = None
-        self.scheduler_service = None
+        # REMOVED: scheduler_service (replaced by Orchestrator Service)
 
         # Create custom checks for services
         async def check_alert_service():
@@ -71,14 +71,6 @@ class ProductionService(StandardFastAPIService):
                 self.logger.error("Alert service health check failed", error=str(e))
                 return False
 
-        async def check_scheduler_service():
-            """Check production scheduler service health"""
-            try:
-                return bool(self.scheduler_service) if self.scheduler_service else False
-            except Exception as e:
-                self.logger.error("Scheduler service health check failed", error=str(e))
-                return False
-
         super().__init__(
             service_name=settings.SERVICE_NAME,
             app_name=settings.APP_NAME,
@@ -88,8 +80,7 @@ class ProductionService(StandardFastAPIService):
             database_manager=database_manager,
             expected_tables=production_expected_tables,
             custom_health_checks={
-                "alert_service": check_alert_service,
-                "scheduler_service": check_scheduler_service
+                "alert_service": check_alert_service
             }
         )
@@ -100,22 +91,15 @@ class ProductionService(StandardFastAPIService):
         await self.alert_service.start()
         self.logger.info("Production alert service started")
 
-        # Initialize production scheduler service
-        self.scheduler_service = ProductionSchedulerService(settings)
-        await self.scheduler_service.start()
-        self.logger.info("Production scheduler service started")
+        # REMOVED: Production scheduler service initialization
+        # Scheduling is now handled by the Orchestrator Service
+        # which calls our /generate-schedule endpoint
 
         # Store services in app state
         app.state.alert_service = self.alert_service
-        app.state.scheduler_service = self.scheduler_service
 
     async def on_shutdown(self, app: FastAPI):
-        """Custom startup logic for production service"""
-        # Stop scheduler service
-        if self.scheduler_service:
-            await self.scheduler_service.stop()
-            self.logger.info("Scheduler service stopped")
-
+        """Custom shutdown logic for production service"""
         # Stop alert service
         if self.alert_service:
             await self.alert_service.stop()
@@ -127,7 +111,7 @@ class ProductionService(StandardFastAPIService):
             "production_planning",
             "batch_management",
             "production_scheduling",
-            "automated_daily_scheduling",  # NEW: Automated scheduler
+            "orchestrator_integration",  # NEW: Orchestrator-driven scheduling
             "quality_control",
             "equipment_management",
             "capacity_planning",
@@ -166,6 +150,7 @@ service.setup_custom_middleware()
 
 # Include standardized routers
 # NOTE: Register more specific routes before generic parameterized routes
+service.add_router(orchestrator.router)  # NEW: Orchestrator integration endpoint
 service.add_router(quality_templates.router)  # Register first to avoid route conflicts
 service.add_router(equipment.router)
 service.add_router(production_batches.router)
@@ -175,20 +160,8 @@ service.add_router(production_dashboard.router)
 service.add_router(analytics.router)
 service.add_router(internal_demo.router)
 
-
-@app.post("/test/production-scheduler")
-async def test_production_scheduler():
-    """Test endpoint to manually trigger production scheduler"""
-    try:
-        if hasattr(app.state, 'scheduler_service'):
-            scheduler_service = app.state.scheduler_service
-            await scheduler_service.test_production_schedule_generation()
-            return {"message": "Production scheduler test triggered successfully"}
-        else:
-            return {"error": "Scheduler service not available"}
-    except Exception as e:
-        service.logger.error("Error testing production scheduler", error=str(e))
-        return {"error": f"Failed to trigger scheduler test: {str(e)}"}
+# REMOVED: test_production_scheduler endpoint
+# Production scheduling is now triggered by the Orchestrator Service
 
 
 if __name__ == "__main__":
@@ -36,7 +36,7 @@ class ProductionAlertRepository:
                 FROM production_batches pb
                 WHERE pb.planned_start_time >= CURRENT_DATE
                 AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
-                AND pb.status IN ('planned', 'in_progress')
+                AND pb.status IN ('PENDING', 'IN_PROGRESS')
                 GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
                 HAVING COUNT(*) > 10
                 ORDER BY total_planned DESC
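
The status literals in these queries move from lowercase labels to uppercase values, presumably to match how the ProductionStatus enum is stored. A sketch of the assumed enum definition (the model file is not part of this diff, and members beyond the two statuses used here are unknown):

import enum

class ProductionStatus(str, enum.Enum):
    # Assumed definition matching the uppercase literals in the queries
    PENDING = "PENDING"
    IN_PROGRESS = "IN_PROGRESS"
    # ...other members not visible in this diff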
@@ -65,7 +65,7 @@ class ProductionAlertRepository:
                 COALESCE(pb.priority::text, 'medium') as priority_level,
                 1 as affected_orders
                 FROM production_batches pb
-                WHERE pb.status = 'in_progress'
+                WHERE pb.status = 'IN_PROGRESS'
                 AND pb.planned_end_time < NOW()
                 AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
                 ORDER BY
@@ -91,13 +91,14 @@ class ProductionAlertRepository:
         try:
             query = text("""
                 SELECT
-                qc.id, qc.tenant_id, qc.batch_id, qc.test_type,
-                qc.result_value, qc.min_acceptable, qc.max_acceptable,
+                qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
+                qc.quality_score, qc.within_tolerance,
                 qc.pass_fail, qc.defect_count,
-                qc.notes as qc_severity,
+                qc.check_notes as qc_severity,
                 1 as total_failures,
                 pb.product_name, pb.batch_number,
-                qc.created_at
+                qc.created_at,
+                qc.process_stage
                 FROM quality_checks qc
                 JOIN production_batches pb ON pb.id = qc.batch_id
                 WHERE qc.pass_fail = false
@@ -256,7 +257,7 @@ class ProductionAlertRepository:
                 FROM production_batches pb
                 JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
                 WHERE ri.ingredient_id = :ingredient_id
-                AND pb.status = 'in_progress'
+                AND pb.status = 'IN_PROGRESS'
                 AND pb.planned_completion_time > NOW()
             """)
@@ -296,15 +296,16 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
                 'type': 'quality_control_failure',
                 'severity': severity,
                 'title': f'❌ Fallo Control Calidad: {issue["product_name"]}',
-                'message': f'Lote {issue["batch_number"]} falló en {issue["test_type"]}. Valor: {issue["result_value"]} (rango: {issue["min_acceptable"]}-{issue["max_acceptable"]})',
+                'message': f'Lote {issue["batch_number"]} falló en {issue["check_type"]}. Puntuación: {issue["quality_score"]}/10. Defectos: {issue["defect_count"]}',
                 'actions': ['Revisar lote', 'Repetir prueba', 'Ajustar proceso', 'Documentar causa'],
                 'metadata': {
                     'quality_check_id': str(issue['id']),
                     'batch_id': str(issue['batch_id']),
-                    'test_type': issue['test_type'],
-                    'result_value': float(issue['result_value']),
-                    'min_acceptable': float(issue['min_acceptable']),
-                    'max_acceptable': float(issue['max_acceptable']),
+                    'check_type': issue['check_type'],
+                    'quality_score': float(issue['quality_score']),
+                    'within_tolerance': issue['within_tolerance'],
+                    'defect_count': int(issue['defect_count']),
+                    'process_stage': issue.get('process_stage'),
                     'qc_severity': qc_severity,
                     'total_failures': total_failures
                 }
@@ -1,478 +0,0 @@
# services/production/app/services/production_scheduler_service.py
"""
Production Scheduler Service - Daily production planning automation

Automatically generates daily production schedules for all active tenants based on:
- Demand forecasts from Orders Service
- Current inventory levels
- Production capacity
- Recipe requirements

Runs daily at 5:30 AM (before procurement @ 6:00 AM) to ensure production
plans are ready for the day ahead.
"""

import asyncio
from datetime import datetime, timedelta, date
from typing import List, Dict, Any, Optional
from uuid import UUID
from decimal import Decimal
import structlog
from apscheduler.triggers.cron import CronTrigger
from zoneinfo import ZoneInfo

from shared.alerts.base_service import BaseAlertService, AlertServiceMixin
from shared.database.base import create_database_manager
from app.services.production_service import ProductionService
from app.schemas.production import ProductionScheduleCreate, ProductionBatchCreate
from app.models.production import ProductionStatus, ProductionPriority

logger = structlog.get_logger()


class ProductionSchedulerService(BaseAlertService, AlertServiceMixin):
    """
    Production scheduler service for automated daily production planning
    Extends BaseAlertService to use proven scheduling infrastructure
    """

    def __init__(self, config):
        super().__init__(config)
        self.production_service = None

    async def start(self):
        """Initialize scheduler and production service"""
        await super().start()

        # Store database manager for session creation
        from app.core.database import database_manager
        self.db_manager = database_manager

        logger.info("Production scheduler service started", service=self.config.SERVICE_NAME)

    def setup_scheduled_checks(self):
        """Configure daily production planning jobs"""

        # Daily production planning at 5:30 AM (before procurement)
        # This ensures production plans are ready before procurement plans
        self.scheduler.add_job(
            func=self.run_daily_production_planning,
            trigger=CronTrigger(hour=5, minute=30),
            id="daily_production_planning",
            name="Daily Production Planning",
            misfire_grace_time=300,  # 5 minutes grace period
            coalesce=True,  # Combine missed runs
            max_instances=1  # Only one instance at a time
        )

        # Stale schedule cleanup at 5:50 AM
        self.scheduler.add_job(
            func=self.run_stale_schedule_cleanup,
            trigger=CronTrigger(hour=5, minute=50),
            id="stale_schedule_cleanup",
            name="Stale Schedule Cleanup",
            misfire_grace_time=300,
            coalesce=True,
            max_instances=1
        )

        # Test job for development (every 30 minutes if DEBUG enabled)
        if getattr(self.config, 'DEBUG', False) or getattr(self.config, 'PRODUCTION_TEST_MODE', False):
            self.scheduler.add_job(
                func=self.run_daily_production_planning,
                trigger=CronTrigger(minute='*/30'),
                id="test_production_planning",
                name="Test Production Planning (30min)",
                misfire_grace_time=300,
                coalesce=True,
                max_instances=1
            )
            logger.info("⚡ Test production planning job added (every 30 minutes)")

        logger.info("📅 Production scheduled jobs configured",
                    jobs_count=len(self.scheduler.get_jobs()))

    async def run_daily_production_planning(self):
        """
        Execute daily production planning for all active tenants
        Processes tenants in parallel with individual timeouts
        """
        if not self.is_leader:
            logger.debug("Skipping production planning - not leader")
            return

        try:
            self._checks_performed += 1
            logger.info("🔄 Starting daily production planning execution",
                        timestamp=datetime.now().isoformat())

            # Get active non-demo tenants
            active_tenants = await self.get_active_tenants()
            if not active_tenants:
                logger.info("No active tenants found for production planning")
                return

            logger.info(f"Processing {len(active_tenants)} tenants in parallel")

            # Create tasks with timeout for each tenant
            tasks = [
                self._process_tenant_with_timeout(tenant_id, timeout_seconds=180)
                for tenant_id in active_tenants
            ]

            # Execute all tasks in parallel
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Count successes and failures
            processed_tenants = sum(1 for r in results if r is True)
            failed_tenants = sum(1 for r in results if isinstance(r, Exception) or r is False)

            logger.info("🎯 Daily production planning completed",
                        total_tenants=len(active_tenants),
                        processed_tenants=processed_tenants,
                        failed_tenants=failed_tenants)

        except Exception as e:
            self._errors_count += 1
            logger.error("💥 Daily production planning failed completely", error=str(e))

    async def _process_tenant_with_timeout(self, tenant_id: UUID, timeout_seconds: int = 180) -> bool:
        """
        Process tenant production planning with timeout
        Returns True on success, False or raises exception on failure
        """
        try:
            await asyncio.wait_for(
                self.process_tenant_production(tenant_id),
                timeout=timeout_seconds
            )
            logger.info("✅ Successfully processed tenant", tenant_id=str(tenant_id))
            return True
        except asyncio.TimeoutError:
            logger.error("⏱️ Tenant processing timed out",
                         tenant_id=str(tenant_id),
                         timeout=timeout_seconds)
            return False
        except Exception as e:
            logger.error("❌ Error processing tenant production",
                         tenant_id=str(tenant_id),
                         error=str(e))
            raise

    async def process_tenant_production(self, tenant_id: UUID):
        """Process production planning for a specific tenant"""
        try:
            # Get tenant timezone for accurate date calculation
            tenant_tz = await self._get_tenant_timezone(tenant_id)

            # Calculate target date in tenant's timezone
            target_date = datetime.now(ZoneInfo(tenant_tz)).date()

            logger.info("Processing production for tenant",
                        tenant_id=str(tenant_id),
                        target_date=str(target_date),
                        timezone=tenant_tz)

            # Check if schedule already exists for this date
            async with self.db_manager.get_session() as session:
                production_service = ProductionService(self.db_manager, self.config)

                # Check for existing schedule
                existing_schedule = await self._get_schedule_by_date(
                    session, tenant_id, target_date
                )

                if existing_schedule:
                    logger.info("📋 Production schedule already exists, skipping",
                                tenant_id=str(tenant_id),
                                schedule_date=str(target_date),
                                schedule_id=str(existing_schedule.get('id')))
                    return

                # Calculate daily requirements
                requirements = await production_service.calculate_daily_requirements(
                    tenant_id, target_date
                )

                if not requirements.production_plan:
                    logger.info("No production requirements for date",
                                tenant_id=str(tenant_id),
                                date=str(target_date))
                    return

                # Create production schedule
                schedule_data = ProductionScheduleCreate(
                    schedule_date=target_date,
                    schedule_name=f"Daily Production - {target_date.strftime('%Y-%m-%d')}",
                    status="draft",
                    notes=f"Auto-generated daily production schedule for {target_date}",
                    total_batches=len(requirements.production_plan),
                    auto_generated=True
                )

                schedule = await production_service.create_production_schedule(
                    tenant_id, schedule_data
                )

                # Create production batches from requirements
                batches_created = 0
                for item in requirements.production_plan:
                    try:
                        batch_data = await self._create_batch_from_requirement(
                            item, schedule.id, target_date
                        )

                        batch = await production_service.create_production_batch(
                            tenant_id, batch_data
                        )
                        batches_created += 1

                    except Exception as e:
                        logger.error("Error creating batch from requirement",
                                     tenant_id=str(tenant_id),
                                     product=item.get('product_name'),
                                     error=str(e))

                # Send notification about new schedule
                await self.send_production_schedule_notification(
                    tenant_id, schedule.id, batches_created
                )

                logger.info("🎉 Production schedule created successfully",
                            tenant_id=str(tenant_id),
                            schedule_id=str(schedule.id),
                            schedule_date=str(target_date),
                            batches_created=batches_created)

        except Exception as e:
            logger.error("💥 Error processing tenant production",
                         tenant_id=str(tenant_id),
                         error=str(e))
            raise

    async def _get_tenant_timezone(self, tenant_id: UUID) -> str:
        """Get tenant's timezone, fallback to UTC if not configured"""
        try:
            from services.tenant.app.models.tenants import Tenant
            from sqlalchemy import select
            import os

            tenant_db_url = os.getenv("TENANT_DATABASE_URL")
            if not tenant_db_url:
                logger.warning("TENANT_DATABASE_URL not set, using UTC")
                return "UTC"

            tenant_db = create_database_manager(tenant_db_url, "tenant-tz-lookup")

            async with tenant_db.get_session() as session:
                result = await session.execute(
                    select(Tenant).where(Tenant.id == tenant_id)
                )
                tenant = result.scalars().first()

                if tenant and hasattr(tenant, 'timezone') and tenant.timezone:
                    return tenant.timezone

                # Default to Europe/Madrid for Spanish bakeries
                return "Europe/Madrid"

        except Exception as e:
            logger.warning("Could not fetch tenant timezone, using UTC",
                           tenant_id=str(tenant_id), error=str(e))
            return "UTC"

    async def _get_schedule_by_date(self, session, tenant_id: UUID, schedule_date: date) -> Optional[Dict]:
        """Check if production schedule exists for date"""
        try:
            from app.repositories.production_schedule_repository import ProductionScheduleRepository

            schedule_repo = ProductionScheduleRepository(session)
            schedule = await schedule_repo.get_schedule_by_date(str(tenant_id), schedule_date)

            if schedule:
                return {"id": schedule.id, "status": schedule.status}
            return None

        except Exception as e:
            logger.error("Error checking existing schedule", error=str(e))
            return None

    async def _create_batch_from_requirement(
        self,
        requirement: Dict[str, Any],
        schedule_id: UUID,
        target_date: date
    ) -> ProductionBatchCreate:
        """Create batch data from production requirement"""

        # Map urgency to priority
        urgency_to_priority = {
            "high": ProductionPriority.HIGH,
            "medium": ProductionPriority.MEDIUM,
            "low": ProductionPriority.LOW
        }
        priority = urgency_to_priority.get(requirement.get('urgency', 'medium'), ProductionPriority.MEDIUM)

        # Calculate planned times (start at 6 AM, estimate 2 hours per batch)
        planned_start = datetime.combine(target_date, datetime.min.time().replace(hour=6))
        planned_duration = 120  # 2 hours default

        return ProductionBatchCreate(
            schedule_id=schedule_id,
            product_id=UUID(requirement['product_id']),
            product_name=requirement['product_name'],
            planned_quantity=Decimal(str(requirement['recommended_production'])),
            unit_of_measure="units",
            priority=priority,
            status=ProductionStatus.PLANNED,
            planned_start_time=planned_start,
            planned_duration_minutes=planned_duration,
            notes=f"Auto-generated from demand forecast. Urgency: {requirement.get('urgency', 'medium')}",
            auto_generated=True
        )

    async def run_stale_schedule_cleanup(self):
        """
        Clean up stale production schedules and send reminders
        """
        if not self.is_leader:
            logger.debug("Skipping stale schedule cleanup - not leader")
            return

        try:
            logger.info("🧹 Starting stale schedule cleanup")

            active_tenants = await self.get_active_tenants()
            if not active_tenants:
                logger.info("No active tenants found for cleanup")
                return

            total_archived = 0
            total_cancelled = 0
            total_escalated = 0

            # Process each tenant's stale schedules
            for tenant_id in active_tenants:
                try:
                    stats = await self._cleanup_tenant_schedules(tenant_id)
                    total_archived += stats.get('archived', 0)
                    total_cancelled += stats.get('cancelled', 0)
                    total_escalated += stats.get('escalated', 0)

                except Exception as e:
                    logger.error("Error cleaning up tenant schedules",
                                 tenant_id=str(tenant_id),
                                 error=str(e))

            logger.info("✅ Stale schedule cleanup completed",
                        archived=total_archived,
                        cancelled=total_cancelled,
                        escalated=total_escalated)

        except Exception as e:
            self._errors_count += 1
            logger.error("💥 Stale schedule cleanup failed", error=str(e))

    async def _cleanup_tenant_schedules(self, tenant_id: UUID) -> Dict[str, int]:
        """Cleanup stale schedules for a specific tenant"""
        stats = {"archived": 0, "cancelled": 0, "escalated": 0}

        try:
            from app.repositories.production_schedule_repository import ProductionScheduleRepository

            async with self.db_manager.get_session() as session:
                schedule_repo = ProductionScheduleRepository(session)

                today = date.today()

                # Get all schedules for tenant
                schedules = await schedule_repo.get_all_schedules_for_tenant(tenant_id)

                for schedule in schedules:
                    schedule_age_days = (today - schedule.schedule_date).days

                    # Archive completed schedules older than 90 days
                    if schedule.status == "completed" and schedule_age_days > 90:
                        await schedule_repo.archive_schedule(schedule)
                        stats["archived"] += 1

                    # Cancel draft schedules older than 7 days
                    elif schedule.status == "draft" and schedule_age_days > 7:
                        await schedule_repo.cancel_schedule(schedule, "Auto-cancelled: stale draft schedule")
                        stats["cancelled"] += 1

                    # Escalate overdue schedules
                    elif schedule.schedule_date == today and schedule.status in ['draft', 'pending_approval']:
                        await self._send_schedule_escalation_alert(tenant_id, schedule.id)
                        stats["escalated"] += 1

        except Exception as e:
            logger.error("Error in tenant schedule cleanup",
                         tenant_id=str(tenant_id), error=str(e))

        return stats

    async def send_production_schedule_notification(
        self,
        tenant_id: UUID,
        schedule_id: UUID,
        batches_count: int
    ):
        """Send notification about new production schedule"""
        try:
            alert_data = {
                "type": "production_schedule_created",
                "severity": "low",
                "title": "Nuevo Plan de Producción Generado",
                "message": f"Plan de producción diario creado con {batches_count} lotes programados",
                "metadata": {
                    "tenant_id": str(tenant_id),
                    "schedule_id": str(schedule_id),
                    "batches_count": batches_count,
                    "auto_generated": True
                }
            }

            await self.publish_item(tenant_id, alert_data, item_type='alert')

        except Exception as e:
            logger.error("Error sending schedule notification",
                         tenant_id=str(tenant_id),
                         error=str(e))

    async def _send_schedule_escalation_alert(self, tenant_id: UUID, schedule_id: UUID):
        """Send escalation alert for overdue schedule"""
        try:
            alert_data = {
                "type": "schedule_escalation",
                "severity": "high",
                "title": "Plan de Producción Vencido",
                "message": "Plan de producción para hoy no ha sido procesado - Requiere atención urgente",
                "metadata": {
                    "tenant_id": str(tenant_id),
                    "schedule_id": str(schedule_id),
                    "escalation_level": "urgent"
                }
            }

            await self.publish_item(tenant_id, alert_data, item_type='alert')

        except Exception as e:
            logger.error("Error sending escalation alert", error=str(e))

    async def test_production_schedule_generation(self):
        """Test method to manually trigger production planning"""
        active_tenants = await self.get_active_tenants()
        if not active_tenants:
            logger.error("No active tenants found for testing production schedule generation")
            return

        test_tenant_id = active_tenants[0]
        logger.info("Testing production schedule generation", tenant_id=str(test_tenant_id))

        try:
            await self.process_tenant_production(test_tenant_id)
            logger.info("Test production schedule generation completed successfully")
        except Exception as e:
            logger.error("Test production schedule generation failed",
                         error=str(e), tenant_id=str(test_tenant_id))
@@ -1721,4 +1721,162 @@ class ProductionService:
                tenant_id=str(tenant_id),
                error=str(e)
            )
            raise

    # ================================================================
    # NEW: ORCHESTRATOR INTEGRATION
    # ================================================================

    async def generate_production_schedule_from_forecast(
        self,
        tenant_id: UUID,
        target_date: date,
        forecasts: List[Dict[str, Any]],
        planning_horizon_days: int = 1,
        inventory_data: Optional[Dict[str, Any]] = None,  # cached snapshot passed by the Orchestrator
        recipes_data: Optional[Dict[str, Any]] = None  # cached snapshot passed by the Orchestrator
    ) -> Dict[str, Any]:
        """
        Generate production schedule from forecast data (called by Orchestrator)

        This method receives forecast data from the Orchestrator and generates
        a production schedule with production batches.

        Args:
            tenant_id: Tenant UUID
            target_date: Target production date
            forecasts: List of forecast data with product_id and predicted_demand
            planning_horizon_days: Planning horizon (1-7 days)
            inventory_data: Optional cached inventory snapshot from the Orchestrator
            recipes_data: Optional cached recipes snapshot from the Orchestrator

        Returns:
            Dict with schedule_id, schedule_number, batches_created, etc.
        """
        try:
            logger.info("Generating production schedule from forecast",
                        tenant_id=str(tenant_id),
                        target_date=target_date,
                        forecasts_count=len(forecasts))

            async with self.database_manager.get_session() as session:
                schedule_repo = ProductionScheduleRepository(session)
                batch_repo = ProductionBatchRepository(session)

                # Generate schedule number
                schedule_number = await schedule_repo.generate_schedule_number(tenant_id, target_date)

                # Calculate production end date
                production_end_date = target_date + timedelta(days=planning_horizon_days - 1)

                # Create production schedule
                schedule_data = {
                    'tenant_id': tenant_id,
                    'schedule_number': schedule_number,
                    'schedule_date': target_date,
                    'production_start_date': target_date,
                    'production_end_date': production_end_date,
                    'status': 'draft',
                    'total_batches': 0,
                    'completed_batches': 0,
                    'created_at': datetime.now(timezone.utc),
                    'updated_at': datetime.now(timezone.utc),
                }

                schedule = await schedule_repo.create_schedule(schedule_data)

                # Create production batches from forecasts
                batches_created = 0
                total_planned_quantity = 0.0
                warnings = []

                for forecast in forecasts:
                    try:
                        product_id = UUID(forecast['product_id'])
                        predicted_demand = float(forecast['predicted_demand'])

                        # Get current stock level from inventory
                        stock_info = await self.inventory_client.get_stock_level(
                            str(tenant_id), str(product_id)
                        )

                        current_stock = stock_info.get('current_stock', 0) if stock_info else 0

                        # Calculate production quantity needed
                        # Production needed = Predicted demand - Current stock (if positive)
                        production_needed = max(0, predicted_demand - current_stock)

                        if production_needed <= 0:
                            logger.info("Skipping product - sufficient stock",
                                        product_id=str(product_id),
                                        current_stock=current_stock,
                                        predicted_demand=predicted_demand)
                            warnings.append(f"Product {product_id}: sufficient stock, no production needed")
                            continue

                        # Get recipe for the product (if exists)
                        # Note: In a real scenario, we'd fetch recipe_id from product/inventory
                        # For now, we assume recipe_id = product_id or fetch from a mapping

                        # Create production batch
                        batch_data = {
                            'tenant_id': tenant_id,
                            'schedule_id': schedule.id,
                            'recipe_id': product_id,  # Assuming recipe_id matches product_id
                            'batch_number': await self._generate_batch_number(session, tenant_id, target_date, batches_created + 1),
                            'status': 'scheduled',
                            'priority': 'normal',
                            'planned_start_time': datetime.combine(target_date, datetime.min.time()),
                            'planned_end_time': datetime.combine(target_date, datetime.max.time()),
                            'planned_quantity': production_needed,
                            'created_at': datetime.now(timezone.utc),
                            'updated_at': datetime.now(timezone.utc),
                        }

                        batch = await batch_repo.create_batch(batch_data)

                        batches_created += 1
                        total_planned_quantity += production_needed

                        logger.info("Production batch created from forecast",
                                    batch_id=str(batch.id),
                                    product_id=str(product_id),
                                    planned_quantity=production_needed)

                    except Exception as e:
                        error_msg = f"Error creating batch for product {forecast.get('product_id')}: {str(e)}"
                        logger.warning(error_msg, tenant_id=str(tenant_id))
                        warnings.append(error_msg)
                        continue

                # Update schedule with batch counts
                await schedule_repo.update_schedule(
                    schedule.id,
                    tenant_id,
                    {'total_batches': batches_created}
                )

                logger.info("Production schedule generated successfully",
                            tenant_id=str(tenant_id),
                            schedule_id=str(schedule.id),
                            batches_created=batches_created)

                return {
                    'schedule_id': schedule.id,
                    'schedule_number': schedule.schedule_number,
                    'batches_created': batches_created,
                    'total_planned_quantity': total_planned_quantity,
                    'warnings': warnings
                }

        except Exception as e:
            logger.error("Error generating production schedule from forecast",
                         error=str(e), tenant_id=str(tenant_id))
            raise

    async def _generate_batch_number(
        self,
        session,
        tenant_id: UUID,
        target_date: date,
        batch_index: int
    ) -> str:
        """Generate batch number in format BATCH-YYYYMMDD-NNN"""
        date_str = target_date.strftime("%Y%m%d")
        return f"BATCH-{date_str}-{batch_index:03d}"
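
A quick check of the format the helper emits (illustrative values only):

from datetime import date

# BATCH-YYYYMMDD-NNN with a zero-padded three-digit index
date_str = date(2025, 1, 31).strftime("%Y%m%d")
print(f"BATCH-{date_str}-{1:03d}")  # BATCH-20250131-001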