demo seed change
@@ -8,7 +8,7 @@ Requires: Professional or Enterprise subscription tier
from datetime import date, datetime, timedelta
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Path, Query
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
import structlog

from shared.auth.decorators import get_current_user_dep
@@ -25,10 +25,11 @@ route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-analytics"])


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


# ===== ANALYTICS ENDPOINTS (Professional/Enterprise Only) =====

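The same dependency change recurs in every router touched by this commit: get_production_service now takes the FastAPI Request so it can pull the notification service that main.py stores on app.state during startup. A minimal, self-contained sketch of that pattern, using illustrative names (SomeService, make_service) rather than code from this commit:

from fastapi import FastAPI, APIRouter, Depends, Request

class SomeService:
    def __init__(self, notification_service=None):
        self.notification_service = notification_service

app = FastAPI()
app.state.notification_service = object()   # main.py populates this at startup

router = APIRouter()

def make_service(request: Request) -> SomeService:
    # FastAPI injects Request automatically because the parameter is typed as Request
    notification_service = getattr(request.app.state, "notification_service", None)
    return SomeService(notification_service)

@router.get("/example")
async def example(service: SomeService = Depends(make_service)):
    return {"has_notification_service": service.notification_service is not None}

app.include_router(router)

If app.state does not carry the service (for example in unit tests), getattr falls back to None and the service is constructed without notifications, which is why the diff uses getattr rather than attribute access.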
@@ -13,6 +13,7 @@ from pydantic import BaseModel, Field
import structlog
import asyncio

from fastapi import Request
from app.services.production_service import ProductionService
from app.core.config import settings
from shared.auth.decorators import get_current_user_dep
@@ -21,10 +22,11 @@ router = APIRouter(tags=["production-batch"])
logger = structlog.get_logger()


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


class ProductionSummaryBatchRequest(BaseModel):

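The asyncio import added above is presumably used further down this file, which is not shown in these hunks; batch-summary endpoints typically fan out several independent lookups concurrently. Purely for orientation, and with hypothetical helper names that are not part of this commit, the usual pattern looks like this:

import asyncio

async def fetch_summary(batch_id: str) -> dict:
    # placeholder for one awaitable per-batch lookup
    await asyncio.sleep(0)
    return {"batch_id": batch_id}

async def fetch_all_summaries(batch_ids: list[str]) -> list[dict]:
    # gather runs the coroutines concurrently and preserves input order
    return await asyncio.gather(*(fetch_summary(b) for b in batch_ids))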
@@ -3,7 +3,7 @@
Equipment API - CRUD operations on Equipment model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from uuid import UUID
import structlog
@@ -33,10 +33,11 @@ router = APIRouter(tags=["production-equipment"])
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


@router.get(

@@ -2,6 +2,9 @@
"""
Internal API for triggering production alerts.
Used by demo session cloning to generate realistic production delay alerts.

URL Pattern: /api/v1/tenants/{tenant_id}/production/internal/alerts/trigger
This follows the tenant-scoped pattern so gateway can proxy correctly.
"""

from fastapi import APIRouter, HTTPException, Request, Path
@@ -13,16 +16,20 @@ logger = structlog.get_logger()
router = APIRouter()


@router.post("/api/internal/production-alerts/trigger/{tenant_id}")
# New URL pattern: tenant-scoped so gateway proxies to production service correctly
@router.post("/api/v1/tenants/{tenant_id}/production/internal/alerts/trigger")
async def trigger_production_alerts(
tenant_id: UUID = Path(..., description="Tenant ID to check production for"),
request: Request = None
) -> dict:
"""
Trigger production alert checks for a specific tenant (internal use only).
Trigger comprehensive production alert checks for a specific tenant (internal use only).

This endpoint is called by the demo session cloning process after production
batches are seeded to generate realistic production delay alerts.
batches are seeded to generate realistic production alerts including:
- Production delays
- Equipment maintenance alerts
- Batch start delays

Security: Protected by X-Internal-Service header check.
"""
@@ -35,40 +42,36 @@ async def trigger_production_alerts(
detail="This endpoint is for internal service use only"
)

# Get production alert service from app state
production_alert_service = getattr(request.app.state, 'production_alert_service', None)
# Get production scheduler from app state
production_scheduler = getattr(request.app.state, 'production_scheduler', None)

if not production_alert_service:
logger.error("Production alert service not initialized")
if not production_scheduler:
logger.error("Production scheduler not initialized")
raise HTTPException(
status_code=500,
detail="Production alert service not available"
detail="Production scheduler not available"
)

# Trigger production alert checks (checks all tenants, including this one)
logger.info("Triggering production alert checks", tenant_id=str(tenant_id))
await production_alert_service.check_production_delays()
# Trigger comprehensive production alert checks for the specific tenant
logger.info("Triggering comprehensive production alert checks", tenant_id=str(tenant_id))

# Return success (service checks all tenants, we can't get specific count)
result = {"total_alerts": 0, "message": "Production alert checks triggered"}
# Call the scheduler's manual trigger method
result = await production_scheduler.trigger_manual_check(tenant_id)

logger.info(
"Production alert checks completed",
tenant_id=str(tenant_id),
alerts_generated=result.get("total_alerts", 0)
)
if result.get("success", False):
logger.info(
"Production alert checks completed successfully",
tenant_id=str(tenant_id),
alerts_generated=result.get("alerts_generated", 0)
)
else:
logger.error(
"Production alert checks failed",
tenant_id=str(tenant_id),
error=result.get("error", "Unknown error")
)

return {
"success": True,
"tenant_id": str(tenant_id),
"alerts_generated": result.get("total_alerts", 0),
"breakdown": {
"critical": result.get("critical", 0),
"high": result.get("high", 0),
"medium": result.get("medium", 0),
"low": result.get("low", 0)
}
}
return result

except HTTPException:
raise

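For reference, a caller-side sketch of invoking the re-scoped endpoint from another service. The host, port and header value are placeholders: the only requirement visible in this hunk is that an X-Internal-Service header acceptable to the check must be present, and the route is tenant-scoped so the gateway can proxy it.

import httpx

async def trigger_alerts_for_tenant(tenant_id: str) -> dict:
    # base URL is an assumption; in practice the gateway proxies
    # /api/v1/tenants/{tenant_id}/production/... to the production service
    url = f"http://production-service:8000/api/v1/tenants/{tenant_id}/production/internal/alerts/trigger"
    async with httpx.AsyncClient() as client:
        resp = await client.post(url, headers={"X-Internal-Service": "demo-session"})
        resp.raise_for_status()
        return resp.json()  # scheduler result: success, alerts_generated, ...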
@@ -8,9 +8,12 @@ from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, delete, func
import structlog
import uuid
from uuid import UUID
from datetime import datetime, timezone, timedelta
from typing import Optional, Dict, Any
import os
import json
from pathlib import Path

from app.core.database import get_db
from app.models.production import (
@@ -19,12 +22,12 @@ from app.models.production import (
ProductionStatus, ProductionPriority, ProcessStage,
EquipmentStatus, EquipmentType
)
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE, resolve_time_marker

from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter(prefix="/internal/demo", tags=["internal"])
router = APIRouter()

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
@@ -38,7 +41,7 @@ def verify_internal_api_key(x_internal_api_key: Optional[str] = Header(None)):
return True


@router.post("/clone")
@router.post("/internal/demo/clone")
async def clone_demo_data(
base_tenant_id: str,
virtual_tenant_id: str,
@@ -91,12 +94,11 @@ async def clone_demo_data(

try:
# Validate UUIDs
base_uuid = uuid.UUID(base_tenant_id)
virtual_uuid = uuid.UUID(virtual_tenant_id)

# Track cloning statistics
stats = {
"production_batches": 0,
"batches": 0,
"production_schedules": 0,
"production_capacity": 0,
"quality_check_templates": 0,
@@ -105,63 +107,137 @@ async def clone_demo_data(
"alerts_generated": 0
}

# ID mappings
batch_id_map = {}
template_id_map = {}
equipment_id_map = {}
def parse_date_field(date_value, field_name="date"):
"""Parse date field, handling both ISO strings and BASE_TS markers"""
if not date_value:
return None

# Check if it's a BASE_TS marker
if isinstance(date_value, str) and date_value.startswith("BASE_TS"):
try:
return resolve_time_marker(date_value, session_time)
except ValueError as e:
logger.warning(
f"Invalid BASE_TS marker in {field_name}",
marker=date_value,
error=str(e)
)
return None

# Handle regular ISO date strings
try:
return adjust_date_for_demo(
datetime.fromisoformat(date_value.replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
except (ValueError, AttributeError) as e:
logger.warning(
f"Invalid date format in {field_name}",
date_value=date_value,
error=str(e)
)
return None

# Clone Equipment first (no dependencies)
result = await db.execute(
select(Equipment).where(Equipment.tenant_id == base_uuid)
)
base_equipment = result.scalars().all()
# Load seed data from JSON files
try:
from shared.utils.seed_data_paths import get_seed_data_path

if demo_account_type == "professional":
json_file = get_seed_data_path("professional", "06-production.json")
elif demo_account_type == "enterprise":
json_file = get_seed_data_path("enterprise", "06-production.json")
else:
raise ValueError(f"Invalid demo account type: {demo_account_type}")

logger.info(
"Found equipment to clone",
count=len(base_equipment),
base_tenant=str(base_uuid)
)
except ImportError:
# Fallback to original path
seed_data_dir = Path(__file__).parent.parent.parent.parent / "infrastructure" / "seed-data"
if demo_account_type == "professional":
json_file = seed_data_dir / "professional" / "06-production.json"
elif demo_account_type == "enterprise":
json_file = seed_data_dir / "enterprise" / "parent" / "06-production.json"
else:
raise ValueError(f"Invalid demo account type: {demo_account_type}")

for equipment in base_equipment:
new_equipment_id = uuid.uuid4()
equipment_id_map[equipment.id] = new_equipment_id
if not json_file.exists():
raise HTTPException(
status_code=404,
detail=f"Seed data file not found: {json_file}"
)

# Load JSON data
with open(json_file, 'r', encoding='utf-8') as f:
seed_data = json.load(f)

# Create Equipment first (no dependencies)
for equipment_data in seed_data.get('equipment', []):
# Transform equipment ID using XOR
from shared.utils.demo_id_transformer import transform_id
try:
equipment_uuid = UUID(equipment_data['id'])
transformed_id = transform_id(equipment_data['id'], virtual_uuid)
except ValueError as e:
logger.error("Failed to parse equipment UUID",
equipment_id=equipment_data['id'],
error=str(e))
raise HTTPException(
status_code=400,
detail=f"Invalid UUID format in equipment data: {str(e)}"
)

# Adjust dates relative to session creation time
adjusted_install_date = adjust_date_for_demo(
equipment.install_date, session_time, BASE_REFERENCE_DATE
datetime.fromisoformat(equipment_data['install_date'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_last_maintenance = adjust_date_for_demo(
equipment.last_maintenance_date, session_time, BASE_REFERENCE_DATE
datetime.fromisoformat(equipment_data['last_maintenance_date'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_next_maintenance = adjust_date_for_demo(
equipment.next_maintenance_date, session_time, BASE_REFERENCE_DATE
datetime.fromisoformat(equipment_data['next_maintenance_date'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_created_at = adjust_date_for_demo(
datetime.fromisoformat(equipment_data['created_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_updated_at = adjust_date_for_demo(
datetime.fromisoformat(equipment_data['updated_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)

new_equipment = Equipment(
id=new_equipment_id,
id=str(transformed_id),
tenant_id=virtual_uuid,
name=equipment.name,
type=equipment.type,
model=equipment.model,
serial_number=equipment.serial_number,
location=equipment.location,
status=equipment.status,
name=equipment_data['name'],
type=equipment_data['type'],
model=equipment_data['model'],
serial_number=equipment_data.get('serial_number'),
location=equipment_data['location'],
status=equipment_data['status'],
install_date=adjusted_install_date,
last_maintenance_date=adjusted_last_maintenance,
next_maintenance_date=adjusted_next_maintenance,
maintenance_interval_days=equipment.maintenance_interval_days,
efficiency_percentage=equipment.efficiency_percentage,
uptime_percentage=equipment.uptime_percentage,
energy_usage_kwh=equipment.energy_usage_kwh,
power_kw=equipment.power_kw,
capacity=equipment.capacity,
weight_kg=equipment.weight_kg,
current_temperature=equipment.current_temperature,
target_temperature=equipment.target_temperature,
is_active=equipment.is_active,
notes=equipment.notes,
created_at=session_time,
updated_at=session_time
maintenance_interval_days=equipment_data.get('maintenance_interval_days'),
efficiency_percentage=equipment_data.get('efficiency_percentage'),
uptime_percentage=equipment_data.get('uptime_percentage'),
energy_usage_kwh=equipment_data.get('energy_usage_kwh'),
power_kw=equipment_data.get('power_kw'),
capacity=equipment_data.get('capacity'),
weight_kg=equipment_data.get('weight_kg'),
current_temperature=equipment_data.get('current_temperature'),
target_temperature=equipment_data.get('target_temperature'),
is_active=equipment_data.get('is_active', True),
notes=equipment_data.get('notes'),
created_at=adjusted_created_at,
updated_at=adjusted_updated_at
)
db.add(new_equipment)
stats["equipment"] += 1
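The .replace('Z', '+00:00') step repeated above (and throughout the rest of this file) exists because datetime.fromisoformat only accepts a trailing "Z" from Python 3.11 onward; on earlier interpreters the Zulu suffix has to be rewritten as an explicit UTC offset before parsing. A minimal illustration:

from datetime import datetime

raw = "2025-01-15T08:30:00Z"
# Works on Python 3.7+: rewrite the Zulu suffix as an explicit offset
parsed = datetime.fromisoformat(raw.replace("Z", "+00:00"))
print(parsed.tzinfo)  # UTC (offset +00:00)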
@@ -170,17 +246,17 @@ async def clone_demo_data(
await db.flush()

# Clone Quality Check Templates
result = await db.execute(
select(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == base_uuid)
)
base_templates = result.scalars().all()
# Note: Quality check templates are not included in seed data
# They would need to be added to the production seed data if needed
template_id_map = {}
base_templates = []

logger.info(
"Found quality check templates to clone",
count=len(base_templates),
base_tenant=str(base_uuid)
"No quality check templates to clone (not in seed data)",
count=len(base_templates)
)

# Only create templates if they exist in base templates
for template in base_templates:
new_template_id = uuid.uuid4()
template_id_map[template.id] = new_template_id
@@ -217,253 +293,333 @@ async def clone_demo_data(
# Flush to get template IDs
await db.flush()

# Clone Production Batches
result = await db.execute(
select(ProductionBatch).where(ProductionBatch.tenant_id == base_uuid)
)
base_batches = result.scalars().all()
# Clone Production Batches from seed data
batch_id_map = {}
for batch_data in seed_data.get('batches', []):
# Transform batch ID using XOR
from shared.utils.demo_id_transformer import transform_id
try:
batch_uuid = UUID(batch_data['id'])
transformed_id = transform_id(batch_data['id'], virtual_uuid)
except ValueError as e:
logger.error("Failed to parse batch UUID",
batch_id=batch_data['id'],
error=str(e))
raise HTTPException(
status_code=400,
detail=f"Invalid UUID format in batch data: {str(e)}"
)

logger.info(
"Found production batches to clone",
count=len(base_batches),
base_tenant=str(base_uuid)
)

for batch in base_batches:
new_batch_id = uuid.uuid4()
batch_id_map[batch.id] = new_batch_id
batch_id_map[UUID(batch_data['id'])] = transformed_id

# Adjust dates relative to session creation time
adjusted_planned_start = adjust_date_for_demo(
batch.planned_start_time, session_time, BASE_REFERENCE_DATE
) if batch.planned_start_time else None
adjusted_planned_end = adjust_date_for_demo(
batch.planned_end_time, session_time, BASE_REFERENCE_DATE
) if batch.planned_end_time else None
adjusted_actual_start = adjust_date_for_demo(
batch.actual_start_time, session_time, BASE_REFERENCE_DATE
) if batch.actual_start_time else None
adjusted_actual_end = adjust_date_for_demo(
batch.actual_end_time, session_time, BASE_REFERENCE_DATE
) if batch.actual_end_time else None
adjusted_completed = adjust_date_for_demo(
batch.completed_at, session_time, BASE_REFERENCE_DATE
) if batch.completed_at else None
adjusted_planned_start = parse_date_field(batch_data.get('planned_start_time'), "planned_start_time")
adjusted_planned_end = parse_date_field(batch_data.get('planned_end_time'), "planned_end_time")
adjusted_actual_start = parse_date_field(batch_data.get('actual_start_time'), "actual_start_time")
adjusted_actual_end = parse_date_field(batch_data.get('actual_end_time'), "actual_end_time")
adjusted_completed = parse_date_field(batch_data.get('completed_at'), "completed_at")
adjusted_created_at = parse_date_field(batch_data.get('created_at'), "created_at") or session_time
adjusted_updated_at = parse_date_field(batch_data.get('updated_at'), "updated_at") or adjusted_created_at

# Map status and priority enums
status_value = batch_data.get('status', 'PENDING')
if isinstance(status_value, str):
try:
status_value = ProductionStatus[status_value]
except KeyError:
status_value = ProductionStatus.PENDING

priority_value = batch_data.get('priority', 'MEDIUM')
if isinstance(priority_value, str):
try:
priority_value = ProductionPriority[priority_value]
except KeyError:
priority_value = ProductionPriority.MEDIUM

# Map process stage enum
process_stage_value = batch_data.get('current_process_stage')
if process_stage_value and isinstance(process_stage_value, str):
try:
process_stage_value = ProcessStage[process_stage_value]
except KeyError:
process_stage_value = None

new_batch = ProductionBatch(
id=new_batch_id,
id=str(transformed_id),
tenant_id=virtual_uuid,
batch_number=f"BATCH-{uuid.uuid4().hex[:8].upper()}", # New batch number
product_id=batch.product_id, # Keep product reference
product_name=batch.product_name,
recipe_id=batch.recipe_id, # Keep recipe reference
batch_number=f"{session_id[:8]}-{batch_data.get('batch_number', f'BATCH-{uuid.uuid4().hex[:8].upper()}')}",
product_id=batch_data.get('product_id'),
product_name=batch_data.get('product_name'),
recipe_id=batch_data.get('recipe_id'),
planned_start_time=adjusted_planned_start,
planned_end_time=adjusted_planned_end,
planned_quantity=batch.planned_quantity,
planned_duration_minutes=batch.planned_duration_minutes,
planned_quantity=batch_data.get('planned_quantity'),
planned_duration_minutes=batch_data.get('planned_duration_minutes'),
actual_start_time=adjusted_actual_start,
actual_end_time=adjusted_actual_end,
actual_quantity=batch.actual_quantity,
actual_duration_minutes=batch.actual_duration_minutes,
status=batch.status,
priority=batch.priority,
current_process_stage=batch.current_process_stage,
process_stage_history=batch.process_stage_history,
pending_quality_checks=batch.pending_quality_checks,
completed_quality_checks=batch.completed_quality_checks,
estimated_cost=batch.estimated_cost,
actual_cost=batch.actual_cost,
labor_cost=batch.labor_cost,
material_cost=batch.material_cost,
overhead_cost=batch.overhead_cost,
yield_percentage=batch.yield_percentage,
quality_score=batch.quality_score,
waste_quantity=batch.waste_quantity,
defect_quantity=batch.defect_quantity,
equipment_used=batch.equipment_used,
staff_assigned=batch.staff_assigned,
station_id=batch.station_id,
order_id=batch.order_id,
forecast_id=batch.forecast_id,
is_rush_order=batch.is_rush_order,
is_special_recipe=batch.is_special_recipe,
production_notes=batch.production_notes,
quality_notes=batch.quality_notes,
delay_reason=batch.delay_reason,
cancellation_reason=batch.cancellation_reason,
created_at=session_time,
updated_at=session_time,
actual_quantity=batch_data.get('actual_quantity'),
actual_duration_minutes=batch_data.get('actual_duration_minutes'),
status=status_value,
priority=priority_value,
current_process_stage=process_stage_value,
process_stage_history=batch_data.get('process_stage_history'),
pending_quality_checks=batch_data.get('pending_quality_checks'),
completed_quality_checks=batch_data.get('completed_quality_checks'),
estimated_cost=batch_data.get('estimated_cost'),
actual_cost=batch_data.get('actual_cost'),
labor_cost=batch_data.get('labor_cost'),
material_cost=batch_data.get('material_cost'),
overhead_cost=batch_data.get('overhead_cost'),
yield_percentage=batch_data.get('yield_percentage'),
quality_score=batch_data.get('quality_score'),
waste_quantity=batch_data.get('waste_quantity'),
defect_quantity=batch_data.get('defect_quantity'),
equipment_used=batch_data.get('equipment_used'),
staff_assigned=batch_data.get('staff_assigned'),
station_id=batch_data.get('station_id'),
order_id=batch_data.get('order_id'),
forecast_id=batch_data.get('forecast_id'),
is_rush_order=batch_data.get('is_rush_order', False),
is_special_recipe=batch_data.get('is_special_recipe', False),
production_notes=batch_data.get('production_notes'),
quality_notes=batch_data.get('quality_notes'),
delay_reason=batch_data.get('delay_reason'),
cancellation_reason=batch_data.get('cancellation_reason'),
created_at=adjusted_created_at,
updated_at=adjusted_updated_at,
completed_at=adjusted_completed
)
db.add(new_batch)
stats["production_batches"] += 1
stats["batches"] += 1

# Flush to get batch IDs
await db.flush()

# Clone Quality Checks
result = await db.execute(
select(QualityCheck).where(QualityCheck.tenant_id == base_uuid)
)
base_checks = result.scalars().all()
# Clone Quality Checks from seed data (if any)
for check_data in seed_data.get('quality_checks', []):
# Transform IDs
from shared.utils.demo_id_transformer import transform_id
try:
check_uuid = UUID(check_data['id'])
transformed_id = transform_id(check_data['id'], virtual_uuid)
except ValueError as e:
logger.error("Failed to parse check UUID",
check_id=check_data['id'],
error=str(e))
continue

logger.info(
"Found quality checks to clone",
count=len(base_checks),
base_tenant=str(base_uuid)
)
# Map batch_id if it exists in our map
batch_id_value = check_data.get('batch_id')
if batch_id_value:
batch_id_value = batch_id_map.get(UUID(batch_id_value), UUID(batch_id_value))

for check in base_checks:
new_batch_id = batch_id_map.get(check.batch_id, check.batch_id)
new_template_id = template_id_map.get(check.template_id, check.template_id) if check.template_id else None
# Map template_id if it exists
template_id_value = check_data.get('template_id')
if template_id_value:
template_id_value = template_id_map.get(UUID(template_id_value), UUID(template_id_value))

# Adjust check time relative to session creation time
adjusted_check_time = adjust_date_for_demo(
check.check_time, session_time, BASE_REFERENCE_DATE
) if check.check_time else None
datetime.fromisoformat(check_data['check_time'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if check_data.get('check_time') else None

adjusted_created_at = adjust_date_for_demo(
datetime.fromisoformat(check_data['created_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_updated_at = adjust_date_for_demo(
datetime.fromisoformat(check_data['updated_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if check_data.get('updated_at') else adjusted_created_at

new_check = QualityCheck(
id=uuid.uuid4(),
id=str(transformed_id),
tenant_id=virtual_uuid,
batch_id=new_batch_id,
template_id=new_template_id,
check_type=check.check_type,
process_stage=check.process_stage,
batch_id=str(batch_id_value) if batch_id_value else None,
template_id=str(template_id_value) if template_id_value else None,
check_type=check_data.get('check_type'),
process_stage=check_data.get('process_stage'),
check_time=adjusted_check_time,
checker_id=check.checker_id,
quality_score=check.quality_score,
pass_fail=check.pass_fail,
defect_count=check.defect_count,
defect_types=check.defect_types,
measured_weight=check.measured_weight,
measured_temperature=check.measured_temperature,
measured_moisture=check.measured_moisture,
measured_dimensions=check.measured_dimensions,
stage_specific_data=check.stage_specific_data,
target_weight=check.target_weight,
target_temperature=check.target_temperature,
target_moisture=check.target_moisture,
tolerance_percentage=check.tolerance_percentage,
within_tolerance=check.within_tolerance,
corrective_action_needed=check.corrective_action_needed,
corrective_actions=check.corrective_actions,
template_results=check.template_results,
criteria_scores=check.criteria_scores,
check_notes=check.check_notes,
photos_urls=check.photos_urls,
certificate_url=check.certificate_url,
created_at=session_time,
updated_at=session_time
checker_id=check_data.get('checker_id'),
quality_score=check_data.get('quality_score'),
pass_fail=check_data.get('pass_fail'),
defect_count=check_data.get('defect_count'),
defect_types=check_data.get('defect_types'),
measured_weight=check_data.get('measured_weight'),
measured_temperature=check_data.get('measured_temperature'),
measured_moisture=check_data.get('measured_moisture'),
measured_dimensions=check_data.get('measured_dimensions'),
stage_specific_data=check_data.get('stage_specific_data'),
target_weight=check_data.get('target_weight'),
target_temperature=check_data.get('target_temperature'),
target_moisture=check_data.get('target_moisture'),
tolerance_percentage=check_data.get('tolerance_percentage'),
within_tolerance=check_data.get('within_tolerance'),
corrective_action_needed=check_data.get('corrective_action_needed'),
corrective_actions=check_data.get('corrective_actions'),
template_results=check_data.get('template_results'),
criteria_scores=check_data.get('criteria_scores'),
check_notes=check_data.get('check_notes'),
photos_urls=check_data.get('photos_urls'),
certificate_url=check_data.get('certificate_url'),
created_at=adjusted_created_at,
updated_at=adjusted_updated_at
)
db.add(new_check)
stats["quality_checks"] += 1

# Clone Production Schedules
result = await db.execute(
select(ProductionSchedule).where(ProductionSchedule.tenant_id == base_uuid)
)
base_schedules = result.scalars().all()
# Clone Production Schedules from seed data (if any)
for schedule_data in seed_data.get('production_schedules', []):
# Transform IDs
from shared.utils.demo_id_transformer import transform_id
try:
schedule_uuid = UUID(schedule_data['id'])
transformed_id = transform_id(schedule_data['id'], virtual_uuid)
except ValueError as e:
logger.error("Failed to parse schedule UUID",
schedule_id=schedule_data['id'],
error=str(e))
continue

logger.info(
"Found production schedules to clone",
count=len(base_schedules),
base_tenant=str(base_uuid)
)

for schedule in base_schedules:
# Adjust schedule dates relative to session creation time
adjusted_schedule_date = adjust_date_for_demo(
schedule.schedule_date, session_time, BASE_REFERENCE_DATE
) if schedule.schedule_date else None
datetime.fromisoformat(schedule_data['schedule_date'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if schedule_data.get('schedule_date') else None
adjusted_shift_start = adjust_date_for_demo(
schedule.shift_start, session_time, BASE_REFERENCE_DATE
) if schedule.shift_start else None
datetime.fromisoformat(schedule_data['shift_start'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if schedule_data.get('shift_start') else None
adjusted_shift_end = adjust_date_for_demo(
schedule.shift_end, session_time, BASE_REFERENCE_DATE
) if schedule.shift_end else None
datetime.fromisoformat(schedule_data['shift_end'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if schedule_data.get('shift_end') else None
adjusted_finalized = adjust_date_for_demo(
schedule.finalized_at, session_time, BASE_REFERENCE_DATE
) if schedule.finalized_at else None
datetime.fromisoformat(schedule_data['finalized_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if schedule_data.get('finalized_at') else None
adjusted_created_at = adjust_date_for_demo(
datetime.fromisoformat(schedule_data['created_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_updated_at = adjust_date_for_demo(
datetime.fromisoformat(schedule_data['updated_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if schedule_data.get('updated_at') else adjusted_created_at

new_schedule = ProductionSchedule(
id=uuid.uuid4(),
id=str(transformed_id),
tenant_id=virtual_uuid,
schedule_date=adjusted_schedule_date,
shift_start=adjusted_shift_start,
shift_end=adjusted_shift_end,
total_capacity_hours=schedule.total_capacity_hours,
planned_capacity_hours=schedule.planned_capacity_hours,
actual_capacity_hours=schedule.actual_capacity_hours,
overtime_hours=schedule.overtime_hours,
staff_count=schedule.staff_count,
equipment_capacity=schedule.equipment_capacity,
station_assignments=schedule.station_assignments,
total_batches_planned=schedule.total_batches_planned,
total_batches_completed=schedule.total_batches_completed,
total_quantity_planned=schedule.total_quantity_planned,
total_quantity_produced=schedule.total_quantity_produced,
is_finalized=schedule.is_finalized,
is_active=schedule.is_active,
efficiency_percentage=schedule.efficiency_percentage,
utilization_percentage=schedule.utilization_percentage,
on_time_completion_rate=schedule.on_time_completion_rate,
schedule_notes=schedule.schedule_notes,
schedule_adjustments=schedule.schedule_adjustments,
created_at=session_time,
updated_at=session_time,
total_capacity_hours=schedule_data.get('total_capacity_hours'),
planned_capacity_hours=schedule_data.get('planned_capacity_hours'),
actual_capacity_hours=schedule_data.get('actual_capacity_hours'),
overtime_hours=schedule_data.get('overtime_hours', 0.0),
staff_count=schedule_data.get('staff_count'),
equipment_capacity=schedule_data.get('equipment_capacity'),
station_assignments=schedule_data.get('station_assignments'),
total_batches_planned=schedule_data.get('total_batches_planned', 0),
total_batches_completed=schedule_data.get('total_batches_completed', 0),
total_quantity_planned=schedule_data.get('total_quantity_planned', 0.0),
total_quantity_produced=schedule_data.get('total_quantity_produced', 0.0),
is_finalized=schedule_data.get('is_finalized', False),
is_active=schedule_data.get('is_active', True),
efficiency_percentage=schedule_data.get('efficiency_percentage'),
utilization_percentage=schedule_data.get('utilization_percentage'),
on_time_completion_rate=schedule_data.get('on_time_completion_rate'),
schedule_notes=schedule_data.get('schedule_notes'),
schedule_adjustments=schedule_data.get('schedule_adjustments'),
created_at=adjusted_created_at,
updated_at=adjusted_updated_at,
finalized_at=adjusted_finalized
)
db.add(new_schedule)
stats["production_schedules"] += 1

# Clone Production Capacity
result = await db.execute(
select(ProductionCapacity).where(ProductionCapacity.tenant_id == base_uuid)
)
base_capacity = result.scalars().all()
# Clone Production Capacity from seed data (if any)
for capacity_data in seed_data.get('production_capacity', []):
# Transform IDs
from shared.utils.demo_id_transformer import transform_id
try:
capacity_uuid = UUID(capacity_data['id'])
transformed_id = transform_id(capacity_data['id'], virtual_uuid)
except ValueError as e:
logger.error("Failed to parse capacity UUID",
capacity_id=capacity_data['id'],
error=str(e))
continue

for capacity in base_capacity:
# Adjust capacity dates relative to session creation time
adjusted_date = adjust_date_for_demo(
capacity.date, session_time, BASE_REFERENCE_DATE
) if capacity.date else None
datetime.fromisoformat(capacity_data['date'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if capacity_data.get('date') else None
adjusted_start_time = adjust_date_for_demo(
capacity.start_time, session_time, BASE_REFERENCE_DATE
) if capacity.start_time else None
datetime.fromisoformat(capacity_data['start_time'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if capacity_data.get('start_time') else None
adjusted_end_time = adjust_date_for_demo(
capacity.end_time, session_time, BASE_REFERENCE_DATE
) if capacity.end_time else None
datetime.fromisoformat(capacity_data['end_time'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if capacity_data.get('end_time') else None
adjusted_last_maintenance = adjust_date_for_demo(
capacity.last_maintenance_date, session_time, BASE_REFERENCE_DATE
) if capacity.last_maintenance_date else None
datetime.fromisoformat(capacity_data['last_maintenance_date'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if capacity_data.get('last_maintenance_date') else None
adjusted_created_at = adjust_date_for_demo(
datetime.fromisoformat(capacity_data['created_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
)
adjusted_updated_at = adjust_date_for_demo(
datetime.fromisoformat(capacity_data['updated_at'].replace('Z', '+00:00')),
session_time,
BASE_REFERENCE_DATE
) if capacity_data.get('updated_at') else adjusted_created_at

new_capacity = ProductionCapacity(
id=uuid.uuid4(),
id=str(transformed_id),
tenant_id=virtual_uuid,
resource_type=capacity.resource_type,
resource_id=capacity.resource_id,
resource_name=capacity.resource_name,
resource_type=capacity_data.get('resource_type'),
resource_id=capacity_data.get('resource_id'),
resource_name=capacity_data.get('resource_name'),
date=adjusted_date,
start_time=adjusted_start_time,
end_time=adjusted_end_time,
total_capacity_units=capacity.total_capacity_units,
allocated_capacity_units=capacity.allocated_capacity_units,
remaining_capacity_units=capacity.remaining_capacity_units,
is_available=capacity.is_available,
is_maintenance=capacity.is_maintenance,
is_reserved=capacity.is_reserved,
equipment_type=capacity.equipment_type,
max_batch_size=capacity.max_batch_size,
min_batch_size=capacity.min_batch_size,
setup_time_minutes=capacity.setup_time_minutes,
cleanup_time_minutes=capacity.cleanup_time_minutes,
efficiency_rating=capacity.efficiency_rating,
maintenance_status=capacity.maintenance_status,
total_capacity_units=capacity_data.get('total_capacity_units'),
allocated_capacity_units=capacity_data.get('allocated_capacity_units'),
remaining_capacity_units=capacity_data.get('remaining_capacity_units'),
is_available=capacity_data.get('is_available'),
is_maintenance=capacity_data.get('is_maintenance'),
is_reserved=capacity_data.get('is_reserved'),
equipment_type=capacity_data.get('equipment_type'),
max_batch_size=capacity_data.get('max_batch_size'),
min_batch_size=capacity_data.get('min_batch_size'),
setup_time_minutes=capacity_data.get('setup_time_minutes'),
cleanup_time_minutes=capacity_data.get('cleanup_time_minutes'),
efficiency_rating=capacity_data.get('efficiency_rating'),
maintenance_status=capacity_data.get('maintenance_status'),
last_maintenance_date=adjusted_last_maintenance,
notes=capacity.notes,
restrictions=capacity.restrictions,
created_at=session_time,
updated_at=session_time
notes=capacity_data.get('notes'),
restrictions=capacity_data.get('restrictions'),
created_at=adjusted_created_at,
updated_at=adjusted_updated_at
)
db.add(new_capacity)
stats["production_capacity"] += 1
@@ -477,7 +633,7 @@ async def clone_demo_data(
stats["alerts_generated"] = 0

# Calculate total from non-alert stats
total_records = (stats["equipment"] + stats["production_batches"] + stats["production_schedules"] +
total_records = (stats["equipment"] + stats["batches"] + stats["production_schedules"] +
stats["quality_check_templates"] + stats["quality_checks"] +
stats["production_capacity"])
duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

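transform_id from shared.utils.demo_id_transformer is only imported and called in this file; the comments describe it as an XOR transform, so the apparent intent is a deterministic, collision-free mapping from the seed IDs to per-session IDs. A rough sketch of what such a helper could look like — an assumption for orientation, not the actual implementation:

from uuid import UUID

def transform_id(seed_id: str, virtual_tenant_id: UUID) -> UUID:
    # XOR the 128-bit seed UUID with the virtual tenant UUID so the same
    # seed row maps to a stable, session-unique ID in every cloned tenant
    return UUID(int=UUID(seed_id).int ^ virtual_tenant_id.int)

Because XOR is its own inverse, applying the same transform with the same tenant ID recovers the original seed ID, which makes cross-references (batch_id, template_id) easy to remap consistently.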
@@ -237,9 +237,8 @@ async def trigger_yield_prediction(
logger.error(error_msg, exc_info=True)
errors.append(error_msg)

# Close orchestrator and clients
# Close orchestrator
await orchestrator.close()
await recipes_client.close()

# Build response
response = YieldPredictionResponse(
@@ -286,3 +285,89 @@ async def ml_insights_health():
"POST /ml/insights/predict-yields"
]
}


# ================================================================
# INTERNAL ENDPOINTS (for demo-session service)
# ================================================================

from fastapi import Request

# Create a separate router for internal endpoints to avoid the tenant prefix
internal_router = APIRouter(
tags=["ML Insights - Internal"]
)


@internal_router.post("/api/v1/tenants/{tenant_id}/production/internal/ml/generate-yield-insights")
async def generate_yield_insights_internal(
tenant_id: str,
request: Request,
db: AsyncSession = Depends(get_db)
):
"""
Internal endpoint to trigger yield insights generation for demo sessions.

This endpoint is called by the demo-session service after cloning data.
It uses the same ML logic as the public endpoint but with optimized defaults.

Security: Protected by X-Internal-Service header check.

Args:
tenant_id: The tenant UUID
request: FastAPI request object
db: Database session

Returns:
{
"insights_posted": int,
"tenant_id": str,
"status": str
}
"""
# Verify internal service header
if not request or request.headers.get("X-Internal-Service") not in ["demo-session", "internal"]:
logger.warning("Unauthorized internal API call", tenant_id=tenant_id)
raise HTTPException(
status_code=403,
detail="This endpoint is for internal service use only"
)

logger.info("Internal yield insights generation triggered", tenant_id=tenant_id)

try:
# Use the existing yield prediction logic with sensible defaults
request_data = YieldPredictionRequest(
recipe_ids=None, # Analyze all recipes
lookback_days=90, # 3 months of history
min_history_runs=20 # Minimum 20 production runs required
)

# Call the existing yield prediction endpoint logic
result = await trigger_yield_prediction(
tenant_id=tenant_id,
request_data=request_data,
db=db
)

# Return simplified response for internal use
return {
"insights_posted": result.total_insights_posted,
"tenant_id": tenant_id,
"status": "success" if result.success else "failed",
"message": result.message,
"recipes_analyzed": result.recipes_analyzed,
"recipes_with_issues": result.recipes_with_issues
}

except Exception as e:
logger.error(
"Internal yield insights generation failed",
tenant_id=tenant_id,
error=str(e),
exc_info=True
)
raise HTTPException(
status_code=500,
detail=f"Internal yield insights generation failed: {str(e)}"
)

@@ -6,7 +6,7 @@ Production Orchestrator API - Endpoints for orchestrated production scheduling
Called by the Orchestrator Service to generate production schedules from forecast data
"""

from fastapi import APIRouter, Depends, HTTPException, Path
from fastapi import APIRouter, Depends, HTTPException, Path, Request
from typing import Optional, Dict, Any, List
from datetime import date
from uuid import UUID
@@ -23,10 +23,11 @@ route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-orchestrator"])


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


# ================================================================

@@ -3,7 +3,7 @@
Production Batches API - ATOMIC CRUD operations on ProductionBatch model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from datetime import date
from uuid import UUID
@@ -26,8 +26,19 @@ from app.schemas.production import (
)
from app.core.config import settings
from app.utils.cache import get_cached, set_cached, make_cache_key
from app.services.production_alert_service import ProductionAlertService

logger = structlog.get_logger()


async def get_production_alert_service(request: Request) -> ProductionAlertService:
"""Dependency injection for production alert service"""
# Get the alert service from app state, which is where it's stored during app startup
alert_service = getattr(request.app.state, 'production_alert_service', None)
if not alert_service:
logger.warning("Production alert service not available in app state")
return None
return alert_service
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-batches"])

@@ -35,10 +46,11 @@ router = APIRouter(tags=["production-batches"])
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


@router.get(
@@ -108,12 +120,60 @@ async def create_production_batch(
batch_data: ProductionBatchCreate,
tenant_id: UUID = Path(...),
current_user: dict = Depends(get_current_user_dep),
production_service: ProductionService = Depends(get_production_service)
production_service: ProductionService = Depends(get_production_service),
request: Request = None,
alert_service: ProductionAlertService = Depends(get_production_alert_service)
):
"""Create a new production batch"""
try:
batch = await production_service.create_production_batch(tenant_id, batch_data)

# Trigger Start Production alert
if alert_service:
try:
# Generate reasoning data for the batch
reasoning_data = {
"type": "manual_creation",
"parameters": {
"product_name": batch.product_name,
"planned_quantity": batch.planned_quantity,
"priority": batch.priority.value if batch.priority else "MEDIUM"
},
"urgency": {
"level": "normal",
"ready_by_time": batch.planned_start_time.strftime('%H:%M') if batch.planned_start_time else "unknown"
},
"metadata": {
"trigger_source": "manual_creation",
"created_by": current_user.get("user_id", "unknown"),
"is_ai_assisted": False
}
}

# Update batch with reasoning data
from app.core.database import get_db
db = next(get_db())
batch.reasoning_data = reasoning_data
await db.commit()

# Emit Start Production alert
await alert_service.emit_start_production_alert(
tenant_id=tenant_id,
batch_id=batch.id,
product_name=batch.product_name,
batch_number=batch.batch_number,
reasoning_data=reasoning_data,
planned_start_time=batch.planned_start_time.isoformat() if batch.planned_start_time else None
)

logger.info("Start Production alert triggered for batch",
batch_id=str(batch.id), tenant_id=str(tenant_id))

except Exception as alert_error:
logger.error("Failed to trigger Start Production alert",
error=str(alert_error), batch_id=str(batch.id))
# Don't fail the batch creation if alert fails

logger.info("Created production batch",
batch_id=str(batch.id), tenant_id=str(tenant_id))

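For a batch created through this endpoint, the reasoning_data payload attached to the batch and passed to emit_start_production_alert ends up shaped roughly like this; all values are illustrative stand-ins, not real data:

reasoning_data = {
    "type": "manual_creation",
    "parameters": {
        "product_name": "Demo Product",    # illustrative
        "planned_quantity": 120,           # illustrative
        "priority": "MEDIUM",
    },
    "urgency": {
        "level": "normal",
        "ready_by_time": "06:30",          # batch.planned_start_time formatted as HH:MM
    },
    "metadata": {
        "trigger_source": "manual_creation",
        "created_by": "user-id-from-token",  # current_user["user_id"]
        "is_ai_assisted": False,
    },
}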
@@ -3,7 +3,7 @@
Production Dashboard API - Dashboard endpoints for production overview
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from datetime import date, datetime
from uuid import UUID
@@ -21,10 +21,11 @@ route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-dashboard"])


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


@router.get(

@@ -25,10 +25,11 @@ route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-operations"])


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


# ===== BATCH OPERATIONS =====

@@ -3,7 +3,7 @@
Production Schedules API - ATOMIC CRUD operations on ProductionSchedule model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from datetime import date, datetime, timedelta
from uuid import UUID
@@ -31,10 +31,11 @@ router = APIRouter(tags=["production-schedules"])
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service() -> ProductionService:
def get_production_service(request: Request) -> ProductionService:
"""Dependency injection for production service"""
from app.core.database import database_manager
return ProductionService(database_manager, settings)
notification_service = getattr(request.app.state, 'notification_service', None)
return ProductionService(database_manager, settings, notification_service)


@router.get(

@@ -12,10 +12,13 @@ from sqlalchemy import text
from app.core.config import settings
from app.core.database import database_manager
from app.services.production_alert_service import ProductionAlertService
from app.services.production_scheduler import ProductionScheduler
from app.services.production_notification_service import ProductionNotificationService
from shared.service_base import StandardFastAPIService

# Import standardized routers
from app.api import (
internal_demo,
production_batches,
production_schedules,
production_operations,
@@ -23,7 +26,6 @@ from app.api import (
analytics,
quality_templates,
equipment,
internal_demo,
orchestrator, # NEW: Orchestrator integration endpoint
production_orders_operations, # Tenant deletion endpoints
audit,
@@ -65,6 +67,7 @@ class ProductionService(StandardFastAPIService):
]

self.alert_service = None
self.notification_service = None
self.rabbitmq_client = None
self.event_publisher = None
# REMOVED: scheduler_service (replaced by Orchestrator Service)
@@ -124,20 +127,28 @@ class ProductionService(StandardFastAPIService):
await self.alert_service.start()
self.logger.info("Production alert service started")

# Store services in app state
app.state.alert_service = self.alert_service
app.state.production_alert_service = self.alert_service # Also store with this name for internal trigger
# Initialize notification service with EventPublisher
self.notification_service = ProductionNotificationService(self.event_publisher)
self.logger.info("Production notification service initialized")

# REMOVED: Production scheduler service initialization
# Scheduling is now handled by the Orchestrator Service
# which calls our /generate-schedule endpoint
# Initialize production scheduler with alert service and database manager
self.production_scheduler = ProductionScheduler(self.alert_service, self.database_manager)
await self.production_scheduler.start()
self.logger.info("Production scheduler started")

# Store services in app state
app.state.alert_service = self.alert_service
app.state.production_alert_service = self.alert_service # Also store with this name for internal trigger
app.state.notification_service = self.notification_service # Notification service for state change events
app.state.production_scheduler = self.production_scheduler # Store scheduler for manual triggering

async def on_shutdown(self, app: FastAPI):
"""Custom shutdown logic for production service"""
# Stop production scheduler
if hasattr(self, 'production_scheduler') and self.production_scheduler:
await self.production_scheduler.stop()
self.logger.info("Production scheduler stopped")

# Stop alert service
if self.alert_service:
await self.alert_service.stop()
@@ -203,8 +214,9 @@ service.add_router(production_schedules.router)
service.add_router(production_operations.router)
service.add_router(production_dashboard.router)
service.add_router(analytics.router)
service.add_router(internal_demo.router)
service.add_router(internal_demo.router, tags=["internal-demo"])
service.add_router(ml_insights.router) # ML insights endpoint
service.add_router(ml_insights.internal_router) # Internal ML insights endpoint for demo cloning
service.add_router(internal_alert_trigger_router) # Internal alert trigger for demo cloning

# REMOVED: test_production_scheduler endpoint
@@ -218,4 +230,4 @@ if __name__ == "__main__":
host="0.0.0.0",
port=8000,
reload=settings.DEBUG
)
)

@@ -38,10 +38,10 @@ class ProductionPriority(str, enum.Enum):
|
||||
|
||||
class EquipmentStatus(str, enum.Enum):
|
||||
"""Equipment status enumeration"""
|
||||
OPERATIONAL = "operational"
|
||||
MAINTENANCE = "maintenance"
|
||||
DOWN = "down"
|
||||
WARNING = "warning"
|
||||
OPERATIONAL = "OPERATIONAL"
|
||||
MAINTENANCE = "MAINTENANCE"
|
||||
DOWN = "DOWN"
|
||||
WARNING = "WARNING"
|
||||
|
||||
|
||||
class ProcessStage(str, enum.Enum):
|
||||
|
||||
@@ -9,10 +9,12 @@ from .production_batch_repository import ProductionBatchRepository
|
||||
from .production_schedule_repository import ProductionScheduleRepository
|
||||
from .production_capacity_repository import ProductionCapacityRepository
|
||||
from .quality_check_repository import QualityCheckRepository
|
||||
from .equipment_repository import EquipmentRepository
|
||||
|
||||
__all__ = [
|
||||
"ProductionBatchRepository",
|
||||
"ProductionScheduleRepository",
|
||||
"ProductionCapacityRepository",
|
||||
"QualityCheckRepository",
|
||||
"EquipmentRepository",
|
||||
]
|
||||
@@ -3,7 +3,7 @@ Equipment Repository
|
||||
"""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from sqlalchemy import select, func, and_
|
||||
from sqlalchemy import select, func, and_, text
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
@@ -219,3 +219,168 @@ class EquipmentRepository(ProductionBaseRepository):
|
||||
equipment_id=str(equipment_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
# ================================================================
|
||||
# ALERT-RELATED METHODS (migrated from production_alert_repository)
|
||||
# ================================================================
|
||||
|
||||
async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get equipment requiring attention.
|
||||
Returns equipment with maintenance due or status issues.
|
||||
"""
|
||||
try:
|
||||
|
||||
query = text("""
|
||||
SELECT
|
||||
e.id, e.tenant_id, e.name, e.type, e.status,
|
||||
e.efficiency_percentage, e.uptime_percentage,
|
||||
e.last_maintenance_date, e.next_maintenance_date,
|
||||
e.maintenance_interval_days,
|
||||
EXTRACT(DAYS FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
|
||||
COUNT(ea.id) as active_alerts
|
||||
FROM equipment e
|
||||
LEFT JOIN alerts ea ON ea.equipment_id = e.id
|
||||
AND ea.is_active = true
|
||||
AND ea.is_resolved = false
|
||||
WHERE e.is_active = true
|
||||
AND e.tenant_id = :tenant_id
|
||||
GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
|
||||
e.efficiency_percentage, e.uptime_percentage,
|
||||
e.last_maintenance_date, e.next_maintenance_date,
|
||||
e.maintenance_interval_days
|
||||
ORDER BY e.next_maintenance_date ASC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_equipment_needing_maintenance(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get equipment that needs maintenance.
|
||||
Returns equipment where next_maintenance_date has passed.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
|
||||
query_str = """
|
||||
SELECT
|
||||
e.id, e.name, e.type, e.tenant_id,
|
||||
e.last_maintenance_date,
|
||||
e.next_maintenance_date,
|
||||
EXTRACT(DAY FROM (NOW() - e.next_maintenance_date)) as days_overdue
|
||||
FROM equipment e
|
||||
WHERE e.next_maintenance_date IS NOT NULL
|
||||
AND e.next_maintenance_date < NOW()
|
||||
AND e.status = 'OPERATIONAL'
|
||||
AND e.is_active = true
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND e.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += " ORDER BY e.next_maintenance_date ASC LIMIT 50"
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
rows = result.fetchall()
|
||||
|
||||
return [
|
||||
{
|
||||
'id': str(row.id),
|
||||
'name': row.name,
|
||||
'type': row.type,
|
||||
'tenant_id': str(row.tenant_id),
|
||||
'last_maintenance_date': row.last_maintenance_date.isoformat() if row.last_maintenance_date else None,
|
||||
'next_maintenance_date': row.next_maintenance_date.isoformat() if row.next_maintenance_date else None,
|
||||
'days_overdue': int(row.days_overdue) if row.days_overdue else 0
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get equipment needing maintenance", error=str(e))
|
||||
raise
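
A usage sketch for the method above, assuming the database_manager session pattern used elsewhere in this service; the wrapper function itself is illustrative:

async def list_overdue_equipment(database_manager, tenant_id):
    # Open a session, query equipment whose next_maintenance_date has passed,
    # and return the most overdue items first. Each entry is a plain dict with
    # ISO-formatted dates and an integer days_overdue.
    async with database_manager.get_session() as session:
        repo = EquipmentRepository(session)
        overdue = await repo.get_equipment_needing_maintenance(tenant_id)
        return sorted(overdue, key=lambda item: item["days_overdue"], reverse=True)
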
async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production efficiency improvement recommendations.
|
||||
Analyzes production patterns to identify optimization opportunities.
|
||||
"""
|
||||
try:
|
||||
|
||||
query = text("""
|
||||
WITH efficiency_analysis AS (
|
||||
SELECT
|
||||
pb.tenant_id, pb.product_name,
|
||||
AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
|
||||
AVG(pb.planned_duration_minutes) as avg_planned_duration,
|
||||
COUNT(*) as batch_count,
|
||||
AVG(pb.yield_percentage) as avg_yield,
|
||||
EXTRACT(hour FROM pb.actual_start_time) as start_hour
|
||||
FROM production_batches pb
|
||||
WHERE pb.status = 'COMPLETED'
|
||||
AND pb.actual_completion_time > CURRENT_DATE - INTERVAL '30 days'
|
||||
AND pb.tenant_id = :tenant_id
|
||||
GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
|
||||
HAVING COUNT(*) >= 3
|
||||
),
|
||||
recommendations AS (
|
||||
SELECT *,
|
||||
CASE
|
||||
WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
|
||||
WHEN avg_yield < 85 THEN 'improve_yield'
|
||||
WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
|
||||
ELSE null
|
||||
END as recommendation_type,
|
||||
(avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
|
||||
FROM efficiency_analysis
|
||||
)
|
||||
SELECT * FROM recommendations
|
||||
WHERE recommendation_type IS NOT NULL
|
||||
AND efficiency_loss_percent > 10
|
||||
ORDER BY efficiency_loss_percent DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get energy consumption patterns for optimization analysis.
|
||||
Returns consumption by equipment and hour of day.
|
||||
"""
|
||||
try:
|
||||
|
||||
query = text("""
|
||||
SELECT
|
||||
e.tenant_id, e.name as equipment_name, e.type,
|
||||
AVG(ec.energy_consumption_kwh) as avg_energy,
|
||||
EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
|
||||
COUNT(*) as readings_count
|
||||
FROM equipment e
|
||||
JOIN energy_consumption ec ON ec.equipment_id = e.id
|
||||
WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
|
||||
AND e.tenant_id = :tenant_id
|
||||
GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
|
||||
HAVING COUNT(*) >= 10
|
||||
ORDER BY avg_energy DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
@@ -1,279 +0,0 @@
|
||||
# services/production/app/repositories/production_alert_repository.py
|
||||
"""
|
||||
Production Alert Repository
|
||||
Data access layer for production-specific alert detection and analysis
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any
|
||||
from uuid import UUID
|
||||
from sqlalchemy import text
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionAlertRepository:
|
||||
"""Repository for production alert data access"""
|
||||
|
||||
def __init__(self, session: AsyncSession):
|
||||
self.session = session
|
||||
|
||||
async def get_capacity_issues(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production capacity overload issues
|
||||
Returns batches that exceed daily capacity thresholds
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT
|
||||
pb.tenant_id,
|
||||
DATE(pb.planned_start_time) as planned_date,
|
||||
COUNT(*) as batch_count,
|
||||
SUM(pb.planned_quantity) as total_planned,
|
||||
'capacity_check' as capacity_status,
|
||||
100.0 as capacity_percentage
|
||||
FROM production_batches pb
|
||||
WHERE pb.planned_start_time >= CURRENT_DATE
|
||||
AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
|
||||
AND pb.status IN ('PENDING', 'IN_PROGRESS')
|
||||
GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
|
||||
HAVING COUNT(*) > 10
|
||||
ORDER BY total_planned DESC
|
||||
LIMIT 20
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get capacity issues", error=str(e))
|
||||
raise
|
||||
|
||||
async def get_production_delays(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production batches that are delayed
|
||||
Returns batches in progress past their planned end time
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT
|
||||
pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
|
||||
pb.planned_end_time as planned_completion_time, pb.actual_start_time,
|
||||
pb.actual_end_time as estimated_completion_time, pb.status,
|
||||
EXTRACT(minutes FROM (NOW() - pb.planned_end_time)) as delay_minutes,
|
||||
COALESCE(pb.priority::text, 'medium') as priority_level,
|
||||
1 as affected_orders
|
||||
FROM production_batches pb
|
||||
WHERE pb.status = 'IN_PROGRESS'
|
||||
AND pb.planned_end_time < NOW()
|
||||
AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
|
||||
ORDER BY
|
||||
CASE COALESCE(pb.priority::text, 'MEDIUM')
|
||||
WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
|
||||
END,
|
||||
delay_minutes DESC
|
||||
LIMIT 50
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get production delays", error=str(e))
|
||||
raise
|
||||
|
||||
async def get_quality_issues(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get quality control failures
|
||||
Returns quality checks that failed within recent hours
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT
|
||||
qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
|
||||
qc.quality_score, qc.within_tolerance,
|
||||
qc.pass_fail, qc.defect_count,
|
||||
qc.check_notes as qc_severity,
|
||||
1 as total_failures,
|
||||
pb.product_name, pb.batch_number,
|
||||
qc.created_at,
|
||||
qc.process_stage
|
||||
FROM quality_checks qc
|
||||
JOIN production_batches pb ON pb.id = qc.batch_id
|
||||
WHERE qc.pass_fail = false
|
||||
AND qc.created_at > NOW() - INTERVAL '4 hours'
|
||||
AND qc.corrective_action_needed = true
|
||||
ORDER BY
|
||||
CASE
|
||||
WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
|
||||
WHEN qc.pass_fail = false THEN 2
|
||||
ELSE 3
|
||||
END,
|
||||
qc.created_at DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get quality issues", error=str(e))
|
||||
raise
|
||||
|
||||
async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
|
||||
"""
|
||||
Mark a quality check as acknowledged to avoid duplicate alerts
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
UPDATE quality_checks
|
||||
SET acknowledged = true
|
||||
WHERE id = :id
|
||||
""")
|
||||
|
||||
await self.session.execute(query, {"id": quality_check_id})
|
||||
await self.session.commit()
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
|
||||
raise
|
||||
|
||||
async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get equipment requiring attention
|
||||
Returns equipment with maintenance due or status issues
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT
|
||||
e.id, e.tenant_id, e.name, e.type, e.status,
|
||||
e.efficiency_percentage, e.uptime_percentage,
|
||||
e.last_maintenance_date, e.next_maintenance_date,
|
||||
e.maintenance_interval_days,
|
||||
EXTRACT(DAYS FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
|
||||
COUNT(ea.id) as active_alerts
|
||||
FROM equipment e
|
||||
LEFT JOIN alerts ea ON ea.equipment_id = e.id
|
||||
AND ea.is_active = true
|
||||
AND ea.is_resolved = false
|
||||
WHERE e.is_active = true
|
||||
AND e.tenant_id = :tenant_id
|
||||
GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
|
||||
e.efficiency_percentage, e.uptime_percentage,
|
||||
e.last_maintenance_date, e.next_maintenance_date,
|
||||
e.maintenance_interval_days
|
||||
ORDER BY e.next_maintenance_date ASC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production efficiency improvement recommendations
|
||||
Analyzes production patterns to identify optimization opportunities
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
WITH efficiency_analysis AS (
|
||||
SELECT
|
||||
pb.tenant_id, pb.product_name,
|
||||
AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
|
||||
AVG(pb.planned_duration_minutes) as avg_planned_duration,
|
||||
COUNT(*) as batch_count,
|
||||
AVG(pb.yield_percentage) as avg_yield,
|
||||
EXTRACT(hour FROM pb.actual_start_time) as start_hour
|
||||
FROM production_batches pb
|
||||
WHERE pb.status = 'COMPLETED'
|
||||
AND pb.actual_completion_time > CURRENT_DATE - INTERVAL '30 days'
|
||||
AND pb.tenant_id = :tenant_id
|
||||
GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
|
||||
HAVING COUNT(*) >= 3
|
||||
),
|
||||
recommendations AS (
|
||||
SELECT *,
|
||||
CASE
|
||||
WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
|
||||
WHEN avg_yield < 85 THEN 'improve_yield'
|
||||
WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
|
||||
ELSE null
|
||||
END as recommendation_type,
|
||||
(avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
|
||||
FROM efficiency_analysis
|
||||
)
|
||||
SELECT * FROM recommendations
|
||||
WHERE recommendation_type IS NOT NULL
|
||||
AND efficiency_loss_percent > 10
|
||||
ORDER BY efficiency_loss_percent DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get energy consumption patterns for optimization analysis
|
||||
Returns consumption by equipment and hour of day
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT
|
||||
e.tenant_id, e.name as equipment_name, e.type,
|
||||
AVG(ec.energy_consumption_kwh) as avg_energy,
|
||||
EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
|
||||
COUNT(*) as readings_count
|
||||
FROM equipment e
|
||||
JOIN energy_consumption ec ON ec.equipment_id = e.id
|
||||
WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
|
||||
AND e.tenant_id = :tenant_id
|
||||
GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
|
||||
HAVING COUNT(*) >= 10
|
||||
ORDER BY avg_energy DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
|
||||
"""
|
||||
Get production batches affected by ingredient shortage
|
||||
Returns batch IDs that use the specified ingredient
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT DISTINCT pb.id
|
||||
FROM production_batches pb
|
||||
JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
|
||||
WHERE ri.ingredient_id = :ingredient_id
|
||||
AND pb.status = 'IN_PROGRESS'
|
||||
AND pb.planned_completion_time > NOW()
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"ingredient_id": ingredient_id})
|
||||
return [str(row.id) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get affected production batches", error=str(e), ingredient_id=ingredient_id)
|
||||
raise
|
||||
|
||||
async def set_statement_timeout(self, timeout: str = '30s') -> None:
|
||||
"""
|
||||
Set PostgreSQL statement timeout for the current session
|
||||
"""
|
||||
try:
|
||||
await self.session.execute(text(f"SET statement_timeout = '{timeout}'"))
|
||||
except Exception as e:
|
||||
logger.error("Failed to set statement timeout", error=str(e))
|
||||
raise
|
||||
@@ -850,3 +850,162 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):
|
||||
except Exception as e:
|
||||
logger.error("Error calculating baseline metrics", error=str(e), tenant_id=str(tenant_id))
|
||||
raise DatabaseError(f"Failed to calculate baseline metrics: {str(e)}")
|
||||
|
||||
# ================================================================
|
||||
# ALERT-RELATED METHODS (migrated from production_alert_repository)
|
||||
# ================================================================
|
||||
|
||||
async def get_capacity_issues(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production capacity overload issues.
|
||||
Returns batches that exceed daily capacity thresholds.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
query_str = """
|
||||
SELECT
|
||||
pb.tenant_id,
|
||||
DATE(pb.planned_start_time) as planned_date,
|
||||
COUNT(*) as batch_count,
|
||||
SUM(pb.planned_quantity) as total_planned,
|
||||
'capacity_check' as capacity_status,
|
||||
100.0 as capacity_percentage
|
||||
FROM production_batches pb
|
||||
WHERE pb.planned_start_time >= CURRENT_DATE
|
||||
AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
|
||||
AND pb.status IN ('PENDING', 'IN_PROGRESS')
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND pb.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += """
|
||||
GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
|
||||
HAVING COUNT(*) > 10
|
||||
ORDER BY total_planned DESC
|
||||
LIMIT 20
|
||||
"""
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get capacity issues", error=str(e))
|
||||
raise DatabaseError(f"Failed to get capacity issues: {str(e)}")
async def get_production_delays(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production batches that are delayed.
|
||||
Returns batches in progress past their planned end time.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
query_str = """
|
||||
SELECT
|
||||
pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
|
||||
pb.planned_end_time as planned_completion_time, pb.actual_start_time,
|
||||
pb.actual_end_time as estimated_completion_time, pb.status,
|
||||
EXTRACT(minutes FROM (NOW() - pb.planned_end_time)) as delay_minutes,
|
||||
COALESCE(pb.priority::text, 'medium') as priority_level,
|
||||
1 as affected_orders
|
||||
FROM production_batches pb
|
||||
WHERE pb.status = 'IN_PROGRESS'
|
||||
AND pb.planned_end_time < NOW()
|
||||
AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND pb.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += """
|
||||
ORDER BY
|
||||
CASE COALESCE(pb.priority::text, 'MEDIUM')
|
||||
WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
|
||||
END,
|
||||
delay_minutes DESC
|
||||
LIMIT 50
|
||||
"""
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get production delays", error=str(e))
|
||||
raise DatabaseError(f"Failed to get production delays: {str(e)}")
|
||||
|
||||
async def get_batches_with_delayed_start(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get batches that should have started but haven't.
|
||||
Returns PENDING batches past their planned start time (with 30 min grace period).
|
||||
Only returns batches planned for TODAY to avoid alerting on old batches.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
query_str = """
|
||||
SELECT
|
||||
pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
|
||||
pb.planned_start_time as scheduled_start_time, pb.status
|
||||
FROM production_batches pb
|
||||
WHERE pb.status = 'PENDING'
|
||||
AND pb.planned_start_time < NOW() - INTERVAL '30 minutes'
|
||||
AND pb.actual_start_time IS NULL
|
||||
AND pb.planned_start_time >= CURRENT_DATE
|
||||
AND pb.planned_start_time < CURRENT_DATE + INTERVAL '1 day'
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND pb.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += " ORDER BY pb.planned_start_time ASC LIMIT 50"
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
rows = result.fetchall()
|
||||
|
||||
return [
|
||||
{
|
||||
'id': str(row.id),
|
||||
'tenant_id': str(row.tenant_id),
|
||||
'product_name': row.product_name,
|
||||
'batch_number': row.batch_number,
|
||||
'scheduled_start_time': row.scheduled_start_time.isoformat() if row.scheduled_start_time else None
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get batches with delayed start", error=str(e))
|
||||
raise DatabaseError(f"Failed to get batches with delayed start: {str(e)}")
|
||||
|
||||
async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
|
||||
"""
|
||||
Get production batches affected by ingredient shortage.
|
||||
Returns batch IDs that use the specified ingredient.
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
SELECT DISTINCT pb.id
|
||||
FROM production_batches pb
|
||||
JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
|
||||
WHERE ri.ingredient_id = :ingredient_id
|
||||
AND pb.status = 'IN_PROGRESS'
|
||||
AND pb.planned_completion_time > NOW()
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"ingredient_id": ingredient_id})
|
||||
return [str(row.id) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get affected production batches", error=str(e), ingredient_id=ingredient_id)
|
||||
raise DatabaseError(f"Failed to get affected production batches: {str(e)}")
|
||||
|
||||
@@ -366,4 +366,76 @@ class QualityCheckRepository(ProductionBaseRepository):
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching quality checks with filters", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch quality checks with filters: {str(e)}")
|
||||
|
||||
# ================================================================
|
||||
# ALERT-RELATED METHODS (migrated from production_alert_repository)
|
||||
# ================================================================
|
||||
|
||||
async def get_quality_issues(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get quality control failures.
|
||||
Returns quality checks that failed within recent hours.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
from app.models.production import ProductionBatch
|
||||
|
||||
query_str = """
|
||||
SELECT
|
||||
qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
|
||||
qc.quality_score, qc.within_tolerance,
|
||||
qc.pass_fail, qc.defect_count,
|
||||
qc.check_notes as qc_severity,
|
||||
1 as total_failures,
|
||||
pb.product_name, pb.batch_number,
|
||||
qc.created_at,
|
||||
qc.process_stage
|
||||
FROM quality_checks qc
|
||||
JOIN production_batches pb ON pb.id = qc.batch_id
|
||||
WHERE qc.pass_fail = false
|
||||
AND qc.created_at > NOW() - INTERVAL '4 hours'
|
||||
AND qc.corrective_action_needed = true
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND qc.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += """
|
||||
ORDER BY
|
||||
CASE
|
||||
WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
|
||||
WHEN qc.pass_fail = false THEN 2
|
||||
ELSE 3
|
||||
END,
|
||||
qc.created_at DESC
|
||||
"""
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get quality issues", error=str(e))
|
||||
raise DatabaseError(f"Failed to get quality issues: {str(e)}")
|
||||
|
||||
async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
|
||||
"""
|
||||
Mark a quality check as acknowledged to avoid duplicate alerts.
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
UPDATE quality_checks
|
||||
SET acknowledged = true
|
||||
WHERE id = :id
|
||||
""")
|
||||
|
||||
await self.session.execute(query, {"id": quality_check_id})
|
||||
await self.session.commit()
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
|
||||
raise DatabaseError(f"Failed to mark quality check acknowledged: {str(e)}")
|
||||
@@ -130,6 +130,7 @@ class ProductionBatchResponse(BaseModel):
|
||||
quality_notes: Optional[str]
|
||||
delay_reason: Optional[str]
|
||||
cancellation_reason: Optional[str]
|
||||
reasoning_data: Optional[Dict[str, Any]] = None
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
completed_at: Optional[datetime]
|
||||
@@ -349,5 +350,3 @@ class QualityCheckListResponse(BaseModel):
|
||||
total_count: int
|
||||
page: int
|
||||
page_size: int
|
||||
|
||||
|
||||
|
||||
@@ -181,6 +181,41 @@ class ProductionAlertService:
|
||||
issue_type=issue_type
|
||||
)
|
||||
|
||||
async def emit_start_production_alert(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
product_name: str,
|
||||
batch_number: str,
|
||||
reasoning_data: Optional[Dict[str, Any]] = None,
|
||||
planned_start_time: Optional[str] = None
|
||||
):
|
||||
"""Emit start production alert when a new batch is created"""
|
||||
|
||||
metadata = {
|
||||
"batch_id": str(batch_id),
|
||||
"product_name": product_name,
|
||||
"batch_number": batch_number,
|
||||
"reasoning_data": reasoning_data
|
||||
}
|
||||
|
||||
if planned_start_time:
|
||||
metadata["planned_start_time"] = planned_start_time
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.start_production",
|
||||
tenant_id=tenant_id,
|
||||
severity="medium",
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"start_production_alert_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_number=batch_number,
|
||||
reasoning_type=reasoning_data.get("type") if reasoning_data else None
|
||||
)
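
A call-site sketch, assuming the alert is emitted right after a batch row is created; the helper and the batch fields shown are illustrative:

async def announce_new_batch(alert_service, tenant_id, batch):
    # Hypothetical helper: publish the start-production alert for a freshly created batch.
    await alert_service.emit_start_production_alert(
        tenant_id=tenant_id,
        batch_id=batch.id,
        product_name=batch.product_name,
        batch_number=batch.batch_number,
        reasoning_data={"type": "manual"},  # illustrative payload
        planned_start_time=batch.planned_start_time.isoformat() if batch.planned_start_time else None,
    )
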
async def emit_batch_start_delayed(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
@@ -376,73 +411,3 @@ class ProductionAlertService:
|
||||
tenant_id=str(tenant_id),
|
||||
time_savings=estimated_time_savings_minutes
|
||||
)
|
||||
|
||||
async def check_production_delays(self) -> int:
|
||||
"""
|
||||
Check for production delays and emit alerts for delayed batches.
|
||||
This method queries the database for production batches that are IN_PROGRESS
|
||||
but past their planned end time, and emits production delay alerts.
|
||||
|
||||
Returns:
|
||||
int: Number of delay alerts emitted
|
||||
"""
|
||||
if not self.database_manager:
|
||||
logger.warning("Database manager not available for delay checking")
|
||||
return 0
|
||||
|
||||
logger.info("Checking for production delays")
|
||||
alerts_emitted = 0
|
||||
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
# Import the repository here to avoid circular imports
|
||||
from app.repositories.production_alert_repository import ProductionAlertRepository
|
||||
alert_repo = ProductionAlertRepository(session)
|
||||
|
||||
# Get production delays from the database
|
||||
delayed_batches = await alert_repo.get_production_delays()
|
||||
|
||||
logger.info("Found delayed batches", count=len(delayed_batches))
|
||||
|
||||
# For each delayed batch, emit a production delay alert
|
||||
for batch in delayed_batches:
|
||||
try:
|
||||
batch_id = UUID(batch["id"])
|
||||
tenant_id = UUID(batch["tenant_id"])
|
||||
delay_minutes = int(batch["delay_minutes"])
|
||||
affected_orders = int(batch.get("affected_orders", 0))
|
||||
|
||||
# Emit production delay alert using existing method
|
||||
await self.emit_production_delay(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
delay_minutes=delay_minutes,
|
||||
affected_orders=affected_orders
|
||||
)
|
||||
|
||||
alerts_emitted += 1
|
||||
logger.info(
|
||||
"Production delay alert emitted",
|
||||
batch_id=str(batch_id),
|
||||
delay_minutes=delay_minutes,
|
||||
tenant_id=str(tenant_id)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting alert for delayed batch",
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking for production delays", error=str(e))
|
||||
# Don't raise the exception - this method is called internally
|
||||
# and we don't want to break the calling flow
|
||||
return 0
|
||||
|
||||
logger.info("Production delay check completed", alerts_emitted=alerts_emitted)
|
||||
return alerts_emitted
services/production/app/services/production_scheduler.py (new file, 609 lines)
@@ -0,0 +1,609 @@
|
||||
"""
|
||||
Production Scheduler Service
|
||||
Background task that periodically checks for production alert conditions
|
||||
and triggers appropriate alerts.
|
||||
"""
|
||||
|
||||
import asyncio
import zlib
|
||||
from typing import Dict, Any, List, Optional
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timedelta
|
||||
import structlog
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import text
|
||||
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.interval import IntervalTrigger
|
||||
|
||||
from app.repositories.production_batch_repository import ProductionBatchRepository
|
||||
from app.repositories.equipment_repository import EquipmentRepository
|
||||
from app.services.production_alert_service import ProductionAlertService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
class ProductionScheduler:
|
||||
"""Production scheduler service that checks for alert conditions"""
|
||||
|
||||
def __init__(self, alert_service: ProductionAlertService, database_manager: Any):
|
||||
self.alert_service = alert_service
|
||||
self.database_manager = database_manager
|
||||
self.scheduler = AsyncIOScheduler()
|
||||
self.check_interval = 300 # 5 minutes
|
||||
self.job_id = 'production_scheduler'
|
||||
|
||||
# Cache of emitted alerts to avoid duplicates
|
||||
self._emitted_alerts: set = set()
|
||||
self._alert_cache_ttl = 3600 # 1 hour
|
||||
self._last_cache_clear = datetime.utcnow()
|
||||
|
||||
async def start(self):
|
||||
"""Start the production scheduler with APScheduler"""
|
||||
if self.scheduler.running:
|
||||
logger.warning("Production scheduler is already running")
|
||||
return
|
||||
|
||||
# Add the periodic job
|
||||
trigger = IntervalTrigger(seconds=self.check_interval)
|
||||
self.scheduler.add_job(
|
||||
self._run_scheduler_task,
|
||||
trigger=trigger,
|
||||
id=self.job_id,
|
||||
name="Production Alert Checks",
|
||||
max_instances=1 # Prevent overlapping executions
|
||||
)
|
||||
|
||||
# Start the scheduler
|
||||
self.scheduler.start()
|
||||
logger.info("Production scheduler started", interval_seconds=self.check_interval)
|
||||
|
||||
async def stop(self):
|
||||
"""Stop the production scheduler"""
|
||||
if self.scheduler.running:
|
||||
self.scheduler.shutdown(wait=True)
|
||||
logger.info("Production scheduler stopped")
|
||||
else:
|
||||
logger.info("Production scheduler already stopped")
async def _run_scheduler_task(self):
|
||||
"""Run scheduled production alert checks with leader election"""
|
||||
# Try to acquire leader lock for this scheduler
|
||||
lock_name = f"production_scheduler:{self.database_manager.database_url if hasattr(self.database_manager, 'database_url') else 'default'}"
|
||||
lock_id = zlib.crc32(lock_name.encode("utf-8")) % (2**31) # Stable integer ID for the lock (hash() is randomized per process)
|
||||
acquired = False
|
||||
|
||||
try:
|
||||
# Try to acquire PostgreSQL advisory lock for leader election
|
||||
async with self.database_manager.get_session() as session:
|
||||
result = await session.execute(text("SELECT pg_try_advisory_lock(:lock_id)"), {"lock_id": lock_id})
|
||||
acquired = True # If no exception, lock was acquired
|
||||
|
||||
start_time = datetime.now()
|
||||
logger.info("Running scheduled production alert checks (as leader)")
|
||||
|
||||
# Run all alert checks
|
||||
alerts_generated = await self.check_all_conditions()
|
||||
|
||||
duration = (datetime.now() - start_time).total_seconds()
|
||||
logger.info(
|
||||
"Completed scheduled production alert checks",
|
||||
alerts_generated=alerts_generated,
|
||||
duration_seconds=round(duration, 2)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
# If it's a lock acquisition error, log and skip execution (another instance is running)
|
||||
error_str = str(e).lower()
|
||||
if "lock" in error_str or "timeout" in error_str or "could not acquire" in error_str:
|
||||
logger.debug(
|
||||
"Skipping production scheduler execution (not leader)",
|
||||
lock_name=lock_name
|
||||
)
|
||||
return # Not an error, just not the leader
|
||||
else:
|
||||
logger.error(
|
||||
"Error in production scheduler task",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
finally:
|
||||
if acquired:
|
||||
# Release the lock
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
await session.execute(text("SELECT pg_advisory_unlock(:lock_id)"), {"lock_id": lock_id})
|
||||
await session.commit()
|
||||
except Exception as unlock_error:
|
||||
logger.warning(
|
||||
"Error releasing leader lock (may have been automatically released)",
|
||||
error=str(unlock_error)
|
||||
)
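
The leader election above boils down to a PostgreSQL advisory lock; the essence of the pattern, reduced to a small helper (session handling assumed as in the method above):

async def run_if_leader(session, lock_id: int, task) -> bool:
    # pg_try_advisory_lock returns true/false instead of blocking, so only the
    # instance that wins the lock runs the task; the lock is released afterwards.
    got_lock = (await session.execute(
        text("SELECT pg_try_advisory_lock(:id)"), {"id": lock_id}
    )).scalar()
    if not got_lock:
        return False
    try:
        await task()
        return True
    finally:
        await session.execute(text("SELECT pg_advisory_unlock(:id)"), {"id": lock_id})
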
async def check_all_conditions(self) -> int:
|
||||
"""
|
||||
Check all production alert conditions and trigger alerts.
|
||||
|
||||
Returns:
|
||||
int: Total number of alerts generated
|
||||
"""
|
||||
if not self.database_manager:
|
||||
logger.warning("Database manager not available for production checks")
|
||||
return 0
|
||||
|
||||
total_alerts = 0
|
||||
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
# Get repositories
|
||||
batch_repo = ProductionBatchRepository(session)
|
||||
equipment_repo = EquipmentRepository(session)
|
||||
|
||||
# Check production delays
|
||||
delay_alerts = await self._check_production_delays(batch_repo)
|
||||
total_alerts += delay_alerts
|
||||
|
||||
# Check equipment maintenance
|
||||
maintenance_alerts = await self._check_equipment_maintenance(equipment_repo)
|
||||
total_alerts += maintenance_alerts
|
||||
|
||||
# Check batch start delays (batches that should have started but haven't)
|
||||
start_delay_alerts = await self._check_batch_start_delays(batch_repo)
|
||||
total_alerts += start_delay_alerts
|
||||
|
||||
logger.info(
|
||||
"Production alert checks completed",
|
||||
total_alerts=total_alerts,
|
||||
production_delays=delay_alerts,
|
||||
equipment_maintenance=maintenance_alerts,
|
||||
batch_start_delays=start_delay_alerts
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error during production alert checks",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
return total_alerts
|
||||
|
||||
async def _check_production_delays(self, batch_repo: ProductionBatchRepository) -> int:
|
||||
"""
|
||||
Check for production delays and trigger alerts.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
|
||||
Returns:
|
||||
int: Number of delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get delayed batches from repository
|
||||
delayed_batches = await batch_repo.get_production_delays()
|
||||
|
||||
logger.info("Found delayed production batches", count=len(delayed_batches))
|
||||
|
||||
# Clear the cache if its TTL has expired
|
||||
if (datetime.utcnow() - self._last_cache_clear).total_seconds() > self._alert_cache_ttl:
|
||||
self._emitted_alerts.clear()
|
||||
self._last_cache_clear = datetime.utcnow()
|
||||
logger.info("Cleared alert cache due to TTL expiration")
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_batches:
|
||||
try:
|
||||
batch_id = UUID(str(batch["id"]))
|
||||
|
||||
# Skip if we already emitted an alert for this batch
|
||||
alert_key = f"delay:{batch_id}"
|
||||
if alert_key in self._emitted_alerts:
|
||||
logger.debug("Skipping duplicate delay alert", batch_id=str(batch_id))
|
||||
continue
|
||||
|
||||
tenant_id = UUID(str(batch["tenant_id"]))
|
||||
delay_minutes = int(batch["delay_minutes"]) if batch.get("delay_minutes") else 0
|
||||
affected_orders = int(batch.get("affected_orders", 0))
|
||||
|
||||
# Emit production delay alert
|
||||
await self.alert_service.emit_production_delay(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
delay_minutes=delay_minutes,
|
||||
affected_orders=affected_orders
|
||||
)
|
||||
|
||||
# Record the alert in the cache
|
||||
self._emitted_alerts.add(alert_key)
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting production delay alert",
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking production delays", error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_equipment_maintenance(self, equipment_repo: EquipmentRepository) -> int:
|
||||
"""
|
||||
Check for equipment needing maintenance and trigger alerts.
|
||||
|
||||
Args:
|
||||
equipment_repo: Equipment repository
|
||||
|
||||
Returns:
|
||||
int: Number of maintenance alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get equipment that needs maintenance using repository method
|
||||
equipment_needing_maintenance = await equipment_repo.get_equipment_needing_maintenance()
|
||||
|
||||
logger.info(
|
||||
"Found equipment needing maintenance",
|
||||
count=len(equipment_needing_maintenance)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for equipment in equipment_needing_maintenance:
|
||||
try:
|
||||
equipment_id = UUID(equipment["id"])
|
||||
tenant_id = UUID(equipment["tenant_id"])
|
||||
days_overdue = int(equipment.get("days_overdue", 0))
|
||||
|
||||
# Emit equipment maintenance alert
|
||||
await self.alert_service.emit_equipment_maintenance_due(
|
||||
tenant_id=tenant_id,
|
||||
equipment_id=equipment_id,
|
||||
equipment_name=equipment.get("name", "Unknown Equipment"),
|
||||
equipment_type=equipment.get("type", "unknown"),
|
||||
last_maintenance_date=equipment.get("last_maintenance_date"),
|
||||
days_overdue=days_overdue
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting equipment maintenance alert",
|
||||
equipment_id=equipment.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking equipment maintenance", error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_batch_start_delays(self, batch_repo: ProductionBatchRepository) -> int:
|
||||
"""
|
||||
Check for batches that should have started but haven't.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
|
||||
Returns:
|
||||
int: Number of start delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get batches that should have started using repository method
|
||||
delayed_start_batches = await batch_repo.get_batches_with_delayed_start()
|
||||
|
||||
logger.info(
|
||||
"Found batches with delayed start",
|
||||
count=len(delayed_start_batches)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_start_batches:
|
||||
try:
|
||||
batch_id = UUID(batch["id"])
|
||||
|
||||
# Skip if we already emitted an alert for this batch
|
||||
alert_key = f"start_delay:{batch_id}"
|
||||
if alert_key in self._emitted_alerts:
|
||||
logger.debug("Skipping duplicate start delay alert", batch_id=str(batch_id))
|
||||
continue
|
||||
|
||||
tenant_id = UUID(batch["tenant_id"])
|
||||
scheduled_start = batch.get("scheduled_start_time")
|
||||
|
||||
# Emit batch start delayed alert
|
||||
await self.alert_service.emit_batch_start_delayed(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
scheduled_start=scheduled_start,
|
||||
delay_reason="Batch has not started on time"
|
||||
)
|
||||
|
||||
# Record the alert in the cache
|
||||
self._emitted_alerts.add(alert_key)
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting batch start delay alert",
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking batch start delays", error=str(e))
|
||||
return 0
|
||||
|
||||
async def trigger_manual_check(self, tenant_id: Optional[UUID] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Manually trigger production alert checks for a specific tenant or all tenants.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to check. If None, checks all tenants.
|
||||
|
||||
Returns:
|
||||
Dict with alert generation results
|
||||
"""
|
||||
logger.info(
|
||||
"Manually triggering production alert checks",
|
||||
tenant_id=str(tenant_id) if tenant_id else "all_tenants"
|
||||
)
|
||||
|
||||
try:
|
||||
if tenant_id:
|
||||
# Run tenant-specific alert checks
|
||||
alerts_generated = await self.check_all_conditions_for_tenant(tenant_id)
|
||||
else:
|
||||
# Run all alert checks across all tenants
|
||||
alerts_generated = await self.check_all_conditions()
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"tenant_id": str(tenant_id) if tenant_id else None,
|
||||
"alerts_generated": alerts_generated,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"message": "Production alert checks completed successfully"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error during manual production alert check",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
return {
|
||||
"success": False,
|
||||
"tenant_id": str(tenant_id) if tenant_id else None,
|
||||
"alerts_generated": 0,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"error": str(e)
|
||||
}
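
A sketch of exposing the manual trigger over HTTP; the router and path are hypothetical, while the app.state attribute matches the wiring added in main.py in this commit:

from fastapi import APIRouter, HTTPException, Request

admin_router = APIRouter(tags=["internal"])  # hypothetical router

@admin_router.post("/internal/production/check-alerts")
async def manual_alert_check(request: Request, tenant_id: Optional[UUID] = None):
    scheduler = getattr(request.app.state, "production_scheduler", None)
    if scheduler is None:
        raise HTTPException(status_code=503, detail="Production scheduler not available")
    return await scheduler.trigger_manual_check(tenant_id)
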
async def check_all_conditions_for_tenant(self, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check all production alert conditions for a specific tenant and trigger alerts.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant ID to check conditions for
|
||||
|
||||
Returns:
|
||||
int: Total number of alerts generated
|
||||
"""
|
||||
if not self.database_manager:
|
||||
logger.warning("Database manager not available for production checks")
|
||||
return 0
|
||||
|
||||
total_alerts = 0
|
||||
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
# Get repositories
|
||||
batch_repo = ProductionBatchRepository(session)
|
||||
equipment_repo = EquipmentRepository(session)
|
||||
|
||||
# Check production delays for specific tenant
|
||||
delay_alerts = await self._check_production_delays_for_tenant(batch_repo, tenant_id)
|
||||
total_alerts += delay_alerts
|
||||
|
||||
# Check equipment maintenance for specific tenant
|
||||
maintenance_alerts = await self._check_equipment_maintenance_for_tenant(equipment_repo, tenant_id)
|
||||
total_alerts += maintenance_alerts
|
||||
|
||||
# Check batch start delays for specific tenant
|
||||
start_delay_alerts = await self._check_batch_start_delays_for_tenant(batch_repo, tenant_id)
|
||||
total_alerts += start_delay_alerts
|
||||
|
||||
logger.info(
|
||||
"Tenant-specific production alert checks completed",
|
||||
tenant_id=str(tenant_id),
|
||||
total_alerts=total_alerts,
|
||||
production_delays=delay_alerts,
|
||||
equipment_maintenance=maintenance_alerts,
|
||||
batch_start_delays=start_delay_alerts
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error during tenant-specific production alert checks",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
return total_alerts
|
||||
|
||||
async def _check_production_delays_for_tenant(self, batch_repo: ProductionBatchRepository, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check for production delays for a specific tenant and trigger alerts.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
tenant_id: Tenant ID to check for
|
||||
|
||||
Returns:
|
||||
int: Number of delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get delayed batches for the specific tenant using repository method
|
||||
delayed_batches = await batch_repo.get_production_delays(tenant_id)
|
||||
|
||||
logger.info("Found delayed production batches for tenant", count=len(delayed_batches), tenant_id=str(tenant_id))
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_batches:
|
||||
try:
|
||||
batch_id = UUID(str(batch["id"]))
|
||||
delay_minutes = int(batch["delay_minutes"]) if batch.get("delay_minutes") else 0
|
||||
affected_orders = int(batch.get("affected_orders", 0))
|
||||
|
||||
# Emit production delay alert
|
||||
await self.alert_service.emit_production_delay(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
delay_minutes=delay_minutes,
|
||||
affected_orders=affected_orders
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting production delay alert",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking production delays for tenant", tenant_id=str(tenant_id), error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_equipment_maintenance_for_tenant(self, equipment_repo: EquipmentRepository, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check for equipment needing maintenance for a specific tenant and trigger alerts.
|
||||
|
||||
Args:
|
||||
equipment_repo: Equipment repository
|
||||
tenant_id: Tenant ID to check for
|
||||
|
||||
Returns:
|
||||
int: Number of maintenance alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get equipment that needs maintenance for specific tenant using repository method
|
||||
equipment_needing_maintenance = await equipment_repo.get_equipment_needing_maintenance(tenant_id)
|
||||
|
||||
logger.info(
|
||||
"Found equipment needing maintenance for tenant",
|
||||
count=len(equipment_needing_maintenance),
|
||||
tenant_id=str(tenant_id)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for equipment in equipment_needing_maintenance:
|
||||
try:
|
||||
equipment_id = UUID(equipment["id"])
|
||||
days_overdue = int(equipment.get("days_overdue", 0))
|
||||
|
||||
# Emit equipment maintenance alert
|
||||
await self.alert_service.emit_equipment_maintenance_due(
|
||||
tenant_id=tenant_id,
|
||||
equipment_id=equipment_id,
|
||||
equipment_name=equipment.get("name", "Unknown Equipment"),
|
||||
equipment_type=equipment.get("type", "unknown"),
|
||||
last_maintenance_date=equipment.get("last_maintenance_date"),
|
||||
days_overdue=days_overdue
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting equipment maintenance alert",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_id=equipment.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking equipment maintenance for tenant", tenant_id=str(tenant_id), error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_batch_start_delays_for_tenant(self, batch_repo: ProductionBatchRepository, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check for batches that should have started but haven't for a specific tenant.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
tenant_id: Tenant ID to check for
|
||||
|
||||
Returns:
|
||||
int: Number of start delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get batches that should have started for specific tenant using repository method
|
||||
delayed_start_batches = await batch_repo.get_batches_with_delayed_start(tenant_id)
|
||||
|
||||
logger.info(
|
||||
"Found batches with delayed start for tenant",
|
||||
count=len(delayed_start_batches),
|
||||
tenant_id=str(tenant_id)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_start_batches:
|
||||
try:
|
||||
batch_id = UUID(batch["id"])
|
||||
scheduled_start = batch.get("scheduled_start_time")
|
||||
|
||||
# Emit batch start delayed alert
|
||||
await self.alert_service.emit_batch_start_delayed(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
scheduled_start=scheduled_start,
|
||||
delay_reason="Batch has not started on time"
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting batch start delay alert",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking batch start delays for tenant", tenant_id=str(tenant_id), error=str(e))
|
||||
return 0
|
||||
@@ -25,17 +25,24 @@ from app.schemas.production import (
|
||||
DailyProductionRequirements, ProductionDashboardSummary, ProductionMetrics
|
||||
)
|
||||
from app.utils.cache import delete_cached, make_cache_key
|
||||
from app.services.production_notification_service import ProductionNotificationService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionService:
|
||||
"""Main production service with business logic"""
|
||||
|
||||
def __init__(self, database_manager, config: BaseServiceSettings):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
database_manager,
|
||||
config: BaseServiceSettings,
|
||||
notification_service: Optional[ProductionNotificationService] = None
|
||||
):
|
||||
self.database_manager = database_manager
|
||||
self.config = config
|
||||
|
||||
self.notification_service = notification_service
|
||||
|
||||
# Initialize shared clients
|
||||
self.inventory_client = get_inventory_client(config, "production")
|
||||
self.orders_client = OrdersServiceClient(config)
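
Because notification_service defaults to None, the service can still be constructed without it, which keeps existing callers and tests working; a sketch with a hypothetical stub that records emissions:

class StubNotificationService:
    # Hypothetical test double: records emitted notifications for assertions.
    def __init__(self):
        self.events = []

    async def emit_batch_started_notification(self, **kwargs):
        self.events.append(("batch_started", kwargs))

    async def emit_batch_completed_notification(self, **kwargs):
        self.events.append(("batch_completed", kwargs))

# e.g. ProductionService(database_manager, settings, notification_service=StubNotificationService())
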
@@ -302,24 +309,28 @@ class ProductionService:
|
||||
raise
|
||||
|
||||
async def update_batch_status(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
status_update: ProductionBatchStatusUpdate
|
||||
) -> ProductionBatch:
|
||||
"""Update production batch status"""
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
batch_repo = ProductionBatchRepository(session)
|
||||
|
||||
|
||||
# Get current batch to capture old status for notification
|
||||
current_batch = await batch_repo.get_batch(tenant_id, batch_id)
|
||||
old_status = current_batch.status.value if current_batch else None
|
||||
|
||||
# Update batch status
|
||||
batch = await batch_repo.update_batch_status(
|
||||
batch_id,
|
||||
status_update.status,
|
||||
status_update.actual_quantity,
|
||||
status_update.notes
|
||||
)
|
||||
|
||||
|
||||
# Update inventory if batch is completed
|
||||
if status_update.status == ProductionStatus.COMPLETED and status_update.actual_quantity:
|
||||
await self._update_inventory_on_completion(
|
||||
@@ -331,15 +342,33 @@ class ProductionService:
|
||||
await delete_cached(cache_key)
|
||||
logger.debug("Invalidated production dashboard cache", cache_key=cache_key, tenant_id=str(tenant_id))
|
||||
|
||||
# Emit batch state changed notification
|
||||
if self.notification_service and old_status:
|
||||
try:
|
||||
await self.notification_service.emit_batch_state_changed_notification(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=str(batch.id),
|
||||
product_sku=batch.product_sku or "",
|
||||
product_name=batch.product_name or "Unknown Product",
|
||||
old_status=old_status,
|
||||
new_status=status_update.status.value,
|
||||
quantity=batch.planned_quantity or 0,
|
||||
unit=batch.unit or "units",
|
||||
assigned_to=batch.assigned_to
|
||||
)
|
||||
except Exception as notif_error:
|
||||
logger.warning("Failed to emit batch state notification",
|
||||
error=str(notif_error), batch_id=str(batch_id))
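
The notification calls here are wrapped so a transport failure never breaks the status update itself; the same best-effort guard could be factored into a small helper (a sketch, not part of this commit; logger is the module-level structlog logger):

async def emit_safely(send, **log_context):
    # Await a notification coroutine but swallow and log any failure, since
    # notifications are best-effort and must not roll back the main operation.
    try:
        await send()
    except Exception as exc:
        logger.warning("Failed to emit notification", error=str(exc), **log_context)

Call sites would then pass the emission as a zero-argument coroutine factory, e.g. lambda: self.notification_service.emit_batch_state_changed_notification(...).
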
logger.info("Updated batch status",
|
||||
batch_id=str(batch_id),
|
||||
new_status=status_update.status.value,
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return batch
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error updating batch status",
|
||||
logger.error("Error updating batch status",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
@@ -664,6 +693,23 @@ class ProductionService:
                logger.info("Started production batch",
                            batch_id=str(batch_id), tenant_id=str(tenant_id))

                # Emit batch started notification
                if self.notification_service:
                    try:
                        await self.notification_service.emit_batch_started_notification(
                            tenant_id=tenant_id,
                            batch_id=str(batch.id),
                            product_sku=batch.product_sku or "",
                            product_name=batch.product_name or "Unknown Product",
                            quantity_planned=batch.planned_quantity or 0,
                            unit=batch.unit or "units",
                            estimated_duration_minutes=batch.planned_duration_minutes,
                            assigned_to=batch.assigned_to
                        )
                    except Exception as notif_error:
                        logger.warning("Failed to emit batch started notification",
                                       error=str(notif_error), batch_id=str(batch_id))

                # Acknowledge production delay alerts (non-blocking)
                try:
                    from shared.clients.alert_processor_client import get_alert_processor_client
@@ -710,7 +756,30 @@ class ProductionService:
                logger.info("Completed production batch",
                            batch_id=str(batch_id), tenant_id=str(tenant_id))

                return batch
                # Emit batch completed notification
                if self.notification_service:
                    try:
                        # Calculate production duration if start and end times are available
                        production_duration_minutes = None
                        if batch.actual_start_time and batch.actual_end_time:
                            duration = batch.actual_end_time - batch.actual_start_time
                            production_duration_minutes = int(duration.total_seconds() / 60)

                        await self.notification_service.emit_batch_completed_notification(
                            tenant_id=tenant_id,
                            batch_id=str(batch.id),
                            product_sku=batch.product_sku or "",
                            product_name=batch.product_name or "Unknown Product",
                            quantity_produced=batch.actual_quantity or batch.planned_quantity or 0,
                            unit=batch.unit or "units",
                            production_duration_minutes=production_duration_minutes,
                            quality_score=batch.quality_score
                        )
                    except Exception as notif_error:
                        logger.warning("Failed to emit batch completed notification",
                                       error=str(notif_error), batch_id=str(batch_id))

                return batch
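The completed-batch hunk above reports a duration derived from the stored timestamps rather than the planned duration. The arithmetic is just whole minutes between `actual_start_time` and `actual_end_time`; a tiny self-contained illustration with made-up times:

```python
from datetime import datetime

# Hypothetical stand-ins for batch.actual_start_time / batch.actual_end_time.
actual_start_time = datetime(2024, 3, 15, 6, 0)
actual_end_time = datetime(2024, 3, 15, 8, 45)

# Same arithmetic as the diff: whole minutes between start and end.
production_duration_minutes = int((actual_end_time - actual_start_time).total_seconds() / 60)
print(production_duration_minutes)  # 165
```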

        except Exception as e:
            logger.error("Error completing production batch",
@@ -1568,11 +1637,13 @@ class ProductionService:
                from app.repositories.equipment_repository import EquipmentRepository
                equipment_repo = EquipmentRepository(session)

                # First verify equipment belongs to tenant
                # First verify equipment belongs to tenant and capture old status
                equipment = await equipment_repo.get_equipment_by_id(tenant_id, equipment_id)
                if not equipment:
                    return None

                old_status = equipment.status if hasattr(equipment, 'status') else None

                # Update equipment
                updated_equipment = await equipment_repo.update_equipment(
                    equipment_id,
@@ -1585,7 +1656,24 @@ class ProductionService:
                logger.info("Updated equipment",
                            equipment_id=str(equipment_id), tenant_id=str(tenant_id))

                return updated_equipment
                # Emit equipment status notification if status changed
                update_dict = equipment_update.model_dump(exclude_none=True)
                new_status = update_dict.get('status')
                if self.notification_service and new_status and old_status and new_status != old_status:
                    try:
                        await self.notification_service.emit_equipment_status_notification(
                            tenant_id=tenant_id,
                            equipment_id=str(equipment_id),
                            equipment_name=updated_equipment.name or "Unknown Equipment",
                            old_status=old_status,
                            new_status=new_status,
                            reason=update_dict.get('notes') or update_dict.get('status_reason')
                        )
                    except Exception as notif_error:
                        logger.warning("Failed to emit equipment status notification",
                                       error=str(notif_error), equipment_id=str(equipment_id))

                return updated_equipment

        except Exception as e:
            logger.error("Error updating equipment",
@@ -1862,7 +1950,11 @@ class ProductionService:
        # For now, we assume recipe_id = product_id or fetch from a mapping

        # Generate reasoning data for JTBD dashboard
        from shared.schemas.reasoning_types import create_batch_reasoning_forecast_demand
        from shared.schemas.reasoning_types import (
            create_production_batch_reasoning,
            PredictionFactor,
            PredictionFactorType
        )

        # Try to get product name from forecast, stock_info, or use placeholder
        product_name = (
@@ -1871,15 +1963,113 @@ class ProductionService:
            f"Product {product_id}"
        )

        reasoning_data = create_batch_reasoning_forecast_demand(
            product_name=product_name,
            predicted_demand=predicted_demand,
            current_stock=current_stock,
            production_needed=production_needed,
            target_date=target_date.isoformat(),
            confidence_score=forecast.get('confidence_score', 0.85)
        # Calculate variance from historical average if available
        historical_average = forecast.get('historical_average', predicted_demand * 0.8)  # Default to 80% of predicted
        variance_percent = ((predicted_demand - historical_average) / historical_average * 100) if historical_average > 0 else 0

        # Create detailed factors for enhanced reasoning
        factors = []

        # Factor 1: Historical pattern (always present)
        factors.append(
            PredictionFactor(
                factor=PredictionFactorType.HISTORICAL_PATTERN,
                weight=0.40,
                contribution=historical_average * 0.40,
                description="Based on historical sales patterns",
                historical_data={
                    "historical_average": historical_average,
                    "historical_period": "last_30_days"
                },
                confidence=0.90
            )
        )

        # Factor 2: Weather impact (if weather data is available in forecast)
        weather_impact = forecast.get('weather_impact')
        if weather_impact:
            weather_type = weather_impact.get('type', 'sunny')
            weather_contribution = weather_impact.get('contribution', 0)
            weather_weight = weather_impact.get('weight', 0.25)

            # Map weather type to PredictionFactorType
            weather_factor_map = {
                'sunny': PredictionFactorType.WEATHER_SUNNY,
                'rainy': PredictionFactorType.WEATHER_RAINY,
                'cold': PredictionFactorType.WEATHER_COLD,
                'hot': PredictionFactorType.WEATHER_HOT
            }
            weather_factor = weather_factor_map.get(weather_type, PredictionFactorType.WEATHER_SUNNY)

            factors.append(
                PredictionFactor(
                    factor=weather_factor,
                    weight=weather_weight,
                    contribution=weather_contribution,
                    description=f"Weather impact: {weather_type}",
                    weather_data={
                        "condition": weather_type,
                        "temperature": weather_impact.get('temperature', 22),
                        "impact_direction": weather_impact.get('impact_direction', 'positive')
                    },
                    confidence=weather_impact.get('confidence', 0.85)
                )
            )

        # Factor 3: Weekend boost (if target date is weekend)
        if target_date.weekday() >= 5:  # Saturday (5) or Sunday (6)
            weekend_contribution = predicted_demand * 0.20  # 20% boost
            factors.append(
                PredictionFactor(
                    factor=PredictionFactorType.WEEKEND_BOOST,
                    weight=0.20,
                    contribution=weekend_contribution,
                    description="Weekend demand increase",
                    confidence=0.80
                )
            )

        # Factor 4: Inventory level consideration
        inventory_weight = 0.15
        inventory_contribution = current_stock * inventory_weight
        factors.append(
            PredictionFactor(
                factor=PredictionFactorType.INVENTORY_LEVEL,
                weight=inventory_weight,
                contribution=inventory_contribution,
                description="Current inventory consideration",
                inventory_data={
                    "current_stock": current_stock,
                    "safety_stock_days": 3
                },
                confidence=0.95
            )
        )

        # Use unified reasoning function - enhanced when factors exist, basic otherwise
        if factors:
            reasoning_data = create_production_batch_reasoning(
                product_name=product_name,
                predicted_demand=predicted_demand,
                historical_average=historical_average,
                variance_percent=variance_percent,
                variance_reason="weather_sunny_weekend" if (target_date.weekday() >= 5 and weather_impact) else "historical_pattern",
                confidence_score=forecast.get('confidence_score', 0.87),
                factors=factors,
                urgency_level="normal",
                ready_by_time="08:00",
                forecast_id=forecast.get('forecast_id')
            )
        else:
            reasoning_data = create_production_batch_reasoning(
                product_name=product_name,
                predicted_demand=predicted_demand,
                current_stock=current_stock,
                production_needed=production_needed,
                target_date=target_date.isoformat(),
                confidence_score=forecast.get('confidence_score', 0.85)
            )

        # Create production batch
        planned_start = datetime.combine(target_date, datetime.min.time())
        planned_end = datetime.combine(target_date, datetime.max.time())
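The enhanced-reasoning hunk above derives `variance_percent` from the forecast and a historical average, falling back to 80% of the prediction when no history is available. A small worked example of that arithmetic, with made-up numbers (in the service these values come from the forecast payload):

```python
# Illustrative values only.
predicted_demand = 120.0
historical_average = 100.0  # would default to predicted_demand * 0.8 if absent

variance_percent = ((predicted_demand - historical_average) / historical_average * 100) if historical_average > 0 else 0
print(variance_percent)  # 20.0 -> demand is forecast 20% above the recent average
```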
@@ -1953,4 +2143,4 @@ class ProductionService:
    ) -> str:
        """Generate batch number in format BATCH-YYYYMMDD-NNN"""
        date_str = target_date.strftime("%Y%m%d")
        return f"BATCH-{date_str}-{batch_index:03d}"
        return f"BATCH-{date_str}-{batch_index:03d}"
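For reference, the batch-number helper above produces zero-padded, date-stamped identifiers. A quick, self-contained illustration (values are made up):

```python
from datetime import date

def generate_batch_number(target_date: date, batch_index: int) -> str:
    # Mirrors the format in the hunk above: BATCH-YYYYMMDD-NNN.
    date_str = target_date.strftime("%Y%m%d")
    return f"BATCH-{date_str}-{batch_index:03d}"

print(generate_batch_number(date(2025, 3, 8), 7))   # BATCH-20250308-007
print(generate_batch_number(date(2025, 3, 8), 12))  # BATCH-20250308-012
```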
@@ -1,345 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo Production Batches Seeding Script for Production Service
Creates production batches for demo template tenants

This script runs as a Kubernetes init job inside the production-service container.
"""

import asyncio
import uuid
import sys
import os
import json
from datetime import datetime, timezone, timedelta
from pathlib import Path

# Add app to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import select
import structlog

from app.models.production import ProductionBatch, ProductionStatus, ProductionPriority, ProcessStage

# Import reasoning helper functions for i18n support
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.schemas.reasoning_types import create_batch_reasoning_forecast_demand, create_batch_reasoning_regular_schedule

# Configure logging
logger = structlog.get_logger()

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = uuid.UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")  # Individual bakery
DEMO_TENANT_ENTERPRISE_CHAIN = uuid.UUID("c3d4e5f6-a7b8-49c0-d1e2-f3a4b5c6d7e8")  # Enterprise parent (Obrador)

from shared.utils.demo_dates import BASE_REFERENCE_DATE


def load_batches_data():
    """Load production batches data from JSON file"""
    data_file = Path(__file__).parent / "lotes_produccion_es.json"
    if not data_file.exists():
        raise FileNotFoundError(f"Production batches data file not found: {data_file}")

    with open(data_file, 'r', encoding='utf-8') as f:
        return json.load(f)


def calculate_datetime_from_offset(offset_days: int, hour: int, minute: int) -> datetime:
    """Calculate a datetime based on offset from BASE_REFERENCE_DATE"""
    base_date = BASE_REFERENCE_DATE.replace(hour=hour, minute=minute, second=0, microsecond=0)
    return base_date + timedelta(days=offset_days)


def map_status(status_str: str) -> ProductionStatus:
    """Map status string to enum"""
    mapping = {
        "PENDING": ProductionStatus.PENDING,
        "IN_PROGRESS": ProductionStatus.IN_PROGRESS,
        "COMPLETED": ProductionStatus.COMPLETED,
        "CANCELLED": ProductionStatus.CANCELLED,
        "ON_HOLD": ProductionStatus.ON_HOLD,
        "QUALITY_CHECK": ProductionStatus.QUALITY_CHECK,
        "FAILED": ProductionStatus.FAILED
    }
    return mapping.get(status_str, ProductionStatus.PENDING)


def map_priority(priority_str: str) -> ProductionPriority:
    """Map priority string to enum"""
    mapping = {
        "LOW": ProductionPriority.LOW,
        "MEDIUM": ProductionPriority.MEDIUM,
        "HIGH": ProductionPriority.HIGH,
        "URGENT": ProductionPriority.URGENT
    }
    return mapping.get(priority_str, ProductionPriority.MEDIUM)


def map_process_stage(stage_str: str) -> ProcessStage:
    """Map process stage string to enum"""
    if not stage_str:
        return None

    mapping = {
        "mixing": ProcessStage.MIXING,
        "proofing": ProcessStage.PROOFING,
        "shaping": ProcessStage.SHAPING,
        "baking": ProcessStage.BAKING,
        "cooling": ProcessStage.COOLING,
        "packaging": ProcessStage.PACKAGING,
        "finishing": ProcessStage.FINISHING
    }
    return mapping.get(stage_str, None)


async def seed_batches_for_tenant(
    db: AsyncSession,
    tenant_id: uuid.UUID,
    tenant_name: str,
    batches_list: list
):
    """Seed production batches for a specific tenant"""
    logger.info(f"Seeding production batches for: {tenant_name}", tenant_id=str(tenant_id))

    # Check if batches already exist
    result = await db.execute(
        select(ProductionBatch).where(ProductionBatch.tenant_id == tenant_id).limit(1)
    )
    existing = result.scalar_one_or_none()

    if existing:
        logger.info(f"Production batches already exist for {tenant_name}, skipping seed")
        return {"tenant_id": str(tenant_id), "batches_created": 0, "skipped": True}

    count = 0
    for batch_data in batches_list:
        # Calculate planned start and end times
        planned_start = calculate_datetime_from_offset(
            batch_data["planned_start_offset_days"],
            batch_data["planned_start_hour"],
            batch_data["planned_start_minute"]
        )

        planned_end = planned_start + timedelta(minutes=batch_data["planned_duration_minutes"])

        # Calculate actual times for completed batches
        actual_start = None
        actual_end = None
        completed_at = None
        actual_duration = None

        if batch_data["status"] in ["COMPLETED", "QUALITY_CHECK"]:
            actual_start = planned_start  # Assume started on time
            actual_duration = batch_data["planned_duration_minutes"]
            actual_end = actual_start + timedelta(minutes=actual_duration)
            completed_at = actual_end
        elif batch_data["status"] == "IN_PROGRESS":
            # For IN_PROGRESS batches, set actual_start to a recent time to ensure valid progress calculation
            # If planned_start is in the past, use it; otherwise, set to 30 minutes ago
            # Use BASE_REFERENCE_DATE as "now" for consistent demo data
            now = BASE_REFERENCE_DATE
            if planned_start < now:
                # If planned start was in the past, use a time that ensures batch is ~30% complete
                elapsed_time_minutes = min(
                    int(batch_data["planned_duration_minutes"] * 0.3),
                    int((now - planned_start).total_seconds() / 60)
                )
                actual_start = now - timedelta(minutes=elapsed_time_minutes)
            else:
                # If planned_start is in the future, start batch 30 minutes ago
                actual_start = now - timedelta(minutes=30)
            actual_duration = None
            actual_end = None

        # For San Pablo, use original IDs. For La Espiga, generate new UUIDs
        if tenant_id == DEMO_TENANT_PROFESSIONAL:
            batch_id = uuid.UUID(batch_data["id"])
        else:
            # Generate deterministic UUID for La Espiga based on original ID
            base_uuid = uuid.UUID(batch_data["id"])
            # Add a fixed offset to create a unique but deterministic ID
            batch_id = uuid.UUID(int=base_uuid.int + 0x10000000000000000000000000000000)

        # Map enums
        status = map_status(batch_data["status"])
        priority = map_priority(batch_data["priority"])
        current_stage = map_process_stage(batch_data.get("current_process_stage"))

        # Create unique batch number for each tenant
        if tenant_id == DEMO_TENANT_PROFESSIONAL:
            batch_number = batch_data["batch_number"]
        else:
            # For La Espiga, append tenant suffix to make batch number unique
            batch_number = batch_data["batch_number"] + "-LE"

        # Generate structured reasoning_data for i18n support
        reasoning_data = None
        try:
            # Use forecast demand reasoning for most batches
            if batch_data.get("is_ai_assisted") or priority in [ProductionPriority.HIGH, ProductionPriority.URGENT]:
                reasoning_data = create_batch_reasoning_forecast_demand(
                    product_name=batch_data["product_name"],
                    predicted_demand=batch_data["planned_quantity"],
                    current_stock=int(batch_data["planned_quantity"] * 0.3),  # Demo: assume 30% current stock
                    production_needed=batch_data["planned_quantity"],
                    target_date=planned_start.date().isoformat(),
                    confidence_score=0.85 if batch_data.get("is_ai_assisted") else 0.75
                )
            else:
                # Regular schedule reasoning for standard batches
                reasoning_data = create_batch_reasoning_regular_schedule(
                    product_name=batch_data["product_name"],
                    schedule_frequency="daily",
                    batch_size=batch_data["planned_quantity"]
                )
        except Exception as e:
            logger.warning(f"Failed to generate reasoning_data for batch {batch_number}: {e}")

        # Create production batch
        batch = ProductionBatch(
            id=batch_id,
            tenant_id=tenant_id,
            batch_number=batch_number,
            product_id=uuid.UUID(batch_data["product_id"]),
            product_name=batch_data["product_name"],
            recipe_id=uuid.UUID(batch_data["recipe_id"]) if batch_data.get("recipe_id") else None,
            planned_start_time=planned_start,
            planned_end_time=planned_end,
            planned_quantity=batch_data["planned_quantity"],
            planned_duration_minutes=batch_data["planned_duration_minutes"],
            actual_start_time=actual_start,
            actual_end_time=actual_end,
            actual_quantity=batch_data.get("actual_quantity"),
            actual_duration_minutes=actual_duration,
            status=status,
            priority=priority,
            current_process_stage=current_stage,
            yield_percentage=batch_data.get("yield_percentage"),
            quality_score=batch_data.get("quality_score"),
            waste_quantity=batch_data.get("waste_quantity"),
            defect_quantity=batch_data.get("defect_quantity"),
            estimated_cost=batch_data.get("estimated_cost"),
            actual_cost=batch_data.get("actual_cost"),
            labor_cost=batch_data.get("labor_cost"),
            material_cost=batch_data.get("material_cost"),
            overhead_cost=batch_data.get("overhead_cost"),
            equipment_used=batch_data.get("equipment_used"),
            station_id=batch_data.get("station_id"),
            is_rush_order=batch_data.get("is_rush_order", False),
            is_special_recipe=batch_data.get("is_special_recipe", False),
            is_ai_assisted=batch_data.get("is_ai_assisted", False),
            waste_defect_type=batch_data.get("waste_defect_type"),
            production_notes=batch_data.get("production_notes"),
            quality_notes=batch_data.get("quality_notes"),
            reasoning_data=reasoning_data,  # Structured reasoning for i18n support
            created_at=BASE_REFERENCE_DATE,
            updated_at=BASE_REFERENCE_DATE,
            completed_at=completed_at
        )

        db.add(batch)
        count += 1
        logger.debug(f"Created production batch: {batch.batch_number}", batch_id=str(batch.id))

    await db.commit()
    logger.info(f"Successfully created {count} production batches for {tenant_name}")

    return {
        "tenant_id": str(tenant_id),
        "batches_created": count,
        "skipped": False
    }
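In the removed seeding script above, the second demo tenant gets batch IDs derived from the first tenant's IDs by adding a fixed 2^124 offset, so re-running the seed always produces the same UUIDs. A minimal sketch of that derivation (the UUIDs below are arbitrary examples; it assumes the base UUID's top bits leave room for the offset, which holds for the demo data):

```python
import uuid

OFFSET = 0x10000000000000000000000000000000  # fixed 2**124 offset used by the seed script

def derive_second_tenant_id(base_id: uuid.UUID) -> uuid.UUID:
    # Deterministic: the same input always maps to the same derived UUID.
    # Assumes base_id.int + OFFSET still fits in 128 bits (true for the demo IDs).
    return uuid.UUID(int=base_id.int + OFFSET)

base = uuid.UUID("11111111-2222-4333-8444-555555555555")
print(derive_second_tenant_id(base))  # 21111111-2222-4333-8444-555555555555
```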


async def seed_all(db: AsyncSession):
    """Seed all demo tenants with production batches"""
    logger.info("Starting demo production batches seed process")

    # Load batches data
    data = load_batches_data()

    results = []

    # Seed Professional Bakery with production batches (single location)
    result_professional = await seed_batches_for_tenant(
        db,
        DEMO_TENANT_PROFESSIONAL,
        "Panadería Artesana Madrid (Professional)",
        data["lotes_produccion"]
    )
    results.append(result_professional)

    # Seed Enterprise Parent (central production - Obrador) with scaled-up batches
    result_enterprise_parent = await seed_batches_for_tenant(
        db,
        DEMO_TENANT_ENTERPRISE_CHAIN,
        "Panadería Central - Obrador Madrid (Enterprise Parent)",
        data["lotes_produccion"]
    )
    results.append(result_enterprise_parent)

    total_created = sum(r["batches_created"] for r in results)

    return {
        "results": results,
        "total_batches_created": total_created,
        "status": "completed"
    }


async def main():
    """Main execution function"""
    # Get database URL from environment
    database_url = os.getenv("PRODUCTION_DATABASE_URL")
    if not database_url:
        logger.error("PRODUCTION_DATABASE_URL environment variable must be set")
        return 1

    # Ensure asyncpg driver
    if database_url.startswith("postgresql://"):
        database_url = database_url.replace("postgresql://", "postgresql+asyncpg://", 1)

    # Create async engine
    engine = create_async_engine(database_url, echo=False)
    async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with async_session() as session:
            result = await seed_all(session)

            logger.info(
                "Production batches seed completed successfully!",
                total_batches=result["total_batches_created"],
                status=result["status"]
            )

            # Print summary
            print("\n" + "="*60)
            print("DEMO PRODUCTION BATCHES SEED SUMMARY")
            print("="*60)
            for tenant_result in result["results"]:
                tenant_id = tenant_result["tenant_id"]
                count = tenant_result["batches_created"]
                skipped = tenant_result.get("skipped", False)
                status = "SKIPPED (already exists)" if skipped else f"CREATED {count} batches"
                print(f"Tenant {tenant_id}: {status}")
            print(f"\nTotal Batches Created: {result['total_batches_created']}")
            print("="*60 + "\n")

            return 0

    except Exception as e:
        logger.error(f"Production batches seed failed: {str(e)}", exc_info=True)
        return 1
    finally:
        await engine.dispose()


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
@@ -1,243 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo Equipment Seeding Script for Production Service
Creates production equipment for demo template tenants

This script runs as a Kubernetes init job inside the production-service container.
"""

import asyncio
import uuid
import sys
import os
import json
from datetime import datetime, timezone, timedelta
from pathlib import Path

# Add app to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import select
import structlog

from app.models.production import Equipment, EquipmentType, EquipmentStatus

# Add shared path for demo utilities
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.utils.demo_dates import BASE_REFERENCE_DATE

# Configure logging
logger = structlog.get_logger()

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = uuid.UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")  # Individual bakery
DEMO_TENANT_ENTERPRISE_CHAIN = uuid.UUID("c3d4e5f6-a7b8-49c0-d1e2-f3a4b5c6d7e8")  # Enterprise parent (Obrador)


def load_equipment_data():
    """Load equipment data from JSON file"""
    data_file = Path(__file__).parent / "equipos_es.json"
    if not data_file.exists():
        raise FileNotFoundError(f"Equipment data file not found: {data_file}")

    with open(data_file, 'r', encoding='utf-8') as f:
        return json.load(f)


def calculate_date_from_offset(offset_days: int) -> datetime:
    """Calculate a date based on offset from BASE_REFERENCE_DATE"""
    return BASE_REFERENCE_DATE + timedelta(days=offset_days)


async def seed_equipment_for_tenant(
    db: AsyncSession,
    tenant_id: uuid.UUID,
    tenant_name: str,
    equipment_list: list
):
    """Seed equipment for a specific tenant"""
    logger.info(f"Seeding equipment for: {tenant_name}", tenant_id=str(tenant_id))

    # Check if equipment already exists
    result = await db.execute(
        select(Equipment).where(Equipment.tenant_id == tenant_id).limit(1)
    )
    existing = result.scalar_one_or_none()

    if existing:
        logger.info(f"Equipment already exists for {tenant_name}, skipping seed")
        return {"tenant_id": str(tenant_id), "equipment_created": 0, "skipped": True}

    count = 0
    for equip_data in equipment_list:
        # Calculate dates from offsets
        install_date = None
        if "install_date_offset_days" in equip_data:
            install_date = calculate_date_from_offset(equip_data["install_date_offset_days"])

        last_maintenance_date = None
        if "last_maintenance_offset_days" in equip_data:
            last_maintenance_date = calculate_date_from_offset(equip_data["last_maintenance_offset_days"])

        # Calculate next maintenance date
        next_maintenance_date = None
        if last_maintenance_date and equip_data.get("maintenance_interval_days"):
            next_maintenance_date = last_maintenance_date + timedelta(
                days=equip_data["maintenance_interval_days"]
            )

        # Map status string to enum
        status_mapping = {
            "operational": EquipmentStatus.OPERATIONAL,
            "warning": EquipmentStatus.WARNING,
            "maintenance": EquipmentStatus.MAINTENANCE,
            "down": EquipmentStatus.DOWN
        }
        status = status_mapping.get(equip_data["status"], EquipmentStatus.OPERATIONAL)

        # Map type string to enum
        type_mapping = {
            "oven": EquipmentType.OVEN,
            "mixer": EquipmentType.MIXER,
            "proofer": EquipmentType.PROOFER,
            "freezer": EquipmentType.FREEZER,
            "packaging": EquipmentType.PACKAGING,
            "other": EquipmentType.OTHER
        }
        equipment_type = type_mapping.get(equip_data["type"], EquipmentType.OTHER)

        # Generate tenant-specific equipment ID using XOR transformation
        base_equipment_id = uuid.UUID(equip_data["id"])
        tenant_int = int(tenant_id.hex, 16)
        equipment_id = uuid.UUID(int=tenant_int ^ int(base_equipment_id.hex, 16))

        # Create equipment
        equipment = Equipment(
            id=equipment_id,
            tenant_id=tenant_id,
            name=equip_data["name"],
            type=equipment_type,
            model=equip_data.get("model"),
            serial_number=equip_data.get("serial_number"),
            location=equip_data.get("location"),
            status=status,
            power_kw=equip_data.get("power_kw"),
            capacity=equip_data.get("capacity"),
            efficiency_percentage=equip_data.get("efficiency_percentage"),
            current_temperature=equip_data.get("current_temperature"),
            target_temperature=equip_data.get("target_temperature"),
            maintenance_interval_days=equip_data.get("maintenance_interval_days"),
            last_maintenance_date=last_maintenance_date,
            next_maintenance_date=next_maintenance_date,
            install_date=install_date,
            notes=equip_data.get("notes"),
            created_at=BASE_REFERENCE_DATE,
            updated_at=BASE_REFERENCE_DATE
        )

        db.add(equipment)
        count += 1
        logger.debug(f"Created equipment: {equipment.name}", equipment_id=str(equipment.id))

    await db.commit()
    logger.info(f"Successfully created {count} equipment items for {tenant_name}")

    return {
        "tenant_id": str(tenant_id),
        "equipment_created": count,
        "skipped": False
    }
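The removed equipment seeder above derives per-tenant equipment IDs by XOR-ing the base equipment UUID with the tenant UUID. Because XOR is its own inverse and the result of XOR-ing two 128-bit integers always fits in 128 bits, the mapping is deterministic and reversible. A minimal sketch (the tenant UUID is the demo ID from the script; the base equipment UUID is a made-up example):

```python
import uuid

def tenant_scoped_equipment_id(tenant_id: uuid.UUID, base_equipment_id: uuid.UUID) -> uuid.UUID:
    # Equivalent to the script's int(tenant_id.hex, 16) ^ int(base_equipment_id.hex, 16).
    return uuid.UUID(int=tenant_id.int ^ base_equipment_id.int)

tenant = uuid.UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")
base = uuid.UUID("00000000-0000-0000-0000-00000000beef")  # illustrative only

derived = tenant_scoped_equipment_id(tenant, base)
# XOR-ing again with the tenant recovers the original base ID.
assert uuid.UUID(int=derived.int ^ tenant.int) == base
```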


async def seed_all(db: AsyncSession):
    """Seed all demo tenants with equipment"""
    logger.info("Starting demo equipment seed process")

    # Load equipment data
    data = load_equipment_data()

    results = []

    # Seed Professional Bakery with equipment (single location)
    result_professional = await seed_equipment_for_tenant(
        db,
        DEMO_TENANT_PROFESSIONAL,
        "Panadería Artesana Madrid (Professional)",
        data["equipos_individual_bakery"]
    )
    results.append(result_professional)

    # Seed Enterprise Parent (central production - Obrador) with scaled-up equipment
    # Use enterprise equipment list if available, otherwise use individual bakery equipment
    enterprise_equipment_key = "equipos_enterprise_chain" if "equipos_enterprise_chain" in data else "equipos_individual_bakery"
    result_enterprise_parent = await seed_equipment_for_tenant(
        db,
        DEMO_TENANT_ENTERPRISE_CHAIN,
        "Panadería Central - Obrador Madrid (Enterprise Parent)",
        data[enterprise_equipment_key]
    )
    results.append(result_enterprise_parent)

    total_created = sum(r["equipment_created"] for r in results)

    return {
        "results": results,
        "total_equipment_created": total_created,
        "status": "completed"
    }


async def main():
    """Main execution function"""
    # Get database URL from environment
    database_url = os.getenv("PRODUCTION_DATABASE_URL")
    if not database_url:
        logger.error("PRODUCTION_DATABASE_URL environment variable must be set")
        return 1

    # Ensure asyncpg driver
    if database_url.startswith("postgresql://"):
        database_url = database_url.replace("postgresql://", "postgresql+asyncpg://", 1)

    # Create async engine
    engine = create_async_engine(database_url, echo=False)
    async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with async_session() as session:
            result = await seed_all(session)

            logger.info(
                "Equipment seed completed successfully!",
                total_equipment=result["total_equipment_created"],
                status=result["status"]
            )

            # Print summary
            print("\n" + "="*60)
            print("DEMO EQUIPMENT SEED SUMMARY")
            print("="*60)
            for tenant_result in result["results"]:
                tenant_id = tenant_result["tenant_id"]
                count = tenant_result["equipment_created"]
                skipped = tenant_result.get("skipped", False)
                status = "SKIPPED (already exists)" if skipped else f"CREATED {count} items"
                print(f"Tenant {tenant_id}: {status}")
            print(f"\nTotal Equipment Created: {result['total_equipment_created']}")
            print("="*60 + "\n")

            return 0

    except Exception as e:
        logger.error(f"Equipment seed failed: {str(e)}", exc_info=True)
        return 1
    finally:
        await engine.dispose()


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
@@ -1,218 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo Quality Templates Seeding Script for Production Service
Creates quality check templates for demo template tenants

This script runs as a Kubernetes init job inside the production-service container.
"""

import asyncio
import uuid
import sys
import os
import json
from datetime import datetime, timezone
from pathlib import Path

# Add app to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import select
import structlog

from app.models.production import QualityCheckTemplate

# Add shared path for demo utilities
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.utils.demo_dates import BASE_REFERENCE_DATE

# Configure logging
logger = structlog.get_logger()

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = uuid.UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")  # Individual bakery
DEMO_TENANT_ENTERPRISE_CHAIN = uuid.UUID("c3d4e5f6-a7b8-49c0-d1e2-f3a4b5c6d7e8")  # Enterprise parent (Obrador)

# System user ID (first admin user from auth service)
SYSTEM_USER_ID = uuid.UUID("50000000-0000-0000-0000-000000000004")


def load_quality_templates_data():
    """Load quality templates data from JSON file"""
    data_file = Path(__file__).parent / "plantillas_calidad_es.json"
    if not data_file.exists():
        raise FileNotFoundError(f"Quality templates data file not found: {data_file}")

    with open(data_file, 'r', encoding='utf-8') as f:
        return json.load(f)


# Model uses simple strings, no need for enum mapping functions


async def seed_quality_templates_for_tenant(
    db: AsyncSession,
    tenant_id: uuid.UUID,
    tenant_name: str,
    templates_list: list
):
    """Seed quality templates for a specific tenant"""
    logger.info(f"Seeding quality templates for: {tenant_name}", tenant_id=str(tenant_id))

    # Check if templates already exist
    result = await db.execute(
        select(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == tenant_id).limit(1)
    )
    existing = result.scalar_one_or_none()

    if existing:
        logger.info(f"Quality templates already exist for {tenant_name}, skipping seed")
        return {"tenant_id": str(tenant_id), "templates_created": 0, "skipped": True}

    count = 0
    for template_data in templates_list:
        # Use strings directly (model doesn't use enums)
        check_type = template_data["check_type"]
        applicable_stages = template_data.get("applicable_stages", [])

        # For San Pablo, use original IDs. For La Espiga, generate new UUIDs
        if tenant_id == DEMO_TENANT_PROFESSIONAL:
            template_id = uuid.UUID(template_data["id"])
        else:
            # Generate deterministic UUID for La Espiga based on original ID
            base_uuid = uuid.UUID(template_data["id"])
            # Add a fixed offset to create a unique but deterministic ID
            template_id = uuid.UUID(int=base_uuid.int + 0x10000000000000000000000000000000)

        # Create quality check template
        template = QualityCheckTemplate(
            id=template_id,
            tenant_id=tenant_id,
            name=template_data["name"],
            template_code=template_data["template_code"],
            check_type=check_type,
            category=template_data.get("category"),
            description=template_data.get("description"),
            instructions=template_data.get("instructions"),
            parameters=template_data.get("parameters"),
            thresholds=template_data.get("thresholds"),
            scoring_criteria=template_data.get("scoring_criteria"),
            is_active=template_data.get("is_active", True),
            is_required=template_data.get("is_required", False),
            is_critical=template_data.get("is_critical", False),
            weight=template_data.get("weight", 1.0),
            min_value=template_data.get("min_value"),
            max_value=template_data.get("max_value"),
            target_value=template_data.get("target_value"),
            unit=template_data.get("unit"),
            tolerance_percentage=template_data.get("tolerance_percentage"),
            applicable_stages=applicable_stages,
            created_by=SYSTEM_USER_ID,
            created_at=BASE_REFERENCE_DATE,
            updated_at=BASE_REFERENCE_DATE
        )

        db.add(template)
        count += 1
        logger.debug(f"Created quality template: {template.name}", template_id=str(template.id))

    await db.commit()
    logger.info(f"Successfully created {count} quality templates for {tenant_name}")

    return {
        "tenant_id": str(tenant_id),
        "templates_created": count,
        "skipped": False
    }


async def seed_all(db: AsyncSession):
    """Seed all demo tenants with quality templates"""
    logger.info("Starting demo quality templates seed process")

    # Load quality templates data
    data = load_quality_templates_data()

    results = []

    # Seed Professional Bakery with quality templates (single location)
    result_professional = await seed_quality_templates_for_tenant(
        db,
        DEMO_TENANT_PROFESSIONAL,
        "Panadería Artesana Madrid (Professional)",
        data["plantillas_calidad"]
    )
    results.append(result_professional)

    # Seed Enterprise Parent (central production - Obrador) with same quality templates
    result_enterprise_parent = await seed_quality_templates_for_tenant(
        db,
        DEMO_TENANT_ENTERPRISE_CHAIN,
        "Panadería Central - Obrador Madrid (Enterprise Parent)",
        data["plantillas_calidad"]
    )
    results.append(result_enterprise_parent)

    total_created = sum(r["templates_created"] for r in results)

    return {
        "results": results,
        "total_templates_created": total_created,
        "status": "completed"
    }


async def main():
    """Main execution function"""
    # Get database URL from environment
    database_url = os.getenv("PRODUCTION_DATABASE_URL")
    if not database_url:
        logger.error("PRODUCTION_DATABASE_URL environment variable must be set")
        return 1

    # Ensure asyncpg driver
    if database_url.startswith("postgresql://"):
        database_url = database_url.replace("postgresql://", "postgresql+asyncpg://", 1)

    # Create async engine
    engine = create_async_engine(database_url, echo=False)
    async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with async_session() as session:
            result = await seed_all(session)

            logger.info(
                "Quality templates seed completed successfully!",
                total_templates=result["total_templates_created"],
                status=result["status"]
            )

            # Print summary
            print("\n" + "="*60)
            print("DEMO QUALITY TEMPLATES SEED SUMMARY")
            print("="*60)
            for tenant_result in result["results"]:
                tenant_id = tenant_result["tenant_id"]
                count = tenant_result["templates_created"]
                skipped = tenant_result.get("skipped", False)
                status = "SKIPPED (already exists)" if skipped else f"CREATED {count} templates"
                print(f"Tenant {tenant_id}: {status}")
            print(f"\nTotal Templates Created: {result['total_templates_created']}")
            print("="*60 + "\n")

            return 0

    except Exception as e:
        logger.error(f"Quality templates seed failed: {str(e)}", exc_info=True)
        return 1
    finally:
        await engine.dispose()


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)