"""
|
|
Internal Demo Cloning API for Production Service
|
|
Service-to-service endpoint for cloning production data
|
|
"""

from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
import structlog
import uuid
from datetime import datetime, timezone, timedelta
from typing import Optional, Dict, Any
import os

from app.core.database import get_db
from app.models.production import (
    ProductionBatch, ProductionSchedule, ProductionCapacity,
    QualityCheckTemplate, QualityCheck, Equipment,
    ProductionStatus, ProductionPriority, ProcessStage,
    EquipmentStatus, EquipmentType
)
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
from shared.utils.alert_generator import generate_equipment_alerts
from shared.messaging.rabbitmq import RabbitMQClient

logger = structlog.get_logger()

router = APIRouter(prefix="/internal/demo", tags=["internal"])

# Internal API key for service-to-service auth
INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "dev-internal-key-change-in-production")

# Base demo tenant IDs
DEMO_TENANT_SAN_PABLO = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
DEMO_TENANT_LA_ESPIGA = "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7"


def verify_internal_api_key(x_internal_api_key: Optional[str] = Header(None)):
    """Verify internal API key for service-to-service communication"""
    if x_internal_api_key != INTERNAL_API_KEY:
        logger.warning("Unauthorized internal API access attempted")
        raise HTTPException(status_code=403, detail="Invalid internal API key")
    return True

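
# Example invocation from the orchestrator (illustrative only: host, port, and
# UUIDs are placeholders, and FastAPI exposes the scalar POST parameters below
# as query parameters):
#
#   curl -X POST "http://production-service:8000/internal/demo/clone?base_tenant_id=<base-uuid>&virtual_tenant_id=<virtual-uuid>&demo_account_type=bakery" \
#        -H "X-Internal-API-Key: $INTERNAL_API_KEY"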
@router.post("/clone")
async def clone_demo_data(
    base_tenant_id: str,
    virtual_tenant_id: str,
    demo_account_type: str,
    session_id: Optional[str] = None,
    session_created_at: Optional[str] = None,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
"""
|
|
Clone production service data for a virtual demo tenant
|
|
|
|
Clones:
|
|
- Production batches (historical production runs)
|
|
- Production schedules (daily planning)
|
|
- Production capacity records
|
|
- Quality check templates
|
|
- Quality checks (inspection records)
|
|
- Equipment (machines and tools)
|
|
|
|
Args:
|
|
base_tenant_id: Template tenant UUID to clone from
|
|
virtual_tenant_id: Target virtual tenant UUID
|
|
demo_account_type: Type of demo account
|
|
session_id: Originating session ID for tracing
|
|
session_created_at: Session creation timestamp for date adjustment
|
|
|
|
Returns:
|
|
Cloning status and record counts
|
|
"""
|
|
    start_time = datetime.now(timezone.utc)

    # Parse session creation time for date adjustment
    if session_created_at:
        try:
            session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
        except (ValueError, AttributeError):
            session_time = start_time
    else:
        session_time = start_time

    logger.info(
        "Starting production data cloning",
        base_tenant_id=base_tenant_id,
        virtual_tenant_id=virtual_tenant_id,
        demo_account_type=demo_account_type,
        session_id=session_id,
        session_created_at=session_created_at
    )

    try:
        # Validate UUIDs
        base_uuid = uuid.UUID(base_tenant_id)
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Track cloning statistics
        stats = {
            "production_batches": 0,
            "production_schedules": 0,
            "production_capacity": 0,
            "quality_check_templates": 0,
            "quality_checks": 0,
            "equipment": 0,
            "alerts_generated": 0
        }

        # ID mappings from base-tenant primary keys to freshly generated IDs,
        # so cloned child rows can reference their cloned parents
        batch_id_map = {}
        template_id_map = {}
        equipment_id_map = {}

        # Clone Equipment first (no dependencies)
        result = await db.execute(
            select(Equipment).where(Equipment.tenant_id == base_uuid)
        )
        base_equipment = result.scalars().all()

        logger.info(
            "Found equipment to clone",
            count=len(base_equipment),
            base_tenant=str(base_uuid)
        )

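        # adjust_date_for_demo re-anchors template dates to the demo session.
        # Assuming it shifts each date by (session_time - BASE_REFERENCE_DATE):
        # with a BASE_REFERENCE_DATE of 2025-01-01 and a session created on
        # 2025-03-15, an install_date of 2024-12-30 would become 2025-03-13,
        # preserving each record's age relative to "now". Dates illustrative.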
        for equipment in base_equipment:
            new_equipment_id = uuid.uuid4()
            equipment_id_map[equipment.id] = new_equipment_id

            # Adjust dates relative to session creation time
            adjusted_install_date = adjust_date_for_demo(
                equipment.install_date, session_time, BASE_REFERENCE_DATE
            )
            adjusted_last_maintenance = adjust_date_for_demo(
                equipment.last_maintenance_date, session_time, BASE_REFERENCE_DATE
            )
            adjusted_next_maintenance = adjust_date_for_demo(
                equipment.next_maintenance_date, session_time, BASE_REFERENCE_DATE
            )

            new_equipment = Equipment(
                id=new_equipment_id,
                tenant_id=virtual_uuid,
                name=equipment.name,
                type=equipment.type,
                model=equipment.model,
                serial_number=equipment.serial_number,
                location=equipment.location,
                status=equipment.status,
                install_date=adjusted_install_date,
                last_maintenance_date=adjusted_last_maintenance,
                next_maintenance_date=adjusted_next_maintenance,
                maintenance_interval_days=equipment.maintenance_interval_days,
                efficiency_percentage=equipment.efficiency_percentage,
                uptime_percentage=equipment.uptime_percentage,
                energy_usage_kwh=equipment.energy_usage_kwh,
                power_kw=equipment.power_kw,
                capacity=equipment.capacity,
                weight_kg=equipment.weight_kg,
                current_temperature=equipment.current_temperature,
                target_temperature=equipment.target_temperature,
                is_active=equipment.is_active,
                notes=equipment.notes,
                created_at=session_time,
                updated_at=session_time
            )
            db.add(new_equipment)
            stats["equipment"] += 1

        # Flush to get equipment IDs
        await db.flush()

        # Clone Quality Check Templates
        result = await db.execute(
            select(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == base_uuid)
        )
        base_templates = result.scalars().all()

        logger.info(
            "Found quality check templates to clone",
            count=len(base_templates),
            base_tenant=str(base_uuid)
        )

        for template in base_templates:
            new_template_id = uuid.uuid4()
            template_id_map[template.id] = new_template_id

            new_template = QualityCheckTemplate(
                id=new_template_id,
                tenant_id=virtual_uuid,
                name=template.name,
                template_code=template.template_code,
                check_type=template.check_type,
                category=template.category,
                description=template.description,
                instructions=template.instructions,
                parameters=template.parameters,
                thresholds=template.thresholds,
                scoring_criteria=template.scoring_criteria,
                is_active=template.is_active,
                is_required=template.is_required,
                is_critical=template.is_critical,
                weight=template.weight,
                min_value=template.min_value,
                max_value=template.max_value,
                target_value=template.target_value,
                unit=template.unit,
                tolerance_percentage=template.tolerance_percentage,
                applicable_stages=template.applicable_stages,
                created_by=template.created_by,
                created_at=session_time,
                updated_at=session_time
            )
            db.add(new_template)
            stats["quality_check_templates"] += 1

        # Flush to get template IDs
        await db.flush()

        # Clone Production Batches
        result = await db.execute(
            select(ProductionBatch).where(ProductionBatch.tenant_id == base_uuid)
        )
        base_batches = result.scalars().all()

        logger.info(
            "Found production batches to clone",
            count=len(base_batches),
            base_tenant=str(base_uuid)
        )

        # Calculate date offset to make production recent; guard against
        # template batches that have no planned_start_time at all
        planned_starts = [b.planned_start_time for b in base_batches if b.planned_start_time]
        if planned_starts:
            date_offset = datetime.now(timezone.utc) - max(planned_starts)
        else:
            date_offset = timedelta(days=0)

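        # Example (ignoring time of day): if the newest planned_start_time in
        # the template data is 2025-01-10 and today is 2025-03-15, date_offset
        # is 64 days, and every timestamp below shifts forward by 64 days.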
        for batch in base_batches:
            new_batch_id = uuid.uuid4()
            batch_id_map[batch.id] = new_batch_id

            new_batch = ProductionBatch(
                id=new_batch_id,
                tenant_id=virtual_uuid,
                batch_number=f"BATCH-{uuid.uuid4().hex[:8].upper()}",  # New batch number
                product_id=batch.product_id,  # Keep product reference
                product_name=batch.product_name,
                recipe_id=batch.recipe_id,  # Keep recipe reference
                planned_start_time=batch.planned_start_time + date_offset if batch.planned_start_time else None,
                planned_end_time=batch.planned_end_time + date_offset if batch.planned_end_time else None,
                planned_quantity=batch.planned_quantity,
                planned_duration_minutes=batch.planned_duration_minutes,
                actual_start_time=batch.actual_start_time + date_offset if batch.actual_start_time else None,
                actual_end_time=batch.actual_end_time + date_offset if batch.actual_end_time else None,
                actual_quantity=batch.actual_quantity,
                actual_duration_minutes=batch.actual_duration_minutes,
                status=batch.status,
                priority=batch.priority,
                current_process_stage=batch.current_process_stage,
                process_stage_history=batch.process_stage_history,
                pending_quality_checks=batch.pending_quality_checks,
                completed_quality_checks=batch.completed_quality_checks,
                estimated_cost=batch.estimated_cost,
                actual_cost=batch.actual_cost,
                labor_cost=batch.labor_cost,
                material_cost=batch.material_cost,
                overhead_cost=batch.overhead_cost,
                yield_percentage=batch.yield_percentage,
                quality_score=batch.quality_score,
                waste_quantity=batch.waste_quantity,
                defect_quantity=batch.defect_quantity,
                equipment_used=batch.equipment_used,
                staff_assigned=batch.staff_assigned,
                station_id=batch.station_id,
                order_id=batch.order_id,
                forecast_id=batch.forecast_id,
                is_rush_order=batch.is_rush_order,
                is_special_recipe=batch.is_special_recipe,
                production_notes=batch.production_notes,
                quality_notes=batch.quality_notes,
                delay_reason=batch.delay_reason,
                cancellation_reason=batch.cancellation_reason,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc),
                completed_at=batch.completed_at + date_offset if batch.completed_at else None
            )
            db.add(new_batch)
            stats["production_batches"] += 1

        # Flush to get batch IDs
        await db.flush()

        # Clone Quality Checks
        result = await db.execute(
            select(QualityCheck).where(QualityCheck.tenant_id == base_uuid)
        )
        base_checks = result.scalars().all()

        logger.info(
            "Found quality checks to clone",
            count=len(base_checks),
            base_tenant=str(base_uuid)
        )

        for check in base_checks:
            # Remap to the cloned batch/template; fall back to the source ID
            # if the parent row was not part of this clone
            new_batch_id = batch_id_map.get(check.batch_id, check.batch_id)
            new_template_id = template_id_map.get(check.template_id, check.template_id) if check.template_id else None

            new_check = QualityCheck(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                batch_id=new_batch_id,
                template_id=new_template_id,
                check_type=check.check_type,
                process_stage=check.process_stage,
                check_time=check.check_time + date_offset,
                checker_id=check.checker_id,
                quality_score=check.quality_score,
                pass_fail=check.pass_fail,
                defect_count=check.defect_count,
                defect_types=check.defect_types,
                measured_weight=check.measured_weight,
                measured_temperature=check.measured_temperature,
                measured_moisture=check.measured_moisture,
                measured_dimensions=check.measured_dimensions,
                stage_specific_data=check.stage_specific_data,
                target_weight=check.target_weight,
                target_temperature=check.target_temperature,
                target_moisture=check.target_moisture,
                tolerance_percentage=check.tolerance_percentage,
                within_tolerance=check.within_tolerance,
                corrective_action_needed=check.corrective_action_needed,
                corrective_actions=check.corrective_actions,
                template_results=check.template_results,
                criteria_scores=check.criteria_scores,
                check_notes=check.check_notes,
                photos_urls=check.photos_urls,
                certificate_url=check.certificate_url,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc)
            )
            db.add(new_check)
            stats["quality_checks"] += 1

        # Clone Production Schedules
        result = await db.execute(
            select(ProductionSchedule).where(ProductionSchedule.tenant_id == base_uuid)
        )
        base_schedules = result.scalars().all()

        logger.info(
            "Found production schedules to clone",
            count=len(base_schedules),
            base_tenant=str(base_uuid)
        )

        for schedule in base_schedules:
            new_schedule = ProductionSchedule(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                schedule_date=schedule.schedule_date + date_offset,
                shift_start=schedule.shift_start + date_offset,
                shift_end=schedule.shift_end + date_offset,
                total_capacity_hours=schedule.total_capacity_hours,
                planned_capacity_hours=schedule.planned_capacity_hours,
                actual_capacity_hours=schedule.actual_capacity_hours,
                overtime_hours=schedule.overtime_hours,
                staff_count=schedule.staff_count,
                equipment_capacity=schedule.equipment_capacity,
                station_assignments=schedule.station_assignments,
                total_batches_planned=schedule.total_batches_planned,
                total_batches_completed=schedule.total_batches_completed,
                total_quantity_planned=schedule.total_quantity_planned,
                total_quantity_produced=schedule.total_quantity_produced,
                is_finalized=schedule.is_finalized,
                is_active=schedule.is_active,
                efficiency_percentage=schedule.efficiency_percentage,
                utilization_percentage=schedule.utilization_percentage,
                on_time_completion_rate=schedule.on_time_completion_rate,
                schedule_notes=schedule.schedule_notes,
                schedule_adjustments=schedule.schedule_adjustments,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc),
                finalized_at=schedule.finalized_at + date_offset if schedule.finalized_at else None
            )
            db.add(new_schedule)
            stats["production_schedules"] += 1

        # Clone Production Capacity
        result = await db.execute(
            select(ProductionCapacity).where(ProductionCapacity.tenant_id == base_uuid)
        )
        base_capacity = result.scalars().all()

        for capacity in base_capacity:
            new_capacity = ProductionCapacity(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                resource_type=capacity.resource_type,
                resource_id=capacity.resource_id,
                resource_name=capacity.resource_name,
                date=capacity.date + date_offset,
                start_time=capacity.start_time + date_offset,
                end_time=capacity.end_time + date_offset,
                total_capacity_units=capacity.total_capacity_units,
                allocated_capacity_units=capacity.allocated_capacity_units,
                remaining_capacity_units=capacity.remaining_capacity_units,
                is_available=capacity.is_available,
                is_maintenance=capacity.is_maintenance,
                is_reserved=capacity.is_reserved,
                equipment_type=capacity.equipment_type,
                max_batch_size=capacity.max_batch_size,
                min_batch_size=capacity.min_batch_size,
                setup_time_minutes=capacity.setup_time_minutes,
                cleanup_time_minutes=capacity.cleanup_time_minutes,
                efficiency_rating=capacity.efficiency_rating,
                maintenance_status=capacity.maintenance_status,
                last_maintenance_date=capacity.last_maintenance_date + date_offset if capacity.last_maintenance_date else None,
                notes=capacity.notes,
                restrictions=capacity.restrictions,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc)
            )
            db.add(new_capacity)
            stats["production_capacity"] += 1

        # Commit cloned data first
        await db.commit()

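        # Alert generation below is deliberately best-effort: the cloned rows
        # are already committed, so a RabbitMQ or alerting failure is logged
        # and swallowed rather than failing the whole clone.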
        # Generate equipment maintenance and status alerts with RabbitMQ publishing
        rabbitmq_client = None
        try:
            # Initialize RabbitMQ client for alert publishing
            rabbitmq_host = os.getenv("RABBITMQ_HOST", "rabbitmq-service")
            rabbitmq_user = os.getenv("RABBITMQ_USER", "bakery")
            rabbitmq_password = os.getenv("RABBITMQ_PASSWORD", "forecast123")
            rabbitmq_port = os.getenv("RABBITMQ_PORT", "5672")
            rabbitmq_vhost = os.getenv("RABBITMQ_VHOST", "/")
            rabbitmq_url = f"amqp://{rabbitmq_user}:{rabbitmq_password}@{rabbitmq_host}:{rabbitmq_port}{rabbitmq_vhost}"

            rabbitmq_client = RabbitMQClient(rabbitmq_url, service_name="production")
            await rabbitmq_client.connect()

            # Generate alerts and publish to RabbitMQ
            alerts_count = await generate_equipment_alerts(
                db,
                virtual_uuid,
                session_time,
                rabbitmq_client=rabbitmq_client
            )
            stats["alerts_generated"] += alerts_count
            await db.commit()
            logger.info(f"Generated {alerts_count} equipment alerts")
        except Exception as alert_error:
            logger.warning(f"Alert generation failed: {alert_error}", exc_info=True)
        finally:
            # Clean up RabbitMQ connection
            if rabbitmq_client:
                try:
                    await rabbitmq_client.disconnect()
                except Exception as cleanup_error:
                    logger.warning(f"Error disconnecting RabbitMQ: {cleanup_error}")

        total_records = sum(stats.values())
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
            "Production data cloning completed",
            virtual_tenant_id=virtual_tenant_id,
            total_records=total_records,
            stats=stats,
            duration_ms=duration_ms
        )

        return {
            "service": "production",
            "status": "completed",
            "records_cloned": total_records,
            "duration_ms": duration_ms,
            "details": stats
        }

    except ValueError as e:
        logger.error("Invalid UUID format", error=str(e))
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")

    except Exception as e:
        logger.error(
            "Failed to clone production data",
            error=str(e),
            virtual_tenant_id=virtual_tenant_id,
            exc_info=True
        )

        # Rollback on error
        await db.rollback()

        return {
            "service": "production",
            "status": "failed",
            "records_cloned": 0,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
            "error": str(e)
        }


@router.get("/clone/health")
async def clone_health_check(_: bool = Depends(verify_internal_api_key)):
    """
    Health check for internal cloning endpoint

    Used by orchestrator to verify service availability
    """
    return {
        "service": "production",
        "clone_endpoint": "available",
        "version": "2.0.0"
    }
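
# Example orchestrator probe (illustrative; host and port are deployment-specific):
#   curl -H "X-Internal-API-Key: $INTERNAL_API_KEY" \
#        "http://production-service:8000/internal/demo/clone/health"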