bakery-ia/services/production/app/api/internal_demo.py

"""
Internal Demo Cloning API for Production Service
Service-to-service endpoint for cloning production data
"""
from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, delete, func
import structlog
import uuid
from datetime import datetime, timezone, timedelta
from typing import Optional, Dict, Any
import os
from app.core.database import get_db
from app.models.production import (
    ProductionBatch, ProductionSchedule, ProductionCapacity,
    QualityCheckTemplate, QualityCheck, Equipment,
    ProductionStatus, ProductionPriority, ProcessStage,
    EquipmentStatus, EquipmentType
)
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE

logger = structlog.get_logger()

router = APIRouter(prefix="/internal/demo", tags=["internal"])

# Internal API key for service-to-service auth
INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "dev-internal-key-change-in-production")

# Base demo tenant IDs
DEMO_TENANT_SAN_PABLO = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
DEMO_TENANT_LA_ESPIGA = "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7"
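# Note: these template-tenant IDs are not referenced below; presumably the demo
# orchestrator passes one of them as base_tenant_id when it calls /clone.
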
def verify_internal_api_key(x_internal_api_key: Optional[str] = Header(None)):
"""Verify internal API key for service-to-service communication"""
if x_internal_api_key != INTERNAL_API_KEY:
logger.warning("Unauthorized internal API access attempted")
raise HTTPException(status_code=403, detail="Invalid internal API key")
return True
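
# A minimal sketch of how a caller might invoke the clone endpoint. The host/port,
# key value, and demo_account_type value are assumptions, not defined in this file.
# The scalar parameters travel as query-string values, and FastAPI matches the
# "X-Internal-API-Key" header to x_internal_api_key (header names are
# case-insensitive, underscores become hyphens).
#
#   import httpx
#
#   response = httpx.post(
#       "http://production:8000/internal/demo/clone",   # assumed host/port
#       params={
#           "base_tenant_id": DEMO_TENANT_SAN_PABLO,
#           "virtual_tenant_id": str(uuid.uuid4()),
#           "demo_account_type": "standard",            # assumed value
#           "session_id": "demo-session-123",           # hypothetical session ID
#       },
#       headers={"X-Internal-API-Key": INTERNAL_API_KEY},
#   )
#   response.raise_for_status()
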
@router.post("/clone")
async def clone_demo_data(
    base_tenant_id: str,
    virtual_tenant_id: str,
    demo_account_type: str,
    session_id: Optional[str] = None,
    session_created_at: Optional[str] = None,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
"""
Clone production service data for a virtual demo tenant
Clones:
- Production batches (historical production runs)
- Production schedules (daily planning)
- Production capacity records
- Quality check templates
- Quality checks (inspection records)
- Equipment (machines and tools)
Args:
base_tenant_id: Template tenant UUID to clone from
virtual_tenant_id: Target virtual tenant UUID
demo_account_type: Type of demo account
session_id: Originating session ID for tracing
        session_created_at: Session creation timestamp for date adjustment

    Returns:
        Cloning status and record counts
    """
    start_time = datetime.now(timezone.utc)
    # Parse session creation time for date adjustment
    if session_created_at:
        try:
            session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
        except (ValueError, AttributeError):
            session_time = start_time
    else:
        session_time = start_time
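    # (datetime.fromisoformat() only accepts a trailing 'Z' from Python 3.11 on,
    # hence the replace('Z', '+00:00') normalization above.)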
    logger.info(
        "Starting production data cloning",
        base_tenant_id=base_tenant_id,
        virtual_tenant_id=virtual_tenant_id,
        demo_account_type=demo_account_type,
        session_id=session_id,
        session_created_at=session_created_at
    )
    try:
        # Validate UUIDs
        base_uuid = uuid.UUID(base_tenant_id)
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Track cloning statistics
        stats = {
            "production_batches": 0,
            "production_schedules": 0,
            "production_capacity": 0,
            "quality_check_templates": 0,
            "quality_checks": 0,
"equipment": 0,
"alerts_generated": 0
}
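        # "alerts_generated" stays in the stats for a stable response shape even
        # though alert generation now lives in the alert service (see note below).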
        # Old-ID -> new-ID mappings, used to remap foreign keys on cloned rows
        batch_id_map = {}
        template_id_map = {}
        equipment_id_map = {}
        # Clone Equipment first (no dependencies)
        result = await db.execute(
            select(Equipment).where(Equipment.tenant_id == base_uuid)
        )
        base_equipment = result.scalars().all()

        logger.info(
            "Found equipment to clone",
            count=len(base_equipment),
            base_tenant=str(base_uuid)
        )

        for equipment in base_equipment:
            new_equipment_id = uuid.uuid4()
            equipment_id_map[equipment.id] = new_equipment_id
            # Adjust dates relative to session creation time
            adjusted_install_date = adjust_date_for_demo(
                equipment.install_date, session_time, BASE_REFERENCE_DATE
            )
            adjusted_last_maintenance = adjust_date_for_demo(
                equipment.last_maintenance_date, session_time, BASE_REFERENCE_DATE
            )
            adjusted_next_maintenance = adjust_date_for_demo(
                equipment.next_maintenance_date, session_time, BASE_REFERENCE_DATE
            )

            new_equipment = Equipment(
                id=new_equipment_id,
                tenant_id=virtual_uuid,
                name=equipment.name,
                type=equipment.type,
                model=equipment.model,
                serial_number=equipment.serial_number,
                location=equipment.location,
                status=equipment.status,
                install_date=adjusted_install_date,
                last_maintenance_date=adjusted_last_maintenance,
                next_maintenance_date=adjusted_next_maintenance,
                maintenance_interval_days=equipment.maintenance_interval_days,
                efficiency_percentage=equipment.efficiency_percentage,
                uptime_percentage=equipment.uptime_percentage,
                energy_usage_kwh=equipment.energy_usage_kwh,
                power_kw=equipment.power_kw,
                capacity=equipment.capacity,
                weight_kg=equipment.weight_kg,
                current_temperature=equipment.current_temperature,
                target_temperature=equipment.target_temperature,
                is_active=equipment.is_active,
                notes=equipment.notes,
                created_at=session_time,
                updated_at=session_time
            )
            db.add(new_equipment)
            stats["equipment"] += 1

        # Flush to get equipment IDs
        await db.flush()
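        # flush() emits the INSERTs inside the still-open transaction without
        # committing, so dependent rows below can reference these IDs while the
        # whole clone remains atomic.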
        # Clone Quality Check Templates
        result = await db.execute(
            select(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == base_uuid)
        )
        base_templates = result.scalars().all()

        logger.info(
            "Found quality check templates to clone",
            count=len(base_templates),
            base_tenant=str(base_uuid)
        )

        for template in base_templates:
            new_template_id = uuid.uuid4()
            template_id_map[template.id] = new_template_id

            new_template = QualityCheckTemplate(
                id=new_template_id,
                tenant_id=virtual_uuid,
                name=template.name,
                template_code=template.template_code,
                check_type=template.check_type,
                category=template.category,
                description=template.description,
                instructions=template.instructions,
                parameters=template.parameters,
                thresholds=template.thresholds,
                scoring_criteria=template.scoring_criteria,
                is_active=template.is_active,
                is_required=template.is_required,
                is_critical=template.is_critical,
                weight=template.weight,
                min_value=template.min_value,
                max_value=template.max_value,
                target_value=template.target_value,
                unit=template.unit,
                tolerance_percentage=template.tolerance_percentage,
                applicable_stages=template.applicable_stages,
                created_by=template.created_by,
                created_at=session_time,
                updated_at=session_time
            )
            db.add(new_template)
            stats["quality_check_templates"] += 1

        # Flush to get template IDs
        await db.flush()
        # Clone Production Batches
        result = await db.execute(
            select(ProductionBatch).where(ProductionBatch.tenant_id == base_uuid)
        )
        base_batches = result.scalars().all()

        logger.info(
            "Found production batches to clone",
            count=len(base_batches),
            base_tenant=str(base_uuid)
        )
        # Calculate date offset to make cloned production look recent; guard
        # against templates whose batches all lack a planned_start_time
        planned_starts = [b.planned_start_time for b in base_batches if b.planned_start_time]
        if planned_starts:
            today = datetime.now(timezone.utc)
            date_offset = today - max(planned_starts)
        else:
            date_offset = timedelta(days=0)
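        # Example: if the newest template batch was planned for Jan 10 and today is
        # Mar 1, every cloned timestamp below shifts forward by that same 50-day offset.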
        for batch in base_batches:
            new_batch_id = uuid.uuid4()
            batch_id_map[batch.id] = new_batch_id

            new_batch = ProductionBatch(
                id=new_batch_id,
                tenant_id=virtual_uuid,
                batch_number=f"BATCH-{uuid.uuid4().hex[:8].upper()}",  # New batch number
                product_id=batch.product_id,  # Keep product reference
                product_name=batch.product_name,
                recipe_id=batch.recipe_id,  # Keep recipe reference
                planned_start_time=batch.planned_start_time + date_offset if batch.planned_start_time else None,
                planned_end_time=batch.planned_end_time + date_offset if batch.planned_end_time else None,
                planned_quantity=batch.planned_quantity,
                planned_duration_minutes=batch.planned_duration_minutes,
                actual_start_time=batch.actual_start_time + date_offset if batch.actual_start_time else None,
                actual_end_time=batch.actual_end_time + date_offset if batch.actual_end_time else None,
                actual_quantity=batch.actual_quantity,
                actual_duration_minutes=batch.actual_duration_minutes,
                status=batch.status,
                priority=batch.priority,
                current_process_stage=batch.current_process_stage,
                process_stage_history=batch.process_stage_history,
                pending_quality_checks=batch.pending_quality_checks,
                completed_quality_checks=batch.completed_quality_checks,
                estimated_cost=batch.estimated_cost,
                actual_cost=batch.actual_cost,
                labor_cost=batch.labor_cost,
                material_cost=batch.material_cost,
                overhead_cost=batch.overhead_cost,
                yield_percentage=batch.yield_percentage,
                quality_score=batch.quality_score,
                waste_quantity=batch.waste_quantity,
                defect_quantity=batch.defect_quantity,
                equipment_used=batch.equipment_used,
                staff_assigned=batch.staff_assigned,
                station_id=batch.station_id,
                order_id=batch.order_id,
                forecast_id=batch.forecast_id,
                is_rush_order=batch.is_rush_order,
                is_special_recipe=batch.is_special_recipe,
                production_notes=batch.production_notes,
                quality_notes=batch.quality_notes,
                delay_reason=batch.delay_reason,
                cancellation_reason=batch.cancellation_reason,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc),
                completed_at=batch.completed_at + date_offset if batch.completed_at else None
            )
            db.add(new_batch)
            stats["production_batches"] += 1

        # Flush to get batch IDs
        await db.flush()
        # Clone Quality Checks
        result = await db.execute(
            select(QualityCheck).where(QualityCheck.tenant_id == base_uuid)
        )
        base_checks = result.scalars().all()

        logger.info(
            "Found quality checks to clone",
            count=len(base_checks),
            base_tenant=str(base_uuid)
        )

        for check in base_checks:
            new_batch_id = batch_id_map.get(check.batch_id, check.batch_id)
            new_template_id = template_id_map.get(check.template_id, check.template_id) if check.template_id else None
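            # .get() falls back to the original ID when a batch/template was not
            # cloned above, so the check keeps a non-null (if cross-tenant) reference.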
            new_check = QualityCheck(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                batch_id=new_batch_id,
                template_id=new_template_id,
                check_type=check.check_type,
                process_stage=check.process_stage,
                check_time=check.check_time + date_offset,
                checker_id=check.checker_id,
                quality_score=check.quality_score,
                pass_fail=check.pass_fail,
                defect_count=check.defect_count,
                defect_types=check.defect_types,
                measured_weight=check.measured_weight,
                measured_temperature=check.measured_temperature,
                measured_moisture=check.measured_moisture,
                measured_dimensions=check.measured_dimensions,
                stage_specific_data=check.stage_specific_data,
                target_weight=check.target_weight,
                target_temperature=check.target_temperature,
                target_moisture=check.target_moisture,
                tolerance_percentage=check.tolerance_percentage,
                within_tolerance=check.within_tolerance,
                corrective_action_needed=check.corrective_action_needed,
                corrective_actions=check.corrective_actions,
                template_results=check.template_results,
                criteria_scores=check.criteria_scores,
                check_notes=check.check_notes,
                photos_urls=check.photos_urls,
                certificate_url=check.certificate_url,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc)
            )
            db.add(new_check)
            stats["quality_checks"] += 1
        # Clone Production Schedules
        result = await db.execute(
            select(ProductionSchedule).where(ProductionSchedule.tenant_id == base_uuid)
        )
        base_schedules = result.scalars().all()

        logger.info(
            "Found production schedules to clone",
            count=len(base_schedules),
            base_tenant=str(base_uuid)
        )

        for schedule in base_schedules:
            new_schedule = ProductionSchedule(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                schedule_date=schedule.schedule_date + date_offset,
                shift_start=schedule.shift_start + date_offset,
                shift_end=schedule.shift_end + date_offset,
                total_capacity_hours=schedule.total_capacity_hours,
                planned_capacity_hours=schedule.planned_capacity_hours,
                actual_capacity_hours=schedule.actual_capacity_hours,
                overtime_hours=schedule.overtime_hours,
                staff_count=schedule.staff_count,
                equipment_capacity=schedule.equipment_capacity,
                station_assignments=schedule.station_assignments,
                total_batches_planned=schedule.total_batches_planned,
                total_batches_completed=schedule.total_batches_completed,
                total_quantity_planned=schedule.total_quantity_planned,
                total_quantity_produced=schedule.total_quantity_produced,
                is_finalized=schedule.is_finalized,
                is_active=schedule.is_active,
                efficiency_percentage=schedule.efficiency_percentage,
                utilization_percentage=schedule.utilization_percentage,
                on_time_completion_rate=schedule.on_time_completion_rate,
                schedule_notes=schedule.schedule_notes,
                schedule_adjustments=schedule.schedule_adjustments,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc),
                finalized_at=schedule.finalized_at + date_offset if schedule.finalized_at else None
            )
            db.add(new_schedule)
            stats["production_schedules"] += 1
        # Clone Production Capacity
        result = await db.execute(
            select(ProductionCapacity).where(ProductionCapacity.tenant_id == base_uuid)
        )
        base_capacity = result.scalars().all()

        for capacity in base_capacity:
            new_capacity = ProductionCapacity(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                resource_type=capacity.resource_type,
                resource_id=capacity.resource_id,
                resource_name=capacity.resource_name,
                date=capacity.date + date_offset,
                start_time=capacity.start_time + date_offset,
                end_time=capacity.end_time + date_offset,
                total_capacity_units=capacity.total_capacity_units,
                allocated_capacity_units=capacity.allocated_capacity_units,
                remaining_capacity_units=capacity.remaining_capacity_units,
                is_available=capacity.is_available,
                is_maintenance=capacity.is_maintenance,
                is_reserved=capacity.is_reserved,
                equipment_type=capacity.equipment_type,
                max_batch_size=capacity.max_batch_size,
                min_batch_size=capacity.min_batch_size,
                setup_time_minutes=capacity.setup_time_minutes,
                cleanup_time_minutes=capacity.cleanup_time_minutes,
                efficiency_rating=capacity.efficiency_rating,
                maintenance_status=capacity.maintenance_status,
                last_maintenance_date=capacity.last_maintenance_date + date_offset if capacity.last_maintenance_date else None,
                notes=capacity.notes,
                restrictions=capacity.restrictions,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc)
            )
            db.add(new_capacity)
            stats["production_capacity"] += 1
        # Commit cloned data
        await db.commit()

        # NOTE: Alert generation removed - alerts are now generated automatically by
        # the production alert service, which runs scheduled checks at appropriate
        # intervals. This eliminates duplicate alerts and provides a more realistic
        # demo experience.
        stats["alerts_generated"] = 0
        # Calculate total from non-alert stats (keys must match the stats dict above)
        total_records = (stats["equipment"] + stats["production_batches"] +
                         stats["production_schedules"] + stats["quality_check_templates"] +
                         stats["quality_checks"] + stats["production_capacity"])
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
            "Production data cloning completed",
            virtual_tenant_id=virtual_tenant_id,
            total_records=total_records,
            stats=stats,
            duration_ms=duration_ms
        )

        return {
            "service": "production",
            "status": "completed",
            "records_cloned": total_records,
            "duration_ms": duration_ms,
            "details": stats
        }
    except ValueError as e:
        logger.error("Invalid UUID format", error=str(e))
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")
    except Exception as e:
        logger.error(
            "Failed to clone production data",
            error=str(e),
            virtual_tenant_id=virtual_tenant_id,
            exc_info=True
        )
        # Rollback on error
        await db.rollback()
        return {
            "service": "production",
            "status": "failed",
            "records_cloned": 0,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
            "error": str(e)
        }
@router.get("/clone/health")
async def clone_health_check(_: bool = Depends(verify_internal_api_key)):
"""
Health check for internal cloning endpoint
Used by orchestrator to verify service availability
"""
return {
"service": "production",
"clone_endpoint": "available",
"version": "2.0.0"
}
@router.delete("/tenant/{virtual_tenant_id}")
async def delete_demo_data(
    virtual_tenant_id: str,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
"""Delete all production data for a virtual demo tenant"""
logger.info("Deleting production data for virtual tenant", virtual_tenant_id=virtual_tenant_id)
start_time = datetime.now(timezone.utc)
try:
virtual_uuid = uuid.UUID(virtual_tenant_id)
        # Count records
        batch_count = await db.scalar(select(func.count(ProductionBatch.id)).where(ProductionBatch.tenant_id == virtual_uuid))
        schedule_count = await db.scalar(select(func.count(ProductionSchedule.id)).where(ProductionSchedule.tenant_id == virtual_uuid))
        quality_count = await db.scalar(select(func.count(QualityCheck.id)).where(QualityCheck.tenant_id == virtual_uuid))
        equipment_count = await db.scalar(select(func.count(Equipment.id)).where(Equipment.tenant_id == virtual_uuid))
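        # Quality check templates and capacity rows are also deleted below, but
        # are not included in the reported counts.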
        # Delete children before parents to respect foreign key constraints
        await db.execute(delete(QualityCheck).where(QualityCheck.tenant_id == virtual_uuid))
        await db.execute(delete(ProductionBatch).where(ProductionBatch.tenant_id == virtual_uuid))
        await db.execute(delete(ProductionSchedule).where(ProductionSchedule.tenant_id == virtual_uuid))
        await db.execute(delete(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == virtual_uuid))
        await db.execute(delete(Equipment).where(Equipment.tenant_id == virtual_uuid))
        await db.execute(delete(ProductionCapacity).where(ProductionCapacity.tenant_id == virtual_uuid))
        await db.commit()
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
        logger.info("Production data deleted successfully", virtual_tenant_id=virtual_tenant_id, duration_ms=duration_ms)

        return {
            "service": "production",
            "status": "deleted",
            "virtual_tenant_id": virtual_tenant_id,
            "records_deleted": {
                "batches": batch_count,
                "schedules": schedule_count,
                "quality_checks": quality_count,
                "equipment": equipment_count,
                "total": batch_count + schedule_count + quality_count + equipment_count
            },
            "duration_ms": duration_ms
        }
    except Exception as e:
        logger.error("Failed to delete production data", error=str(e), exc_info=True)
        await db.rollback()
        raise HTTPException(status_code=500, detail=str(e))