# bakery-ia/services/orchestrator/app/api/internal_demo.py
"""
Internal Demo API Endpoints for Orchestrator Service
Used by demo_session service to clone data for virtual demo tenants
"""
import hmac
import os
import sys
import uuid
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, Optional
from uuid import UUID

import structlog
from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy import select, delete, func
from sqlalchemy.ext.asyncio import AsyncSession

from app.core.database import get_db
from app.models.orchestration_run import OrchestrationRun

# Add shared utilities to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
# Router for internal (service-to-service) demo endpoints; mounted by the app.
router = APIRouter()
# Module-level structured logger (structlog).
logger = structlog.get_logger()
# Internal API key for service-to-service communication
# NOTE(review): falls back to a well-known dev default — ensure the
# INTERNAL_API_KEY env var is set in any non-development environment.
INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "dev-internal-key-change-in-production")
async def ensure_unique_run_number(db: AsyncSession, base_run_number: str) -> str:
    """Return a run number that does not collide with any existing row.

    Starts from ``base_run_number``; while a row with that run number exists,
    retries with a fresh 4-character random suffix appended (truncating the
    base so the result stays within 50 characters).
    """
    candidate = base_run_number
    while True:
        lookup = await db.execute(
            select(OrchestrationRun).where(OrchestrationRun.run_number == candidate)
        )
        if lookup.scalar_one_or_none() is None:
            # No collision — this candidate is safe to use.
            return candidate
        # Collision: derive a new candidate from the base plus a random suffix.
        suffix = str(uuid.uuid4())[:4].upper()
        candidate = f"{base_run_number[:50 - len(suffix) - 1]}-{suffix}"
def verify_internal_api_key(x_internal_api_key: str = Header(...)):
    """Verify the internal API key for service-to-service communication.

    FastAPI dependency: reads the ``X-Internal-Api-Key`` header and compares
    it against INTERNAL_API_KEY. Uses hmac.compare_digest for a constant-time
    comparison so response timing does not leak how many leading characters
    of the key matched.

    Raises:
        HTTPException: 403 when the supplied key does not match.

    Returns:
        True when the key is valid (value consumed by Depends()).
    """
    if not hmac.compare_digest(x_internal_api_key, INTERNAL_API_KEY):
        raise HTTPException(status_code=403, detail="Invalid internal API key")
    return True
@router.post("/internal/demo/clone")
async def clone_demo_data(
    base_tenant_id: str,
    virtual_tenant_id: str,
    demo_account_type: str,
    session_id: Optional[str] = None,
    session_created_at: Optional[str] = None,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
    """
    Clone orchestration run demo data from base tenant to virtual tenant.

    Called by the demo_session service during session initialization. Copies
    the 10 most recent orchestration runs of the base tenant to the virtual
    tenant, shifting every timestamp (run-level and per-step) via the shared
    adjust_date_for_demo utility so the clones appear recent relative to the
    demo session's creation time.

    Args:
        base_tenant_id: Source tenant UUID, as a string.
        virtual_tenant_id: Target virtual demo tenant UUID, as a string.
        demo_account_type: Demo account flavor; only used for logging here.
        session_id: Optional demo session identifier, used for logging.
        session_created_at: Optional ISO-8601 timestamp ('Z' suffix accepted)
            used as the reference "now" for date shifting; defaults to the
            current UTC time when absent or unparseable.

    Returns:
        Summary dict: service name, status, success flag, number of records
        cloned, and elapsed duration in milliseconds.

    Raises:
        HTTPException: 500 on any failure (the transaction is rolled back).
    """
    start_time = datetime.now(timezone.utc)

    # Reference "now" for date shifting: prefer the session creation time so
    # every service cloning data for the same session uses the same anchor.
    reference_time = datetime.now(timezone.utc)
    if session_created_at:
        try:
            reference_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
        except ValueError:
            # Unparseable timestamp: keep the current-UTC fallback.
            pass

    logger.info(
        "Starting orchestration runs cloning with date adjustment",
        base_tenant_id=base_tenant_id,
        virtual_tenant_id=virtual_tenant_id,
        demo_account_type=demo_account_type,
        session_id=session_id,
        reference_time=reference_time.isoformat()
    )

    def _adjust(original_timestamp):
        """None-safe wrapper around adjust_date_for_demo for step timestamps."""
        if not original_timestamp:
            return None
        return adjust_date_for_demo(original_timestamp, reference_time, BASE_REFERENCE_DATE)

    try:
        base_uuid = uuid.UUID(base_tenant_id)
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Fetch the most recent base-tenant runs to clone.
        # NOTE(review): an earlier comment claimed only completed /
        # partial_success runs were cloned, but no status filter is applied —
        # confirm whether one is intended.
        result = await db.execute(
            select(OrchestrationRun)
            .where(OrchestrationRun.tenant_id == base_uuid)
            .order_by(OrchestrationRun.started_at.desc())
            .limit(10)  # Clone last 10 runs for demo
        )
        base_runs = list(result.scalars().all())

        runs_cloned = 0
        for base_run in base_runs:
            # Shift the run-level window; fall back to "2 hours ago" when the
            # source run has no start timestamp.
            if base_run.started_at:
                new_started_at = adjust_date_for_demo(
                    base_run.started_at, reference_time, BASE_REFERENCE_DATE
                )
            else:
                new_started_at = reference_time - timedelta(hours=2)

            if base_run.completed_at:
                new_completed_at = adjust_date_for_demo(
                    base_run.completed_at, reference_time, BASE_REFERENCE_DATE
                )
                # Guard against the shift inverting the interval: preserve the
                # original run duration instead.
                if new_completed_at and new_started_at and new_completed_at < new_started_at:
                    duration = base_run.completed_at - base_run.started_at
                    new_completed_at = new_started_at + duration
            else:
                new_completed_at = None

            # Rebuild the run number with the current year and a random suffix
            # so clones never collide with the source or with each other.
            current_year = reference_time.year
            parts = base_run.run_number.split('-')
            if len(parts) >= 4:
                tenant_prefix = parts[1]
                type_code = parts[2]
                original_index = parts[3]
                # UUID-based suffix (not session_id) for collision resistance.
                unique_suffix = str(uuid.uuid4())[:8].upper()
                proposed_run_number = f"ORCH-{tenant_prefix}-{type_code}-{current_year}-{original_index}-{unique_suffix}"
            else:
                unique_suffix = str(uuid.uuid4())[:12].upper()
                proposed_run_number = f"{base_run.run_number}-{unique_suffix}"
            # Double-check against the database to survive rare collisions,
            # especially under concurrent session initializations.
            run_number = await ensure_unique_run_number(db, proposed_run_number)

            new_run = OrchestrationRun(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                run_number=run_number,
                status=base_run.status,
                run_type=base_run.run_type,
                priority=base_run.priority,
                started_at=new_started_at,
                completed_at=new_completed_at,
                duration_seconds=base_run.duration_seconds,
                # Forecasting step
                forecasting_started_at=_adjust(base_run.forecasting_started_at),
                forecasting_completed_at=_adjust(base_run.forecasting_completed_at),
                forecasting_status=base_run.forecasting_status,
                forecasting_error=base_run.forecasting_error,
                # Production step
                production_started_at=_adjust(base_run.production_started_at),
                production_completed_at=_adjust(base_run.production_completed_at),
                production_status=base_run.production_status,
                production_error=base_run.production_error,
                # Procurement step
                procurement_started_at=_adjust(base_run.procurement_started_at),
                procurement_completed_at=_adjust(base_run.procurement_completed_at),
                procurement_status=base_run.procurement_status,
                procurement_error=base_run.procurement_error,
                # Notification step
                notification_started_at=_adjust(base_run.notification_started_at),
                notification_completed_at=_adjust(base_run.notification_completed_at),
                notification_status=base_run.notification_status,
                notification_error=base_run.notification_error,
                # AI Insights (columns may not exist on older model versions)
                ai_insights_started_at=_adjust(base_run.ai_insights_started_at) if hasattr(base_run, 'ai_insights_started_at') else None,
                ai_insights_completed_at=_adjust(base_run.ai_insights_completed_at) if hasattr(base_run, 'ai_insights_completed_at') else None,
                ai_insights_status=base_run.ai_insights_status if hasattr(base_run, 'ai_insights_status') else None,
                ai_insights_generated=base_run.ai_insights_generated if hasattr(base_run, 'ai_insights_generated') else None,
                ai_insights_posted=base_run.ai_insights_posted if hasattr(base_run, 'ai_insights_posted') else None,
                # Results summary
                forecasts_generated=base_run.forecasts_generated,
                production_batches_created=base_run.production_batches_created,
                procurement_plans_created=base_run.procurement_plans_created,
                purchase_orders_created=base_run.purchase_orders_created,
                notifications_sent=base_run.notifications_sent,
                # Performance metrics
                fulfillment_rate=base_run.fulfillment_rate,
                on_time_delivery_rate=base_run.on_time_delivery_rate,
                cost_accuracy=base_run.cost_accuracy,
                quality_score=base_run.quality_score,
                # Data
                forecast_data=base_run.forecast_data,
                run_metadata=base_run.run_metadata,
                # Metadata
                triggered_by=base_run.triggered_by,
                created_at=reference_time,
                updated_at=reference_time
            )
            db.add(new_run)
            await db.flush()
            runs_cloned += 1

        await db.commit()
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
        logger.info(
            "Orchestration runs cloned successfully",
            virtual_tenant_id=str(virtual_tenant_id),
            runs_cloned=runs_cloned,
            duration_ms=duration_ms
        )
        return {
            "service": "orchestrator",
            "status": "completed",
            "success": True,
            "records_cloned": runs_cloned,
            "runs_cloned": runs_cloned,
            "duration_ms": duration_ms
        }
    except Exception as e:
        logger.error("Failed to clone orchestration runs", error=str(e), exc_info=True)
        await db.rollback()
        raise HTTPException(status_code=500, detail=f"Failed to clone orchestration runs: {str(e)}")
@router.delete("/internal/demo/tenant/{virtual_tenant_id}")
async def delete_demo_data(
    virtual_tenant_id: str,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
    """Delete all orchestration runs belonging to a virtual demo tenant.

    Counts the tenant's rows first (so the response can report how many were
    removed), bulk-deletes them, and commits. Rolls back and returns a 500
    on any failure.
    """
    logger.info("Deleting orchestration runs for virtual tenant", virtual_tenant_id=virtual_tenant_id)
    started = datetime.now(timezone.utc)
    try:
        tenant_uuid = uuid.UUID(virtual_tenant_id)

        # Snapshot the row count before deleting, for the response payload.
        run_count = await db.scalar(
            select(func.count(OrchestrationRun.id)).where(OrchestrationRun.tenant_id == tenant_uuid)
        )

        # Bulk delete every run owned by this virtual tenant.
        await db.execute(
            delete(OrchestrationRun).where(OrchestrationRun.tenant_id == tenant_uuid)
        )
        await db.commit()

        elapsed_ms = int((datetime.now(timezone.utc) - started).total_seconds() * 1000)
        logger.info(
            "Orchestration runs deleted successfully",
            virtual_tenant_id=virtual_tenant_id,
            duration_ms=elapsed_ms
        )
        return {
            "service": "orchestrator",
            "status": "deleted",
            "virtual_tenant_id": virtual_tenant_id,
            "records_deleted": {
                "orchestration_runs": run_count,
                "total": run_count
            },
            "duration_ms": elapsed_ms
        }
    except Exception as e:
        logger.error("Failed to delete orchestration runs", error=str(e), exc_info=True)
        await db.rollback()
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/internal/demo/clone/health")
async def health_check(_: bool = Depends(verify_internal_api_key)):
    """Authenticated liveness probe for the internal demo-cloning endpoints."""
    return {"status": "healthy", "service": "orchestrator"}