Initial commit - production deployment
6
services/production/app/api/__init__.py
Normal file
@@ -0,0 +1,6 @@
# ================================================================
# services/production/app/api/__init__.py
# ================================================================
"""
API routes and endpoints for production service
"""
528
services/production/app/api/analytics.py
Normal file
@@ -0,0 +1,528 @@
# services/production/app/api/analytics.py
"""
Analytics API endpoints for Production Service
Following standardized URL structure: /api/v1/tenants/{tenant_id}/production/analytics/{operation}
Requires: Professional or Enterprise subscription tier
"""

from datetime import date, datetime, timedelta
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import analytics_tier_required
from app.services.production_service import ProductionService
from app.core.config import settings
from shared.routing import RouteBuilder

logger = structlog.get_logger()

# Create route builder for consistent URL structure
route_builder = RouteBuilder('production')
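# Illustrative only (assumed RouteBuilder behavior, inferred from the URL
# pattern in the module docstring above):
#   route_builder.build_analytics_route("equipment-efficiency")
#   -> "/api/v1/tenants/{tenant_id}/production/analytics/equipment-efficiency"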

router = APIRouter(tags=["production-analytics"])


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


# ===== ANALYTICS ENDPOINTS (Professional/Enterprise Only) =====

@router.get(
    route_builder.build_analytics_route("equipment-efficiency"),
    response_model=dict
)
@analytics_tier_required
async def get_equipment_efficiency(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None, description="Start date for analysis"),
    end_date: Optional[date] = Query(None, description="End date for analysis"),
    equipment_id: Optional[UUID] = Query(None, description="Filter by equipment"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Analyze equipment efficiency (Professional/Enterprise only)

    Metrics:
    - Overall Equipment Effectiveness (OEE)
    - Availability rate
    - Performance rate
    - Quality rate
    - Downtime analysis
    """
    try:
        # Set default dates
        if not end_date:
            end_date = datetime.now().date()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Use existing method: get_equipment_efficiency_analytics
        efficiency_data = await production_service.get_equipment_efficiency_analytics(tenant_id)
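        # NOTE: the defaulted start_date/end_date are not currently forwarded to
        # get_equipment_efficiency_analytics, which aggregates tenant-wide.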

        logger.info("Equipment efficiency analyzed",
                    tenant_id=str(tenant_id),
                    equipment_id=str(equipment_id) if equipment_id else "all",
                    user_id=current_user.get('user_id'))

        return efficiency_data

    except Exception as e:
        logger.error("Error analyzing equipment efficiency",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to analyze equipment efficiency"
        )


@router.get(
    route_builder.build_analytics_route("production-trends"),
    response_model=dict
)
@analytics_tier_required
async def get_production_trends(
    tenant_id: UUID = Path(...),
    days_back: int = Query(90, ge=7, le=365, description="Days to analyze"),
    product_id: Optional[UUID] = Query(None, description="Filter by product"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Analyze production trends (Professional/Enterprise only)

    Provides:
    - Production volume trends
    - Batch completion rates
    - Cycle time analysis
    - Quality trends
    - Seasonal patterns
    """
    try:
        # Use existing methods: get_performance_analytics + get_yield_trends_analytics
        end_date_calc = datetime.now().date()
        start_date_calc = end_date_calc - timedelta(days=days_back)

        performance = await production_service.get_performance_analytics(
            tenant_id, start_date_calc, end_date_calc
        )

        # Map days_back to period string for yield trends
        period = "weekly" if days_back <= 30 else "monthly"
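        # e.g. days_back=14 -> "weekly"; the default days_back=90 -> "monthly"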
        yield_trends = await production_service.get_yield_trends_analytics(tenant_id, period)

        trends = {
            "performance_metrics": performance,
            "yield_trends": yield_trends,
            "days_analyzed": days_back,
            "product_filter": str(product_id) if product_id else None
        }

        logger.info("Production trends analyzed",
                    tenant_id=str(tenant_id),
                    days_analyzed=days_back,
                    user_id=current_user.get('user_id'))

        return trends

    except Exception as e:
        logger.error("Error analyzing production trends",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to analyze production trends"
        )


@router.get(
    route_builder.build_analytics_route("capacity-utilization"),
    response_model=dict
)
@analytics_tier_required
async def get_capacity_utilization(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None),
    end_date: Optional[date] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Analyze production capacity utilization (Professional/Enterprise only)

    Metrics:
    - Capacity utilization percentage
    - Bottleneck identification
    - Resource allocation efficiency
    - Optimization recommendations
    """
    try:
        if not end_date:
            end_date = datetime.now().date()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Use existing method: get_capacity_usage_report
        utilization = await production_service.get_capacity_usage_report(
            tenant_id, start_date, end_date
        )

        logger.info("Capacity utilization analyzed",
                    tenant_id=str(tenant_id),
                    user_id=current_user.get('user_id'))

        return utilization

    except Exception as e:
        logger.error("Error analyzing capacity utilization",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to analyze capacity utilization"
        )


@router.get(
    route_builder.build_analytics_route("quality-metrics"),
    response_model=dict
)
@analytics_tier_required
async def get_quality_metrics(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None),
    end_date: Optional[date] = Query(None),
    product_id: Optional[UUID] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Analyze quality control metrics (Professional/Enterprise only)

    Metrics:
    - First pass yield
    - Defect rates by type
    - Quality trends over time
    - Root cause analysis
    """
    try:
        if not end_date:
            end_date = datetime.now().date()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Use existing methods: get_quality_trends + get_top_defects_analytics
        quality_trends = await production_service.get_quality_trends(
            tenant_id, start_date, end_date
        )
        top_defects = await production_service.get_top_defects_analytics(tenant_id)

        quality_data = {
            "quality_trends": quality_trends,
            "top_defects": top_defects,
            "period": {
                "start_date": start_date.isoformat(),
                "end_date": end_date.isoformat()
            },
            "product_filter": str(product_id) if product_id else None
        }

        logger.info("Quality metrics analyzed",
                    tenant_id=str(tenant_id),
                    user_id=current_user.get('user_id'))

        return quality_data

    except Exception as e:
        logger.error("Error analyzing quality metrics",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to analyze quality metrics"
        )


@router.get(
    route_builder.build_analytics_route("waste-analysis"),
    response_model=dict
)
@analytics_tier_required
async def get_production_waste_analysis(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None),
    end_date: Optional[date] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Analyze production waste (Professional/Enterprise only)

    Provides:
    - Material waste percentages
    - Waste by category/product
    - Cost impact analysis
    - Reduction recommendations
    """
    try:
        if not end_date:
            end_date = datetime.now().date()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Use existing method: get_batch_statistics to calculate waste from yield data
        batch_stats = await production_service.get_batch_statistics(
            tenant_id, start_date, end_date
        )

        # Calculate waste metrics from batch statistics
        waste_analysis = {
            "batch_statistics": batch_stats,
            "waste_metrics": {
                "calculated_from": "yield_variance",
                "note": "Waste derived from planned vs actual quantity differences"
            },
            "period": {
                "start_date": start_date.isoformat(),
                "end_date": end_date.isoformat()
            }
        }

        logger.info("Production waste analyzed",
                    tenant_id=str(tenant_id),
                    user_id=current_user.get('user_id'))

        return waste_analysis

    except Exception as e:
        logger.error("Error analyzing production waste",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to analyze production waste"
        )


@router.get(
    route_builder.build_analytics_route("cost-analysis"),
    response_model=dict
)
@analytics_tier_required
async def get_production_cost_analysis(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None),
    end_date: Optional[date] = Query(None),
    product_id: Optional[UUID] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Analyze production costs (Professional/Enterprise only)

    Metrics:
    - Cost per unit
    - Direct vs indirect costs
    - Cost trends over time
    - Cost variance analysis
    - Profitability insights
    """
    try:
        if not end_date:
            end_date = datetime.now().date()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Use existing method: get_batch_statistics for cost-related data
        batch_stats = await production_service.get_batch_statistics(
            tenant_id, start_date, end_date
        )

        cost_analysis = {
            "batch_statistics": batch_stats,
            "cost_metrics": {
                "note": "Cost analysis requires additional cost tracking data",
                "available_metrics": ["batch_count", "production_volume", "efficiency"]
            },
            "period": {
                "start_date": start_date.isoformat(),
                "end_date": end_date.isoformat()
            },
            "product_filter": str(product_id) if product_id else None
        }

        logger.info("Production cost analyzed",
                    tenant_id=str(tenant_id),
                    product_id=str(product_id) if product_id else "all",
                    user_id=current_user.get('user_id'))

        return cost_analysis

    except Exception as e:
        logger.error("Error analyzing production costs",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to analyze production costs"
        )


@router.get(
    route_builder.build_analytics_route("predictive-maintenance"),
    response_model=dict
)
@analytics_tier_required
async def get_predictive_maintenance_insights(
    tenant_id: UUID = Path(...),
    equipment_id: Optional[UUID] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get predictive maintenance insights (Professional/Enterprise only)

    Provides:
    - Equipment failure predictions
    - Maintenance schedule recommendations
    - Parts replacement forecasts
    - Downtime risk assessment
    """
    try:
        # Use existing method: predict_capacity_bottlenecks as proxy for maintenance insights
        days_ahead = 7  # Predict one week ahead
        bottlenecks = await production_service.predict_capacity_bottlenecks(
            tenant_id, days_ahead
        )

        maintenance_insights = {
            "capacity_bottlenecks": bottlenecks,
            "maintenance_recommendations": {
                "note": "Derived from capacity predictions and bottleneck analysis",
                "days_predicted": days_ahead
            },
            "equipment_filter": str(equipment_id) if equipment_id else None
        }

        logger.info("Predictive maintenance insights generated",
                    tenant_id=str(tenant_id),
                    equipment_id=str(equipment_id) if equipment_id else "all",
                    user_id=current_user.get('user_id'))

        return maintenance_insights

    except Exception as e:
        logger.error("Error generating predictive maintenance insights",
                     tenant_id=str(tenant_id),
                     error=str(e))
        raise HTTPException(
            status_code=500,
            detail="Failed to generate predictive maintenance insights"
        )


# ===== SUSTAINABILITY / WASTE ANALYTICS ENDPOINT =====
# Called by Inventory Service for sustainability metrics

@router.get(
    "/api/v1/tenants/{tenant_id}/production/waste-analytics",
    response_model=dict
)
async def get_waste_analytics_for_sustainability(
    tenant_id: UUID = Path(...),
    start_date: datetime = Query(..., description="Start date for waste analysis"),
    end_date: datetime = Query(..., description="End date for waste analysis"),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get production waste analytics for sustainability tracking

    This endpoint is called by the Inventory Service's sustainability module
    to calculate environmental impact and SDG 12.3 compliance.

    Does NOT require analytics tier - this is core sustainability data.

    Returns:
    - total_production_waste: Sum of waste_quantity from all batches
    - total_defects: Sum of defect_quantity from all batches
    - total_planned: Sum of planned_quantity
    - total_actual: Sum of actual_quantity
    """
    try:
        waste_data = await production_service.get_waste_analytics(
            tenant_id,
            start_date,
            end_date
        )

        logger.info(
            "Production waste analytics retrieved for sustainability",
            tenant_id=str(tenant_id),
            total_waste=waste_data.get('total_production_waste', 0),
            start_date=start_date.isoformat(),
            end_date=end_date.isoformat()
        )

        return waste_data

    except Exception as e:
        logger.error(
            "Error getting waste analytics for sustainability",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve waste analytics: {str(e)}"
        )


@router.get(
    "/api/v1/tenants/{tenant_id}/production/baseline",
    response_model=dict
)
async def get_baseline_metrics(
    tenant_id: UUID = Path(...),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get baseline production metrics from first 90 days

    Used by sustainability service to establish waste baseline
    for SDG 12.3 compliance tracking.

    Returns:
    - waste_percentage: Baseline waste percentage from first 90 days
    - total_production_kg: Total production in first 90 days
    - total_waste_kg: Total waste in first 90 days
    - period: Date range of baseline period
    """
    try:
        baseline_data = await production_service.get_baseline_metrics(tenant_id)

        logger.info(
            "Baseline metrics retrieved",
            tenant_id=str(tenant_id),
            baseline_percentage=baseline_data.get('waste_percentage', 0)
        )

        return baseline_data

    except Exception as e:
        logger.error(
            "Error getting baseline metrics",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve baseline metrics: {str(e)}"
        )
237
services/production/app/api/audit.py
Normal file
@@ -0,0 +1,237 @@
# services/production/app/api/audit.py
"""
Audit Logs API - Retrieve audit trail for production service
"""

from fastapi import APIRouter, Depends, HTTPException, Query, Path, status
from typing import Optional, Dict, Any
from uuid import UUID
from datetime import datetime
import structlog
from sqlalchemy import select, func, and_
from sqlalchemy.ext.asyncio import AsyncSession

from app.models import AuditLog
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.models.audit_log_schemas import (
    AuditLogResponse,
    AuditLogListResponse,
    AuditLogStatsResponse
)
from app.core.database import database_manager

route_builder = RouteBuilder('production')
router = APIRouter(tags=["audit-logs"])
logger = structlog.get_logger()


async def get_db():
    """Database session dependency"""
    async with database_manager.get_session() as session:
        yield session


@router.get(
    route_builder.build_base_route("audit-logs"),
    response_model=AuditLogListResponse
)
@require_user_role(['admin', 'owner'])
async def get_audit_logs(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    start_date: Optional[datetime] = Query(None, description="Filter logs from this date"),
    end_date: Optional[datetime] = Query(None, description="Filter logs until this date"),
    user_id: Optional[UUID] = Query(None, description="Filter by user ID"),
    action: Optional[str] = Query(None, description="Filter by action type"),
    resource_type: Optional[str] = Query(None, description="Filter by resource type"),
    severity: Optional[str] = Query(None, description="Filter by severity level"),
    search: Optional[str] = Query(None, description="Search in description field"),
    limit: int = Query(100, ge=1, le=1000, description="Number of records to return"),
    offset: int = Query(0, ge=0, description="Number of records to skip"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get audit logs for production service.
    Requires admin or owner role.
    """
    try:
        logger.info(
            "Retrieving audit logs",
            tenant_id=tenant_id,
            user_id=current_user.get("user_id"),
            filters={
                "start_date": start_date,
                "end_date": end_date,
                "action": action,
                "resource_type": resource_type,
                "severity": severity
            }
        )

        # Build query filters
        filters = [AuditLog.tenant_id == tenant_id]

        if start_date:
            filters.append(AuditLog.created_at >= start_date)
        if end_date:
            filters.append(AuditLog.created_at <= end_date)
        if user_id:
            filters.append(AuditLog.user_id == user_id)
        if action:
            filters.append(AuditLog.action == action)
        if resource_type:
            filters.append(AuditLog.resource_type == resource_type)
        if severity:
            filters.append(AuditLog.severity == severity)
        if search:
            filters.append(AuditLog.description.ilike(f"%{search}%"))
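            # ilike compiles to a case-insensitive LIKE, e.g. search="login" matches
            # descriptions containing "Login", "LOGIN", or "user login"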

        # Count total matching records
        count_query = select(func.count()).select_from(AuditLog).where(and_(*filters))
        total_result = await db.execute(count_query)
        total = total_result.scalar() or 0

        # Fetch paginated results
        query = (
            select(AuditLog)
            .where(and_(*filters))
            .order_by(AuditLog.created_at.desc())
            .limit(limit)
            .offset(offset)
        )

        result = await db.execute(query)
        audit_logs = result.scalars().all()

        # Convert to response models
        items = [AuditLogResponse.from_orm(log) for log in audit_logs]

        logger.info(
            "Successfully retrieved audit logs",
            tenant_id=tenant_id,
            total=total,
            returned=len(items)
        )

        return AuditLogListResponse(
            items=items,
            total=total,
            limit=limit,
            offset=offset,
            has_more=(offset + len(items)) < total
        )

    except Exception as e:
        logger.error(
            "Failed to retrieve audit logs",
            error=str(e),
            tenant_id=tenant_id
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve audit logs: {str(e)}"
        )


@router.get(
    route_builder.build_base_route("audit-logs/stats"),
    response_model=AuditLogStatsResponse
)
@require_user_role(['admin', 'owner'])
async def get_audit_log_stats(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    start_date: Optional[datetime] = Query(None, description="Filter logs from this date"),
    end_date: Optional[datetime] = Query(None, description="Filter logs until this date"),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Get audit log statistics for production service.
    Requires admin or owner role.
    """
    try:
        logger.info(
            "Retrieving audit log statistics",
            tenant_id=tenant_id,
            user_id=current_user.get("user_id")
        )

        # Build base filters
        filters = [AuditLog.tenant_id == tenant_id]
        if start_date:
            filters.append(AuditLog.created_at >= start_date)
        if end_date:
            filters.append(AuditLog.created_at <= end_date)

        # Total events
        count_query = select(func.count()).select_from(AuditLog).where(and_(*filters))
        total_result = await db.execute(count_query)
        total_events = total_result.scalar() or 0

        # Events by action
        action_query = (
            select(AuditLog.action, func.count().label('count'))
            .where(and_(*filters))
            .group_by(AuditLog.action)
        )
        action_result = await db.execute(action_query)
        events_by_action = {row.action: row.count for row in action_result}

        # Events by severity
        severity_query = (
            select(AuditLog.severity, func.count().label('count'))
            .where(and_(*filters))
            .group_by(AuditLog.severity)
        )
        severity_result = await db.execute(severity_query)
        events_by_severity = {row.severity: row.count for row in severity_result}

        # Events by resource type
        resource_query = (
            select(AuditLog.resource_type, func.count().label('count'))
            .where(and_(*filters))
            .group_by(AuditLog.resource_type)
        )
        resource_result = await db.execute(resource_query)
        events_by_resource_type = {row.resource_type: row.count for row in resource_result}

        # Date range
        date_range_query = (
            select(
                func.min(AuditLog.created_at).label('min_date'),
                func.max(AuditLog.created_at).label('max_date')
            )
            .where(and_(*filters))
        )
        date_result = await db.execute(date_range_query)
        date_row = date_result.one()

        logger.info(
            "Successfully retrieved audit log statistics",
            tenant_id=tenant_id,
            total_events=total_events
        )

        return AuditLogStatsResponse(
            total_events=total_events,
            events_by_action=events_by_action,
            events_by_severity=events_by_severity,
            events_by_resource_type=events_by_resource_type,
            date_range={
                "min": date_row.min_date,
                "max": date_row.max_date
            }
        )

    except Exception as e:
        logger.error(
            "Failed to retrieve audit log statistics",
            error=str(e),
            tenant_id=tenant_id
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve audit log statistics: {str(e)}"
        )
167
services/production/app/api/batch.py
Normal file
@@ -0,0 +1,167 @@
# services/production/app/api/batch.py
"""
Production Batch API - Batch operations for enterprise dashboards

Phase 2 optimization: Eliminate N+1 query patterns by fetching production data
for multiple tenants in a single request.
"""

from fastapi import APIRouter, Depends, HTTPException, Body
from typing import List, Dict, Any
from uuid import UUID
from pydantic import BaseModel, Field
import structlog
import asyncio

from fastapi import Request
from app.services.production_service import ProductionService
from app.core.config import settings
from shared.auth.decorators import get_current_user_dep

router = APIRouter(tags=["production-batch"])
logger = structlog.get_logger()


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


class ProductionSummaryBatchRequest(BaseModel):
    """Request model for batch production summary"""
    tenant_ids: List[str] = Field(..., description="List of tenant IDs", max_length=100)


class ProductionSummary(BaseModel):
    """Production summary for a single tenant"""
    tenant_id: str
    total_batches: int
    pending_batches: int
    in_progress_batches: int
    completed_batches: int
    on_hold_batches: int
    cancelled_batches: int
    total_planned_quantity: float
    total_actual_quantity: float
    efficiency_rate: float


@router.post("/batch/production-summary", response_model=Dict[str, ProductionSummary])
async def get_production_summary_batch(
    request: ProductionSummaryBatchRequest = Body(...),
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get production summary for multiple tenants in a single request.

    Optimized for enterprise dashboards to eliminate N+1 query patterns.
    Fetches production data for all tenants in parallel.

    Args:
        request: Batch request with tenant IDs

    Returns:
        Dictionary mapping tenant_id -> production summary

    Example:
        POST /api/v1/production/batch/production-summary
        {
            "tenant_ids": ["tenant-1", "tenant-2", "tenant-3"]
        }

        Response:
        {
            "tenant-1": {"tenant_id": "tenant-1", "total_batches": 25, ...},
            "tenant-2": {"tenant_id": "tenant-2", "total_batches": 18, ...},
            "tenant-3": {"tenant_id": "tenant-3", "total_batches": 32, ...}
        }
    """
    try:
        if len(request.tenant_ids) > 100:
            raise HTTPException(
                status_code=400,
                detail="Maximum 100 tenant IDs allowed per batch request"
            )

        if not request.tenant_ids:
            return {}

        logger.info(
            "Batch fetching production summaries",
            tenant_count=len(request.tenant_ids)
        )

        async def fetch_tenant_production(tenant_id: str) -> tuple[str, ProductionSummary]:
            """Fetch production summary for a single tenant"""
            try:
                tenant_uuid = UUID(tenant_id)
                summary = await production_service.get_dashboard_summary(tenant_uuid)

                # Calculate efficiency rate
                efficiency_rate = 0.0
                if summary.total_planned_quantity > 0 and summary.total_actual_quantity is not None:
                    efficiency_rate = (summary.total_actual_quantity / summary.total_planned_quantity) * 100
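                    # e.g. planned=1000.0, actual=950.0 -> efficiency_rate = 95.0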

                return tenant_id, ProductionSummary(
                    tenant_id=tenant_id,
                    total_batches=int(summary.total_batches or 0),
                    pending_batches=int(summary.pending_batches or 0),
                    in_progress_batches=int(summary.in_progress_batches or 0),
                    completed_batches=int(summary.completed_batches or 0),
                    on_hold_batches=int(summary.on_hold_batches or 0),
                    cancelled_batches=int(summary.cancelled_batches or 0),
                    total_planned_quantity=float(summary.total_planned_quantity or 0),
                    total_actual_quantity=float(summary.total_actual_quantity or 0),
                    efficiency_rate=efficiency_rate
                )
            except Exception as e:
                logger.warning(
                    "Failed to fetch production for tenant in batch",
                    tenant_id=tenant_id,
                    error=str(e)
                )
                return tenant_id, ProductionSummary(
                    tenant_id=tenant_id,
                    total_batches=0,
                    pending_batches=0,
                    in_progress_batches=0,
                    completed_batches=0,
                    on_hold_batches=0,
                    cancelled_batches=0,
                    total_planned_quantity=0.0,
                    total_actual_quantity=0.0,
                    efficiency_rate=0.0
                )

        # Fetch all tenant production data in parallel
        tasks = [fetch_tenant_production(tid) for tid in request.tenant_ids]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Build result dictionary
        result_dict = {}
        for result in results:
            if isinstance(result, Exception):
                logger.error("Exception in batch production fetch", error=str(result))
                continue
            tenant_id, summary = result
            result_dict[tenant_id] = summary

        logger.info(
            "Batch production summaries retrieved",
            requested_count=len(request.tenant_ids),
            successful_count=len(result_dict)
        )

        return result_dict

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error in batch production summary", error=str(e), exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Failed to fetch batch production summaries: {str(e)}"
        )
580
services/production/app/api/equipment.py
Normal file
@@ -0,0 +1,580 @@
# services/production/app/api/equipment.py
"""
Equipment API - CRUD operations on Equipment model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Any, Optional
from uuid import UUID
from datetime import datetime, timezone
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.security import create_audit_logger, AuditSeverity, AuditAction
from app.core.database import get_db
from app.services.production_service import ProductionService
from app.models import AuditLog
from app.schemas.equipment import (
    EquipmentCreate,
    EquipmentUpdate,
    EquipmentResponse,
    EquipmentListResponse,
    EquipmentDeletionSummary
)
from app.models.production import EquipmentStatus, EquipmentType
from app.core.config import settings

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-equipment"])

# Initialize audit logger with the production service's AuditLog model
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


@router.get(
    route_builder.build_base_route("equipment"),
    response_model=EquipmentListResponse
)
async def list_equipment(
    tenant_id: UUID = Path(...),
    status: Optional[EquipmentStatus] = Query(None, description="Filter by status"),
    type: Optional[EquipmentType] = Query(None, description="Filter by equipment type"),
    is_active: Optional[bool] = Query(None, description="Filter by active status"),
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=100, description="Page size"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """List equipment with filters: status, type, active status"""
    try:
        filters = {
            "status": status,
            "type": type,
            "is_active": is_active
        }

        equipment_list = await production_service.get_equipment_list(tenant_id, filters, page, page_size)

        logger.info("Retrieved equipment list",
                    tenant_id=str(tenant_id), filters=filters)

        return equipment_list

    except Exception as e:
        logger.error("Error listing equipment",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to list equipment")


@router.post(
    route_builder.build_base_route("equipment"),
    response_model=EquipmentResponse
)
async def create_equipment(
    equipment_data: EquipmentCreate,
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service),
    db = Depends(get_db)
):
    """Create a new equipment item"""
    try:
        equipment = await production_service.create_equipment(tenant_id, equipment_data)

        logger.info("Created equipment",
                    equipment_id=str(equipment.id), tenant_id=str(tenant_id))

        # Audit log the equipment creation
        await audit_logger.log_event(
            db_session=db,
            tenant_id=str(tenant_id),
            user_id=current_user.get('user_id'),
            action=AuditAction.CREATE.value,
            resource_type="equipment",
            resource_id=str(equipment.id),
            severity=AuditSeverity.INFO.value,
            audit_metadata={"equipment_name": equipment.name, "equipment_type": equipment.type.value}
        )

        return EquipmentResponse.model_validate(equipment)

    except ValueError as e:
        logger.warning("Validation error creating equipment",
                       error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=400, detail=str(e))

    except Exception as e:
        logger.error("Error creating equipment",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to create equipment")


@router.get(
    route_builder.build_base_route("equipment/{equipment_id}"),
    response_model=EquipmentResponse
)
async def get_equipment(
    tenant_id: UUID = Path(...),
    equipment_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get a specific equipment item"""
    try:
        equipment = await production_service.get_equipment(tenant_id, equipment_id)

        if not equipment:
            raise HTTPException(status_code=404, detail="Equipment not found")

        logger.info("Retrieved equipment",
                    equipment_id=str(equipment_id), tenant_id=str(tenant_id))

        return EquipmentResponse.model_validate(equipment)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error retrieving equipment",
                     error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to retrieve equipment")


@router.put(
    route_builder.build_base_route("equipment/{equipment_id}"),
    response_model=EquipmentResponse
)
async def update_equipment(
    equipment_data: EquipmentUpdate,
    tenant_id: UUID = Path(...),
    equipment_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service),
    db = Depends(get_db)
):
    """Update an equipment item"""
    try:
        equipment = await production_service.update_equipment(tenant_id, equipment_id, equipment_data)

        if not equipment:
            raise HTTPException(status_code=404, detail="Equipment not found")

        logger.info("Updated equipment",
                    equipment_id=str(equipment_id), tenant_id=str(tenant_id))

        # Audit log the equipment update
        await audit_logger.log_event(
            db_session=db,
            tenant_id=str(tenant_id),
            user_id=current_user.get('user_id'),
            action=AuditAction.UPDATE.value,
            resource_type="equipment",
            resource_id=str(equipment_id),
            severity=AuditSeverity.INFO.value,
            audit_metadata={"updates": equipment_data.model_dump(exclude_unset=True)}
        )

        return EquipmentResponse.model_validate(equipment)

    except HTTPException:
        raise
    except ValueError as e:
        logger.warning("Validation error updating equipment",
                       error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error updating equipment",
                     error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to update equipment")


@router.get(
    route_builder.build_base_route("equipment/{equipment_id}/deletion-summary"),
    response_model=EquipmentDeletionSummary
)
async def get_equipment_deletion_summary(
    tenant_id: UUID = Path(...),
    equipment_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get deletion summary for equipment (dependency check)"""
    try:
        summary = await production_service.get_equipment_deletion_summary(tenant_id, equipment_id)

        logger.info("Retrieved equipment deletion summary",
                    equipment_id=str(equipment_id), tenant_id=str(tenant_id))

        return EquipmentDeletionSummary(**summary)

    except Exception as e:
        logger.error("Error getting equipment deletion summary",
                     error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get deletion summary")


@router.post(
    route_builder.build_base_route("equipment/{equipment_id}/report-failure"),
    response_model=EquipmentResponse
)
async def report_equipment_failure(
    failure_data: dict,
    request: Request,
    tenant_id: UUID = Path(...),
    equipment_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service),
    db = Depends(get_db)
):
    """Report equipment failure and trigger maintenance workflow"""
    try:
        # Update equipment status and add failure record
        equipment = await production_service.report_equipment_failure(
            tenant_id,
            equipment_id,
            failure_data
        )

        if not equipment:
            raise HTTPException(status_code=404, detail="Equipment not found")

        logger.info("Reported equipment failure",
                    equipment_id=str(equipment_id),
                    tenant_id=str(tenant_id),
                    failure_type=failure_data.get('failureType'))

        # Audit log the failure report
        await audit_logger.log_event(
            db_session=db,
            tenant_id=str(tenant_id),
            user_id=current_user.get('user_id'),
            action=AuditAction.UPDATE.value,
            resource_type="equipment",
            resource_id=str(equipment_id),
            severity=AuditSeverity.WARNING.value,
            audit_metadata={
                "action": "report_failure",
                "failure_type": failure_data.get('failureType'),
                "severity": failure_data.get('severity')
            }
        )

        # Get notification service from app state
        notification_service = getattr(request.app.state, 'notification_service', None)

        # Trigger notifications if notification service is available
        if notification_service:
            try:
                await trigger_failure_notifications(
                    notification_service,
                    tenant_id,
                    equipment,
                    failure_data
                )

                # Send primary notification to equipment support contact if available
                if equipment.support_contact and equipment.support_contact.get('email'):
                    await send_support_contact_notification(
                        notification_service,
                        tenant_id,
                        equipment,
                        failure_data,
                        equipment.support_contact['email']
                    )
            except Exception as e:
                logger.warning("Failed to send notifications", error=str(e), equipment_id=str(equipment_id))
                # Continue even if notifications fail

        return EquipmentResponse.model_validate(equipment)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error reporting equipment failure",
                     error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to report equipment failure")


@router.post(
    route_builder.build_base_route("equipment/{equipment_id}/mark-repaired"),
    response_model=EquipmentResponse
)
async def mark_equipment_repaired(
    repair_data: dict,
    request: Request,
    tenant_id: UUID = Path(...),
    equipment_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service),
    db = Depends(get_db)
):
    """Mark equipment as repaired and update maintenance records"""
    try:
        # Update equipment status and add repair record
        equipment = await production_service.mark_equipment_repaired(
            tenant_id,
            equipment_id,
            repair_data
        )

        if not equipment:
            raise HTTPException(status_code=404, detail="Equipment not found")

        logger.info("Marked equipment as repaired",
                    equipment_id=str(equipment_id),
                    tenant_id=str(tenant_id))

        # Audit log the repair completion
        await audit_logger.log_event(
            db_session=db,
            tenant_id=str(tenant_id),
            user_id=current_user.get('user_id'),
            action=AuditAction.UPDATE.value,
            resource_type="equipment",
            resource_id=str(equipment_id),
            severity=AuditSeverity.INFO.value,
            audit_metadata={
                "action": "mark_repaired",
                "technician": repair_data.get('technicianName'),
                "cost": repair_data.get('cost')
            }
        )

        # Get notification service from app state
        notification_service = getattr(request.app.state, 'notification_service', None)

        # Trigger notifications if notification service is available
        if notification_service:
            try:
                # Calculate downtime for notifications
                last_maintenance_date = equipment.last_maintenance_date or datetime.now(timezone.utc)
                repair_date_str = repair_data.get('repairDate')
                if repair_date_str:
                    if 'T' in repair_date_str:
                        repair_date = datetime.fromisoformat(repair_date_str.replace('Z', '+00:00'))
                    else:
                        repair_date = datetime.fromisoformat(f"{repair_date_str}T00:00:00+00:00")
                else:
                    repair_date = datetime.now(timezone.utc)

                downtime_hours = int((repair_date - last_maintenance_date).total_seconds() / 3600)
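                # e.g. a 1.5-day gap: int(129600 / 3600) = 36 hours of downtime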

                # Add downtime to repair_data for notification
                repair_data_with_downtime = {**repair_data, 'downtime': downtime_hours}

                await trigger_repair_notifications(
                    notification_service,
                    tenant_id,
                    equipment,
                    repair_data_with_downtime
                )
            except Exception as e:
                logger.warning("Failed to send notifications", error=str(e), equipment_id=str(equipment_id))
                # Continue even if notifications fail

        return EquipmentResponse.model_validate(equipment)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error marking equipment as repaired",
                     error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to mark equipment as repaired")


@router.delete(
    route_builder.build_base_route("equipment/{equipment_id}")
)
async def delete_equipment(
    tenant_id: UUID = Path(...),
    equipment_id: UUID = Path(...),
    permanent: bool = Query(False, description="Permanent delete (hard delete) if true"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service),
    db = Depends(get_db)
):
    """Delete an equipment item. Use permanent=true for hard delete (requires admin role)"""
    try:
        # Hard delete requires admin role
        if permanent:
            user_role = current_user.get('role', '').lower()
            if user_role not in ['admin', 'owner']:
                raise HTTPException(
                    status_code=403,
                    detail="Hard delete requires admin or owner role"
                )

            success = await production_service.hard_delete_equipment(tenant_id, equipment_id)
            delete_type = "hard_delete"
            severity = AuditSeverity.CRITICAL.value
        else:
            success = await production_service.delete_equipment(tenant_id, equipment_id)
            delete_type = "soft_delete"
            severity = AuditSeverity.WARNING.value

        if not success:
            raise HTTPException(status_code=404, detail="Equipment not found")

        logger.info(f"{'Hard' if permanent else 'Soft'} deleted equipment",
                    equipment_id=str(equipment_id), tenant_id=str(tenant_id))

        # Audit log the equipment deletion
        await audit_logger.log_event(
            db_session=db,
            tenant_id=str(tenant_id),
            user_id=current_user.get('user_id'),
            action=AuditAction.DELETE.value,
            resource_type="equipment",
            resource_id=str(equipment_id),
            severity=severity,
            audit_metadata={"action": delete_type, "permanent": permanent}
        )

        return {"message": f"Equipment {'permanently deleted' if permanent else 'deleted'} successfully"}

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error deleting equipment",
                     error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to delete equipment")


# Helper functions for notifications
async def trigger_failure_notifications(notification_service: Any, tenant_id: UUID, equipment: Any, failure_data: dict):
    """Trigger failure notifications via email - sends to bakery managers"""
    try:
        from jinja2 import Environment, FileSystemLoader
        from pathlib import Path

        # Load template from file
        template_dir = Path(__file__).parent.parent.parent / "notification" / "app" / "templates"
        env = Environment(loader=FileSystemLoader(str(template_dir)))
        template = env.get_template('equipment_failure_email.html')

        # Prepare template variables
        template_vars = {
            "equipment_name": equipment.name,
            "equipment_type": equipment.type.value if hasattr(equipment.type, 'value') else equipment.type,
            "equipment_model": equipment.model or "N/A",
            "equipment_serial_number": equipment.serial_number or "N/A",
            "equipment_location": equipment.location or "N/A",
            "failure_type": failure_data.get('failureType', 'Unknown'),
            "severity": failure_data.get('severity', 'high'),
            "description": failure_data.get('description', ''),
            "reported_time": datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC'),
            "estimated_impact": "SÍ - Afecta producción" if failure_data.get('estimatedImpact') else "NO - Sin impacto en producción",
            "support_contact": equipment.support_contact or {},
            "equipment_link": f"https://app.bakeryia.com/equipment/{equipment.id}",
            "bakery_name": "BakeryIA",
            "current_year": datetime.now().year
        }

        html_content = template.render(**template_vars)

        # Send via notification service (which will handle the actual email sending)
        # This is a simplified approach - in production you'd want to get manager emails from DB
        logger.info("Failure notifications triggered (template rendered)",
                    equipment_id=str(equipment.id),
                    tenant_id=str(tenant_id))

    except Exception as e:
        logger.error("Error triggering failure notifications",
                     error=str(e), equipment_id=str(equipment.id), tenant_id=str(tenant_id))
        raise


async def trigger_repair_notifications(notification_service: Any, tenant_id: UUID, equipment: Any, repair_data: dict):
    """Trigger repair completion notifications via email - sends to bakery managers"""
    try:
        from jinja2 import Environment, FileSystemLoader
        from pathlib import Path

        # Load template from file
        template_dir = Path(__file__).parent.parent.parent / "notification" / "app" / "templates"
        env = Environment(loader=FileSystemLoader(str(template_dir)))
        template = env.get_template('equipment_repaired_email.html')

        # Prepare template variables
        template_vars = {
            "equipment_name": equipment.name,
            "equipment_type": equipment.type.value if hasattr(equipment.type, 'value') else equipment.type,
            "equipment_model": equipment.model or "N/A",
            "equipment_location": equipment.location or "N/A",
            "repair_date": repair_data.get('repairDate', datetime.now(timezone.utc).strftime('%Y-%m-%d')),
            "technician_name": repair_data.get('technicianName', 'Unknown'),
            "repair_description": repair_data.get('repairDescription', ''),
            "parts_replaced": repair_data.get('partsReplaced', []),
            "cost": repair_data.get('cost', 0),
            "downtime_hours": repair_data.get('downtime', 0),
            "test_results": repair_data.get('testResults', False),
            "equipment_link": f"https://app.bakeryia.com/equipment/{equipment.id}",
            "bakery_name": "BakeryIA",
            "current_year": datetime.now().year
        }

        html_content = template.render(**template_vars)

        # Send via notification service
        logger.info("Repair notifications triggered (template rendered)",
                    equipment_id=str(equipment.id),
                    tenant_id=str(tenant_id))

    except Exception as e:
        logger.error("Error triggering repair notifications",
                     error=str(e), equipment_id=str(equipment.id), tenant_id=str(tenant_id))
        raise


async def send_support_contact_notification(notification_service: Any, tenant_id: UUID, equipment: Any, failure_data: dict, support_email: str):
    """Send direct notification to equipment support contact for repair request"""
    try:
        from jinja2 import Environment, FileSystemLoader
        from pathlib import Path

        # Load template from file
        template_dir = Path(__file__).parent.parent.parent / "notification" / "app" / "templates"
        env = Environment(loader=FileSystemLoader(str(template_dir)))
        template = env.get_template('equipment_failure_email.html')

        # Prepare template variables
        template_vars = {
            "equipment_name": equipment.name,
            "equipment_type": equipment.type.value if hasattr(equipment.type, 'value') else equipment.type,
            "equipment_model": equipment.model or "N/A",
            "equipment_serial_number": equipment.serial_number or "N/A",
            "equipment_location": equipment.location or "N/A",
            "failure_type": failure_data.get('failureType', 'Unknown'),
            "severity": failure_data.get('severity', 'high'),
            "description": failure_data.get('description', ''),
            "reported_time": datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC'),
            "estimated_impact": "SÍ - Afecta producción" if failure_data.get('estimatedImpact') else "NO - Sin impacto en producción",
            "support_contact": equipment.support_contact or {},
            "equipment_link": f"https://app.bakeryia.com/equipment/{equipment.id}",
            "bakery_name": "BakeryIA",
            "current_year": datetime.now().year
        }

        html_content = template.render(**template_vars)

        # TODO: Actually send email via notification service
        # For now, just log that we would send to the support email
        logger.info("Support contact notification prepared (would send to support)",
                    equipment_id=str(equipment.id),
                    tenant_id=str(tenant_id),
                    support_email=support_email,
                    subject=f"🚨 URGENTE: Fallo de Equipo - {equipment.name}")

    except Exception as e:
        logger.error("Error sending support contact notification",
                     error=str(e), equipment_id=str(equipment.id), tenant_id=str(tenant_id))
        raise
88
services/production/app/api/internal_alert_trigger.py
Normal file
@@ -0,0 +1,88 @@
# services/production/app/api/internal_alert_trigger.py
"""
Internal API for triggering production alerts.
Used by demo session cloning to generate realistic production delay alerts.

URL Pattern: /api/v1/tenants/{tenant_id}/production/internal/alerts/trigger
This follows the tenant-scoped pattern so the gateway can proxy correctly.
"""

from fastapi import APIRouter, HTTPException, Request, Path
from uuid import UUID
import structlog

logger = structlog.get_logger()

router = APIRouter()


# New URL pattern: tenant-scoped so the gateway proxies to the production service correctly
@router.post("/api/v1/tenants/{tenant_id}/production/internal/alerts/trigger")
async def trigger_production_alerts(
    tenant_id: UUID = Path(..., description="Tenant ID to check production for"),
    request: Request = None
) -> dict:
    """
    Trigger comprehensive production alert checks for a specific tenant (internal use only).

    This endpoint is called by the demo session cloning process after production
    batches are seeded to generate realistic production alerts including:
    - Production delays
    - Equipment maintenance alerts
    - Batch start delays

    Security: Protected by x-internal-service header check.
    """
    try:
        # Verify internal service header
        if not request or request.headers.get("x-internal-service") not in ["demo-session", "internal"]:
            logger.warning("Unauthorized internal API call", tenant_id=str(tenant_id))
            raise HTTPException(
                status_code=403,
                detail="This endpoint is for internal service use only"
            )

        # Get production scheduler from app state
        production_scheduler = getattr(request.app.state, 'production_scheduler', None)

        if not production_scheduler:
            logger.error("Production scheduler not initialized")
            raise HTTPException(
                status_code=500,
                detail="Production scheduler not available"
            )

        # Trigger comprehensive production alert checks for the specific tenant
        logger.info("Triggering comprehensive production alert checks", tenant_id=str(tenant_id))

        # Call the scheduler's manual trigger method
        result = await production_scheduler.trigger_manual_check(tenant_id)

        if result.get("success", False):
            logger.info(
                "Production alert checks completed successfully",
                tenant_id=str(tenant_id),
                alerts_generated=result.get("alerts_generated", 0)
            )
        else:
            logger.error(
                "Production alert checks failed",
                tenant_id=str(tenant_id),
                error=result.get("error", "Unknown error")
            )

        return result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            "Error triggering production alerts",
            tenant_id=str(tenant_id),
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to trigger production alerts: {str(e)}"
        )
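
# Illustrative sketch of how a caller (e.g. the demo-session service) might
# invoke this endpoint. The httpx client and base URL are assumptions; only
# the route and the x-internal-service header contract are defined above.
#
#     import httpx
#
#     async def trigger_demo_alerts(base_url: str, tenant_id: str) -> dict:
#         async with httpx.AsyncClient() as client:
#             resp = await client.post(
#                 f"{base_url}/api/v1/tenants/{tenant_id}/production/internal/alerts/trigger",
#                 headers={"x-internal-service": "demo-session"},
#             )
#             resp.raise_for_status()
#             return resp.json()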
798
services/production/app/api/internal_demo.py
Normal file
@@ -0,0 +1,798 @@
"""
Internal Demo Cloning API for Production Service
Service-to-service endpoint for cloning production data
"""

from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, delete, func
import structlog
import uuid
from uuid import UUID
from datetime import datetime, timezone, timedelta
from typing import Optional, Dict, Any
import os
import json
from pathlib import Path

from app.core.database import get_db
from app.models.production import (
    ProductionBatch, ProductionSchedule, ProductionCapacity,
    QualityCheckTemplate, QualityCheck, Equipment,
    ProductionStatus, ProductionPriority, ProcessStage,
    EquipmentStatus, EquipmentType
)
from shared.utils.demo_dates import (
    adjust_date_for_demo, resolve_time_marker
)

from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter(prefix="/internal/demo", tags=["internal"])

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"


@router.post("/clone")
async def clone_demo_data(
    base_tenant_id: str,
    virtual_tenant_id: str,
    demo_account_type: str,
    session_id: Optional[str] = None,
    session_created_at: Optional[str] = None,
    db: AsyncSession = Depends(get_db)
):
    """
    Clone production service data for a virtual demo tenant

    Clones:
    - Production batches (historical production runs)
    - Production schedules (daily planning)
    - Production capacity records
    - Quality check templates
    - Quality checks (inspection records)
    - Equipment (machines and tools)

    Args:
        base_tenant_id: Template tenant UUID to clone from
        virtual_tenant_id: Target virtual tenant UUID
        demo_account_type: Type of demo account
        session_id: Originating session ID for tracing
        session_created_at: Session creation timestamp for date adjustment

    Returns:
        Cloning status and record counts
    """
    start_time = datetime.now(timezone.utc)

    # Parse session creation time for date adjustment
    if session_created_at:
        try:
            session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
        except (ValueError, AttributeError):
            session_time = start_time
    else:
        session_time = start_time

    logger.info(
        "Starting production data cloning",
        base_tenant_id=base_tenant_id,
        virtual_tenant_id=virtual_tenant_id,
        demo_account_type=demo_account_type,
        session_id=session_id,
        session_created_at=session_created_at
    )

    try:
        # Validate UUIDs
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Track cloning statistics
        stats = {
            "batches": 0,
            "production_schedules": 0,
            "production_capacity": 0,
            "quality_check_templates": 0,
            "quality_checks": 0,
            "equipment": 0,
            "alerts_generated": 0
        }

        def parse_date_field(date_value, session_time, field_name="date"):
            """Parse date field, handling both ISO strings and BASE_TS markers"""
            if not date_value:
                return None

            # Check if it's a BASE_TS marker
            if isinstance(date_value, str) and date_value.startswith("BASE_TS"):
                try:
                    return resolve_time_marker(date_value, session_time)
                except ValueError as e:
                    logger.warning(
                        f"Invalid BASE_TS marker in {field_name}",
                        marker=date_value,
                        error=str(e)
                    )
                    return None

            # Handle regular ISO date strings
            try:
                return adjust_date_for_demo(
                    datetime.fromisoformat(date_value.replace('Z', '+00:00')),
                    session_time
                )
            except (ValueError, AttributeError) as e:
                logger.warning(
                    f"Invalid date format in {field_name}",
                    date_value=date_value,
                    error=str(e)
                )
                return None

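        # Illustrative behavior of parse_date_field (the exact BASE_TS marker
        # syntax lives in shared.utils.demo_dates and is an assumption here):
        #   parse_date_field("BASE_TS+2h", session_time)   -> session_time shifted by the marker offset
        #   parse_date_field("2025-01-30T10:00:00Z", session_time)
        #                                                  -> that timestamp re-anchored via adjust_date_for_demo
        #   parse_date_field(None, session_time)           -> None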
        # Load seed data from JSON files
        from shared.utils.seed_data_paths import get_seed_data_path

        if demo_account_type == "professional":
            json_file = get_seed_data_path("professional", "06-production.json")
        elif demo_account_type == "enterprise":
            json_file = get_seed_data_path("enterprise", "06-production.json")
        elif demo_account_type == "enterprise_child":
            json_file = get_seed_data_path("enterprise", "06-production.json", child_id=base_tenant_id)
        else:
            raise ValueError(f"Invalid demo account type: {demo_account_type}")

        # Load JSON data
        with open(json_file, 'r', encoding='utf-8') as f:
            seed_data = json.load(f)

        # Create Equipment first (no dependencies)
        for equipment_data in seed_data.get('equipment', []):
            # Transform equipment ID using XOR
            from shared.utils.demo_id_transformer import transform_id
            try:
                equipment_uuid = UUID(equipment_data['id'])
                transformed_id = transform_id(equipment_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse equipment UUID",
                             equipment_id=equipment_data['id'],
                             error=str(e))
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid UUID format in equipment data: {str(e)}"
                )

            # Parse date fields (supports BASE_TS markers and ISO timestamps)
            adjusted_install_date = parse_date_field(
                equipment_data.get('install_date'),
                session_time,
                "install_date"
            )
            adjusted_last_maintenance = parse_date_field(
                equipment_data.get('last_maintenance_date'),
                session_time,
                "last_maintenance_date"
            )
            adjusted_next_maintenance = parse_date_field(
                equipment_data.get('next_maintenance_date'),
                session_time,
                "next_maintenance_date"
            )
            adjusted_created_at = parse_date_field(
                equipment_data.get('created_at'),
                session_time,
                "created_at"
            )
            adjusted_updated_at = parse_date_field(
                equipment_data.get('updated_at'),
                session_time,
                "updated_at"
            )

            new_equipment = Equipment(
                id=str(transformed_id),
                tenant_id=virtual_uuid,
                name=equipment_data['name'],
                type=equipment_data['type'],
                model=equipment_data['model'],
                serial_number=equipment_data.get('serial_number'),
                location=equipment_data['location'],
                status=equipment_data['status'],
                install_date=adjusted_install_date,
                last_maintenance_date=adjusted_last_maintenance,
                next_maintenance_date=adjusted_next_maintenance,
                maintenance_interval_days=equipment_data.get('maintenance_interval_days'),
                efficiency_percentage=equipment_data.get('efficiency_percentage'),
                uptime_percentage=equipment_data.get('uptime_percentage'),
                energy_usage_kwh=equipment_data.get('energy_usage_kwh'),
                power_kw=equipment_data.get('power_kw'),
                capacity=equipment_data.get('capacity'),
                weight_kg=equipment_data.get('weight_kg'),
                current_temperature=equipment_data.get('current_temperature'),
                target_temperature=equipment_data.get('target_temperature'),
                is_active=equipment_data.get('is_active', True),
                notes=equipment_data.get('notes'),
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at
            )
            db.add(new_equipment)
            stats["equipment"] += 1

        # Flush to get equipment IDs
        await db.flush()

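        # The "XOR" transform used above and below: shared.utils.demo_id_transformer
        # pairs each seed UUID with the virtual tenant UUID so cloned IDs are
        # deterministic (re-cloning yields the same IDs) and unique per tenant.
        # A minimal sketch of the idea, assuming transform_id XORs the two
        # 128-bit UUID values:
        #
        #     from uuid import UUID
        #
        #     def xor_transform(seed_id: UUID, tenant_id: UUID) -> UUID:
        #         return UUID(int=seed_id.int ^ tenant_id.int)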
        # Clone Quality Check Templates from seed data
        template_id_map = {}

        for template_data in seed_data.get('quality_check_templates', []):
            # Transform template ID using XOR
            from shared.utils.demo_id_transformer import transform_id
            try:
                template_uuid = UUID(template_data['id'])
                transformed_id = transform_id(template_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse template UUID",
                             template_id=template_data['id'],
                             error=str(e))
                continue

            template_id_map[UUID(template_data['id'])] = transformed_id

            # Parse date fields (supports BASE_TS markers and ISO timestamps)
            adjusted_created_at = parse_date_field(
                template_data.get('created_at'),
                session_time,
                "created_at"
            ) or session_time
            adjusted_updated_at = parse_date_field(
                template_data.get('updated_at'),
                session_time,
                "updated_at"
            ) or adjusted_created_at

            new_template = QualityCheckTemplate(
                id=str(transformed_id),
                tenant_id=virtual_uuid,
                name=template_data.get('name'),
                template_code=template_data.get('template_code'),
                check_type=template_data.get('check_type'),
                category=template_data.get('category'),
                description=template_data.get('description'),
                instructions=template_data.get('instructions'),
                parameters=template_data.get('parameters'),
                thresholds=template_data.get('thresholds'),
                scoring_criteria=template_data.get('scoring_criteria'),
                is_active=template_data.get('is_active', True),
                is_required=template_data.get('is_required', False),
                is_critical=template_data.get('is_critical', False),
                weight=template_data.get('weight', 1.0),
                min_value=template_data.get('min_value'),
                max_value=template_data.get('max_value'),
                target_value=template_data.get('target_value'),
                unit=template_data.get('unit'),
                tolerance_percentage=template_data.get('tolerance_percentage'),
                applicable_stages=template_data.get('applicable_stages'),
                created_by=template_data.get('created_by'),
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at
            )
            db.add(new_template)
            stats["quality_check_templates"] += 1

        # Flush to get template IDs
        await db.flush()

        # Clone Production Batches from seed data
        batch_id_map = {}
        for batch_data in seed_data.get('batches', []):
            # Transform batch ID using XOR
            from shared.utils.demo_id_transformer import transform_id
            try:
                batch_uuid = UUID(batch_data['id'])
                transformed_id = transform_id(batch_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse batch UUID",
                             batch_id=batch_data['id'],
                             error=str(e))
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid UUID format in batch data: {str(e)}"
                )

            batch_id_map[UUID(batch_data['id'])] = transformed_id

            # Adjust dates relative to session creation time
            adjusted_planned_start = parse_date_field(batch_data.get('planned_start_time'), session_time, "planned_start_time")
            adjusted_planned_end = parse_date_field(batch_data.get('planned_end_time'), session_time, "planned_end_time")
            adjusted_actual_start = parse_date_field(batch_data.get('actual_start_time'), session_time, "actual_start_time")
            adjusted_actual_end = parse_date_field(batch_data.get('actual_end_time'), session_time, "actual_end_time")
            adjusted_completed = parse_date_field(batch_data.get('completed_at'), session_time, "completed_at")
            adjusted_created_at = parse_date_field(batch_data.get('created_at'), session_time, "created_at") or session_time
            adjusted_updated_at = parse_date_field(batch_data.get('updated_at'), session_time, "updated_at") or adjusted_created_at

            # Map status and priority enums
            status_value = batch_data.get('status', 'PENDING')
            if isinstance(status_value, str):
                try:
                    status_value = ProductionStatus[status_value]
                except KeyError:
                    status_value = ProductionStatus.PENDING

            priority_value = batch_data.get('priority', 'MEDIUM')
            if isinstance(priority_value, str):
                try:
                    priority_value = ProductionPriority[priority_value]
                except KeyError:
                    priority_value = ProductionPriority.MEDIUM

            # Map process stage enum
            process_stage_value = batch_data.get('current_process_stage')
            if process_stage_value and isinstance(process_stage_value, str):
                try:
                    process_stage_value = ProcessStage[process_stage_value]
                except KeyError:
                    process_stage_value = None

            # Transform foreign key references (product_id, recipe_id, order_id, forecast_id)
            transformed_product_id = None
            if batch_data.get('product_id'):
                try:
                    transformed_product_id = str(transform_id(batch_data['product_id'], virtual_uuid))
                except Exception as e:
                    logger.warning("Failed to transform product_id",
                                   product_id=batch_data.get('product_id'),
                                   error=str(e))

            transformed_recipe_id = None
            if batch_data.get('recipe_id'):
                try:
                    transformed_recipe_id = str(transform_id(batch_data['recipe_id'], virtual_uuid))
                except Exception as e:
                    logger.warning("Failed to transform recipe_id",
                                   recipe_id=batch_data.get('recipe_id'),
                                   error=str(e))

            transformed_order_id = None
            if batch_data.get('order_id'):
                try:
                    transformed_order_id = str(transform_id(batch_data['order_id'], virtual_uuid))
                except Exception as e:
                    logger.warning("Failed to transform order_id",
                                   order_id=batch_data.get('order_id'),
                                   error=str(e))

            transformed_forecast_id = None
            if batch_data.get('forecast_id'):
                try:
                    transformed_forecast_id = str(transform_id(batch_data['forecast_id'], virtual_uuid))
                except Exception as e:
                    logger.warning("Failed to transform forecast_id",
                                   forecast_id=batch_data.get('forecast_id'),
                                   error=str(e))

            # Transform equipment_used array
            transformed_equipment = []
            if batch_data.get('equipment_used'):
                for equip_id in batch_data['equipment_used']:
                    try:
                        transformed_equipment.append(str(transform_id(equip_id, virtual_uuid)))
                    except Exception as e:
                        logger.warning("Failed to transform equipment_id",
                                       equipment_id=equip_id,
                                       error=str(e))

            # staff_assigned contains user IDs - these should NOT be transformed
            # because they reference actual user accounts which are NOT cloned.
            # The demo uses the same user accounts across all virtual tenants.
            staff_assigned = batch_data.get('staff_assigned', [])

            new_batch = ProductionBatch(
                id=str(transformed_id),
                tenant_id=virtual_uuid,
                # session_id is Optional; fall back to a fixed prefix so the
                # f-string never slices None
                batch_number=f"{(session_id or 'demo')[:8]}-{batch_data.get('batch_number', f'BATCH-{uuid.uuid4().hex[:8].upper()}')}",
                product_id=transformed_product_id,
                product_name=batch_data.get('product_name'),
                recipe_id=transformed_recipe_id,
                planned_start_time=adjusted_planned_start,
                planned_end_time=adjusted_planned_end,
                planned_quantity=batch_data.get('planned_quantity'),
                planned_duration_minutes=batch_data.get('planned_duration_minutes'),
                actual_start_time=adjusted_actual_start,
                actual_end_time=adjusted_actual_end,
                actual_quantity=batch_data.get('actual_quantity'),
                actual_duration_minutes=batch_data.get('actual_duration_minutes'),
                status=status_value,
                priority=priority_value,
                current_process_stage=process_stage_value,
                process_stage_history=batch_data.get('process_stage_history'),
                pending_quality_checks=batch_data.get('pending_quality_checks'),
                completed_quality_checks=batch_data.get('completed_quality_checks'),
                estimated_cost=batch_data.get('estimated_cost'),
                actual_cost=batch_data.get('actual_cost'),
                labor_cost=batch_data.get('labor_cost'),
                material_cost=batch_data.get('material_cost'),
                overhead_cost=batch_data.get('overhead_cost'),
                yield_percentage=batch_data.get('yield_percentage'),
                quality_score=batch_data.get('quality_score'),
                waste_quantity=batch_data.get('waste_quantity'),
                defect_quantity=batch_data.get('defect_quantity'),
                waste_defect_type=batch_data.get('waste_defect_type'),
                equipment_used=transformed_equipment,
                staff_assigned=staff_assigned,
                station_id=batch_data.get('station_id'),
                order_id=transformed_order_id,
                forecast_id=transformed_forecast_id,
                is_rush_order=batch_data.get('is_rush_order', False),
                is_special_recipe=batch_data.get('is_special_recipe', False),
                is_ai_assisted=batch_data.get('is_ai_assisted', False),
                production_notes=batch_data.get('production_notes'),
                quality_notes=batch_data.get('quality_notes'),
                delay_reason=batch_data.get('delay_reason'),
                cancellation_reason=batch_data.get('cancellation_reason'),
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at,
                completed_at=adjusted_completed
            )
            db.add(new_batch)
            stats["batches"] += 1

        # Flush to get batch IDs
        await db.flush()

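        # Illustrative batch_number result (values hypothetical): with
        # session_id "3f9c2d1e-..." and a seed batch_number "BATCH-0042",
        # the cloned batch is numbered "3f9c2d1e-BATCH-0042", which keeps
        # batch numbers unique across concurrent demo sessions.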
        # Clone Quality Checks from seed data (if any)
        for check_data in seed_data.get('quality_checks', []):
            # Transform IDs
            from shared.utils.demo_id_transformer import transform_id
            try:
                check_uuid = UUID(check_data['id'])
                transformed_id = transform_id(check_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse check UUID",
                             check_id=check_data['id'],
                             error=str(e))
                continue

            # Map batch_id if it exists in our map
            batch_id_value = check_data.get('batch_id')
            if batch_id_value:
                batch_id_value = batch_id_map.get(UUID(batch_id_value), UUID(batch_id_value))

            # Map template_id if it exists
            template_id_value = check_data.get('template_id')
            if template_id_value:
                template_id_value = template_id_map.get(UUID(template_id_value), UUID(template_id_value))

            # Parse date fields (supports BASE_TS markers and ISO timestamps)
            adjusted_check_time = parse_date_field(
                check_data.get('check_time'),
                session_time,
                "check_time"
            )

            adjusted_created_at = parse_date_field(
                check_data.get('created_at'),
                session_time,
                "created_at"
            )
            adjusted_updated_at = parse_date_field(
                check_data.get('updated_at'),
                session_time,
                "updated_at"
            ) or adjusted_created_at

            new_check = QualityCheck(
                id=str(transformed_id),
                tenant_id=virtual_uuid,
                batch_id=str(batch_id_value) if batch_id_value else None,
                template_id=str(template_id_value) if template_id_value else None,
                check_type=check_data.get('check_type'),
                process_stage=check_data.get('process_stage'),
                check_time=adjusted_check_time,
                checker_id=check_data.get('checker_id'),
                quality_score=check_data.get('quality_score'),
                pass_fail=check_data.get('pass_fail'),
                defect_count=check_data.get('defect_count'),
                defect_types=check_data.get('defect_types'),
                measured_weight=check_data.get('measured_weight'),
                measured_temperature=check_data.get('measured_temperature'),
                measured_moisture=check_data.get('measured_moisture'),
                measured_dimensions=check_data.get('measured_dimensions'),
                stage_specific_data=check_data.get('stage_specific_data'),
                target_weight=check_data.get('target_weight'),
                target_temperature=check_data.get('target_temperature'),
                target_moisture=check_data.get('target_moisture'),
                tolerance_percentage=check_data.get('tolerance_percentage'),
                within_tolerance=check_data.get('within_tolerance'),
                corrective_action_needed=check_data.get('corrective_action_needed'),
                corrective_actions=check_data.get('corrective_actions'),
                template_results=check_data.get('template_results'),
                criteria_scores=check_data.get('criteria_scores'),
                check_notes=check_data.get('check_notes'),
                photos_urls=check_data.get('photos_urls'),
                certificate_url=check_data.get('certificate_url'),
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at
            )
            db.add(new_check)
            stats["quality_checks"] += 1

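        # Note on the .get(key, key) fallbacks above: a quality check whose
        # batch or template was not part of this clone keeps its original
        # (untransformed) reference instead of being dropped.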
        # Clone Production Schedules from seed data (if any)
        for schedule_data in seed_data.get('production_schedules', []):
            # Transform IDs
            from shared.utils.demo_id_transformer import transform_id
            try:
                schedule_uuid = UUID(schedule_data['id'])
                transformed_id = transform_id(schedule_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse schedule UUID",
                             schedule_id=schedule_data['id'],
                             error=str(e))
                continue

            # Parse date fields (supports BASE_TS markers and ISO timestamps)
            adjusted_schedule_date = parse_date_field(
                schedule_data.get('schedule_date'),
                session_time,
                "schedule_date"
            )
            adjusted_shift_start = parse_date_field(
                schedule_data.get('shift_start'),
                session_time,
                "shift_start"
            )
            adjusted_shift_end = parse_date_field(
                schedule_data.get('shift_end'),
                session_time,
                "shift_end"
            )
            adjusted_finalized = parse_date_field(
                schedule_data.get('finalized_at'),
                session_time,
                "finalized_at"
            )
            adjusted_created_at = parse_date_field(
                schedule_data.get('created_at'),
                session_time,
                "created_at"
            )
            adjusted_updated_at = parse_date_field(
                schedule_data.get('updated_at'),
                session_time,
                "updated_at"
            ) or adjusted_created_at

            new_schedule = ProductionSchedule(
                id=str(transformed_id),
                tenant_id=virtual_uuid,
                schedule_date=adjusted_schedule_date,
                shift_start=adjusted_shift_start,
                shift_end=adjusted_shift_end,
                total_capacity_hours=schedule_data.get('total_capacity_hours'),
                planned_capacity_hours=schedule_data.get('planned_capacity_hours'),
                actual_capacity_hours=schedule_data.get('actual_capacity_hours'),
                overtime_hours=schedule_data.get('overtime_hours', 0.0),
                staff_count=schedule_data.get('staff_count'),
                equipment_capacity=schedule_data.get('equipment_capacity'),
                station_assignments=schedule_data.get('station_assignments'),
                total_batches_planned=schedule_data.get('total_batches_planned', 0),
                total_batches_completed=schedule_data.get('total_batches_completed', 0),
                total_quantity_planned=schedule_data.get('total_quantity_planned', 0.0),
                total_quantity_produced=schedule_data.get('total_quantity_produced', 0.0),
                is_finalized=schedule_data.get('is_finalized', False),
                is_active=schedule_data.get('is_active', True),
                efficiency_percentage=schedule_data.get('efficiency_percentage'),
                utilization_percentage=schedule_data.get('utilization_percentage'),
                on_time_completion_rate=schedule_data.get('on_time_completion_rate'),
                schedule_notes=schedule_data.get('schedule_notes'),
                schedule_adjustments=schedule_data.get('schedule_adjustments'),
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at,
                finalized_at=adjusted_finalized
            )
            db.add(new_schedule)
            stats["production_schedules"] += 1

        # Clone Production Capacity from seed data (if any)
        for capacity_data in seed_data.get('production_capacity', []):
            # Transform IDs
            from shared.utils.demo_id_transformer import transform_id
            try:
                capacity_uuid = UUID(capacity_data['id'])
                transformed_id = transform_id(capacity_data['id'], virtual_uuid)
            except ValueError as e:
                logger.error("Failed to parse capacity UUID",
                             capacity_id=capacity_data['id'],
                             error=str(e))
                continue

            # Parse date fields (supports BASE_TS markers and ISO timestamps)
            adjusted_date = parse_date_field(
                capacity_data.get('date'),
                session_time,
                "date"
            )
            adjusted_start_time = parse_date_field(
                capacity_data.get('start_time'),
                session_time,
                "start_time"
            )
            adjusted_end_time = parse_date_field(
                capacity_data.get('end_time'),
                session_time,
                "end_time"
            )
            adjusted_last_maintenance = parse_date_field(
                capacity_data.get('last_maintenance_date'),
                session_time,
                "last_maintenance_date"
            )
            adjusted_created_at = parse_date_field(
                capacity_data.get('created_at'),
                session_time,
                "created_at"
            )
            adjusted_updated_at = parse_date_field(
                capacity_data.get('updated_at'),
                session_time,
                "updated_at"
            ) or adjusted_created_at

            new_capacity = ProductionCapacity(
                id=str(transformed_id),
                tenant_id=virtual_uuid,
                resource_type=capacity_data.get('resource_type'),
                resource_id=capacity_data.get('resource_id'),
                resource_name=capacity_data.get('resource_name'),
                date=adjusted_date,
                start_time=adjusted_start_time,
                end_time=adjusted_end_time,
                total_capacity_units=capacity_data.get('total_capacity_units'),
                allocated_capacity_units=capacity_data.get('allocated_capacity_units'),
                remaining_capacity_units=capacity_data.get('remaining_capacity_units'),
                is_available=capacity_data.get('is_available'),
                is_maintenance=capacity_data.get('is_maintenance'),
                is_reserved=capacity_data.get('is_reserved'),
                equipment_type=capacity_data.get('equipment_type'),
                max_batch_size=capacity_data.get('max_batch_size'),
                min_batch_size=capacity_data.get('min_batch_size'),
                setup_time_minutes=capacity_data.get('setup_time_minutes'),
                cleanup_time_minutes=capacity_data.get('cleanup_time_minutes'),
                efficiency_rating=capacity_data.get('efficiency_rating'),
                maintenance_status=capacity_data.get('maintenance_status'),
                last_maintenance_date=adjusted_last_maintenance,
                notes=capacity_data.get('notes'),
                restrictions=capacity_data.get('restrictions'),
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at
            )
            db.add(new_capacity)
            stats["production_capacity"] += 1

        # Note: Edge cases are now handled exclusively through JSON seed data.
        # The seed data files already contain comprehensive edge cases including:
        # - Overdue batches (should have started 2 hours ago)
        # - In-progress batches (currently being processed)
        # - Upcoming batches (scheduled for later today/tomorrow)
        # This ensures standardization and a single source of truth for demo data.

        logger.info(
            "Edge cases handled by JSON seed data - no manual creation needed",
            seed_data_edge_cases="overdue_batches, in_progress_batches, upcoming_batches"
        )

        # Commit cloned data
        await db.commit()

        # NOTE: Alert generation removed - alerts are now generated automatically by the
        # production alert service, which runs scheduled checks at appropriate intervals.
        # This eliminates duplicate alerts and provides a more realistic demo experience.
        stats["alerts_generated"] = 0

        # Calculate total from non-alert stats
        total_records = (stats["equipment"] + stats["batches"] + stats["production_schedules"] +
                         stats["quality_check_templates"] + stats["quality_checks"] +
                         stats["production_capacity"])
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
            "Production data cloning completed",
            virtual_tenant_id=virtual_tenant_id,
            total_records=total_records,
            stats=stats,
            duration_ms=duration_ms
        )

        return {
            "service": "production",
            "status": "completed",
            "records_cloned": total_records,
            "duration_ms": duration_ms,
            "details": stats
        }

    except ValueError as e:
        logger.error("Invalid UUID format", error=str(e))
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")

    except Exception as e:
        logger.error(
            "Failed to clone production data",
            error=str(e),
            virtual_tenant_id=virtual_tenant_id,
            exc_info=True
        )

        # Rollback on error
        await db.rollback()

        return {
            "service": "production",
            "status": "failed",
            "records_cloned": 0,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
            "error": str(e)
        }
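
# Example success payload returned by /clone (numbers hypothetical):
#     {"service": "production", "status": "completed", "records_cloned": 172,
#      "duration_ms": 830, "details": {"batches": 58, "equipment": 12, ...}}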
@router.get("/clone/health")
|
||||
async def clone_health_check():
|
||||
"""
|
||||
Health check for internal cloning endpoint
|
||||
Used by orchestrator to verify service availability
|
||||
"""
|
||||
return {
|
||||
"service": "production",
|
||||
"clone_endpoint": "available",
|
||||
"version": "2.0.0"
|
||||
}
|
||||
|
||||
|
||||
@router.delete("/tenant/{virtual_tenant_id}")
|
||||
async def delete_demo_data(
|
||||
virtual_tenant_id: str,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Delete all production data for a virtual demo tenant"""
|
||||
logger.info("Deleting production data for virtual tenant", virtual_tenant_id=virtual_tenant_id)
|
||||
start_time = datetime.now(timezone.utc)
|
||||
|
||||
try:
|
||||
virtual_uuid = uuid.UUID(virtual_tenant_id)
|
||||
|
||||
# Count records
|
||||
batch_count = await db.scalar(select(func.count(ProductionBatch.id)).where(ProductionBatch.tenant_id == virtual_uuid))
|
||||
schedule_count = await db.scalar(select(func.count(ProductionSchedule.id)).where(ProductionSchedule.tenant_id == virtual_uuid))
|
||||
quality_count = await db.scalar(select(func.count(QualityCheck.id)).where(QualityCheck.tenant_id == virtual_uuid))
|
||||
equipment_count = await db.scalar(select(func.count(Equipment.id)).where(Equipment.tenant_id == virtual_uuid))
|
||||
|
||||
# Delete in order
|
||||
await db.execute(delete(QualityCheck).where(QualityCheck.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(ProductionBatch).where(ProductionBatch.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(ProductionSchedule).where(ProductionSchedule.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(Equipment).where(Equipment.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(ProductionCapacity).where(ProductionCapacity.tenant_id == virtual_uuid))
|
||||
await db.commit()
|
||||
|
||||
duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
|
||||
logger.info("Production data deleted successfully", virtual_tenant_id=virtual_tenant_id, duration_ms=duration_ms)
|
||||
|
||||
return {
|
||||
"service": "production",
|
||||
"status": "deleted",
|
||||
"virtual_tenant_id": virtual_tenant_id,
|
||||
"records_deleted": {
|
||||
"batches": batch_count,
|
||||
"schedules": schedule_count,
|
||||
"quality_checks": quality_count,
|
||||
"equipment": equipment_count,
|
||||
"total": batch_count + schedule_count + quality_count + equipment_count
|
||||
},
|
||||
"duration_ms": duration_ms
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error("Failed to delete production data", error=str(e), exc_info=True)
|
||||
await db.rollback()
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
394
services/production/app/api/ml_insights.py
Normal file
@@ -0,0 +1,394 @@
"""
ML Insights API Endpoints for Production Service

Provides endpoints to trigger ML insight generation for:
- Production yield predictions
- Quality optimization
- Process efficiency analysis
"""

from fastapi import APIRouter, Depends, HTTPException, Request
from pydantic import BaseModel, Field
from typing import Optional, List
from uuid import UUID
from datetime import datetime, timedelta, timezone
import structlog
import pandas as pd

from app.core.database import get_db
from sqlalchemy.ext.asyncio import AsyncSession

logger = structlog.get_logger()

router = APIRouter(
    prefix="/api/v1/tenants/{tenant_id}/production/ml/insights",
    tags=["ML Insights"]
)


# ================================================================
# REQUEST/RESPONSE SCHEMAS
# ================================================================

class YieldPredictionRequest(BaseModel):
    """Request schema for yield prediction"""
    recipe_ids: Optional[List[str]] = Field(
        None,
        description="Specific recipe IDs to analyze. If None, analyzes all recipes"
    )
    lookback_days: int = Field(
        90,
        description="Days of historical production to analyze",
        ge=30,
        le=365
    )
    min_history_runs: int = Field(
        30,
        description="Minimum production runs required",
        ge=10,
        le=100
    )


class YieldPredictionResponse(BaseModel):
    """Response schema for yield prediction"""
    success: bool
    message: str
    tenant_id: str
    recipes_analyzed: int
    total_insights_generated: int
    total_insights_posted: int
    recipes_with_issues: int
    insights_by_recipe: dict
    errors: List[str] = []


# ================================================================
# API ENDPOINTS
# ================================================================

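# Example request body for the endpoint below (fields defined above; values
# illustrative):
#
#     {
#         "recipe_ids": null,
#         "lookback_days": 90,
#         "min_history_runs": 30
#     }
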
@router.post("/predict-yields", response_model=YieldPredictionResponse)
|
||||
async def trigger_yield_prediction(
|
||||
tenant_id: str,
|
||||
request_data: YieldPredictionRequest,
|
||||
request: Request,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Trigger yield prediction for production recipes.
|
||||
|
||||
This endpoint:
|
||||
1. Fetches historical production data for specified recipes
|
||||
2. Runs the YieldInsightsOrchestrator to predict yields
|
||||
3. Generates insights about yield optimization opportunities
|
||||
4. Posts insights to AI Insights Service
|
||||
5. Publishes recommendation events to RabbitMQ
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant UUID
|
||||
request_data: Prediction parameters
|
||||
request: FastAPI request (for app state access)
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
YieldPredictionResponse with prediction results
|
||||
"""
|
||||
logger.info(
|
||||
"ML insights yield prediction requested",
|
||||
tenant_id=tenant_id,
|
||||
recipe_ids=request_data.recipe_ids,
|
||||
lookback_days=request_data.lookback_days
|
||||
)
|
||||
|
||||
try:
|
||||
# Import ML orchestrator and clients
|
||||
from app.ml.yield_insights_orchestrator import YieldInsightsOrchestrator
|
||||
from shared.clients.recipes_client import RecipesServiceClient
|
||||
from app.core.config import settings
|
||||
|
||||
# Get event publisher from app state (if available)
|
||||
event_publisher = getattr(request.app.state, 'event_publisher', None) if hasattr(request, 'app') else None
|
||||
|
||||
# Initialize orchestrator and recipes client
|
||||
orchestrator = YieldInsightsOrchestrator(
|
||||
event_publisher=event_publisher
|
||||
)
|
||||
recipes_client = RecipesServiceClient(settings)
|
||||
|
||||
# Get recipes to analyze from recipes service via API
|
||||
if request_data.recipe_ids:
|
||||
# Fetch specific recipes
|
||||
recipes = []
|
||||
for recipe_id in request_data.recipe_ids:
|
||||
recipe = await recipes_client.get_recipe_by_id(
|
||||
recipe_id=recipe_id,
|
||||
tenant_id=tenant_id
|
||||
)
|
||||
if recipe:
|
||||
recipes.append(recipe)
|
||||
else:
|
||||
# Fetch all recipes for tenant (limit to 10)
|
||||
all_recipes = await recipes_client.get_all_recipes(tenant_id=tenant_id)
|
||||
recipes = all_recipes[:10] if all_recipes else [] # Limit to prevent timeout
|
||||
|
||||
if not recipes:
|
||||
return YieldPredictionResponse(
|
||||
success=False,
|
||||
message="No recipes found for analysis",
|
||||
tenant_id=tenant_id,
|
||||
recipes_analyzed=0,
|
||||
total_insights_generated=0,
|
||||
total_insights_posted=0,
|
||||
recipes_with_issues=0,
|
||||
insights_by_recipe={},
|
||||
errors=["No recipes found"]
|
||||
)
|
||||
|
||||
# Calculate date range for production history
|
||||
end_date = datetime.utcnow()
|
||||
start_date = end_date - timedelta(days=request_data.lookback_days)
|
||||
|
||||
# Process each recipe
|
||||
total_insights_generated = 0
|
||||
total_insights_posted = 0
|
||||
recipes_with_issues = 0
|
||||
insights_by_recipe = {}
|
||||
errors = []
|
||||
|
||||
        for recipe in recipes:
            try:
                recipe_id = str(recipe['id'])
                recipe_name = recipe.get('name', 'Unknown Recipe')
                logger.info(f"Analyzing yield for {recipe_name} ({recipe_id})")

                # Fetch real production batch history from the database
                from app.models.production import ProductionBatch, ProductionStatus
                from sqlalchemy import select

                batch_query = select(ProductionBatch).where(
                    ProductionBatch.tenant_id == UUID(tenant_id),
                    ProductionBatch.recipe_id == UUID(recipe_id),  # Use the extracted UUID
                    ProductionBatch.actual_start_time >= start_date,
                    ProductionBatch.actual_start_time <= end_date,
                    ProductionBatch.status == ProductionStatus.COMPLETED,
                    ProductionBatch.actual_quantity.isnot(None)
                ).order_by(ProductionBatch.actual_start_time)

                batch_result = await db.execute(batch_query)
                batches = batch_result.scalars().all()

                if len(batches) < request_data.min_history_runs:
                    logger.warning(
                        f"Insufficient production history for recipe {recipe_id}: "
                        f"{len(batches)} batches < {request_data.min_history_runs} required"
                    )
                    continue

                # Create production history DataFrame from real batches
                production_data = []
                for batch in batches:
                    # Calculate yield percentage; skip batches without complete data
                    if batch.planned_quantity and batch.actual_quantity:
                        yield_pct = (batch.actual_quantity / batch.planned_quantity) * 100
                    else:
                        continue

                    production_data.append({
                        'production_run_id': str(batch.id),  # Required: unique identifier for each production run
                        'recipe_id': str(batch.recipe_id),  # Required: recipe identifier
                        'started_at': batch.actual_start_time,
                        'completed_at': batch.actual_end_time,  # Optional but useful for duration analysis
                        'batch_size': float(batch.planned_quantity),  # Use planned_quantity as batch_size
                        'planned_quantity': float(batch.planned_quantity),
                        'actual_quantity': float(batch.actual_quantity),
                        'yield_percentage': yield_pct,
                        'staff_assigned': batch.staff_assigned if batch.staff_assigned else ['unknown'],
                        'batch_number': batch.batch_number,
                        'equipment_id': batch.equipment_used[0] if batch.equipment_used and len(batch.equipment_used) > 0 else None,
                        'notes': batch.quality_notes  # Optional quality notes
                    })

                if not production_data:
                    logger.warning(
                        f"No valid production data for recipe {recipe_id}"
                    )
                    continue

                production_history = pd.DataFrame(production_data)

                # Debug: log DataFrame columns and sample data
                logger.debug(
                    "Production history DataFrame created",
                    recipe_id=recipe_id,
                    columns=list(production_history.columns),
                    sample_data=production_history.head(1).to_dict('records') if len(production_history) > 0 else None
                )

                # Run yield analysis
                results = await orchestrator.analyze_and_post_insights(
                    tenant_id=tenant_id,
                    recipe_id=recipe_id,
                    production_history=production_history,
                    min_history_runs=request_data.min_history_runs
                )

                # Track results
                total_insights_generated += results['insights_generated']
                total_insights_posted += results['insights_posted']

                baseline_stats = results.get('baseline_stats', {})
                mean_yield = baseline_stats.get('mean_yield', 100)
                if mean_yield < 90:
                    recipes_with_issues += 1

                insights_by_recipe[recipe_id] = {
                    'recipe_name': recipe_name,
                    'insights_posted': results['insights_posted'],
                    'mean_yield': mean_yield,
                    'patterns': len(results.get('patterns', []))
                }

                logger.info(
                    f"Recipe {recipe_id} analysis complete",
                    insights_posted=results['insights_posted'],
                    mean_yield=mean_yield
                )

            except Exception as e:
                # recipe_id may not be set yet if the failure happened early in the loop body
                error_msg = f"Error analyzing recipe {recipe.get('id', 'unknown')}: {str(e)}"
                logger.error(error_msg, exc_info=True)
                errors.append(error_msg)

        # Close orchestrator
        await orchestrator.close()

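        # Worked example of the yield math above (numbers hypothetical):
        # planned_quantity=120, actual_quantity=105 gives
        # yield_percentage = 105 / 120 * 100 = 87.5, and a recipe whose
        # mean_yield falls below the 90% threshold is counted in
        # recipes_with_issues.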
        # Build response
        recipes_analyzed = len([r for r in recipes if isinstance(r, dict)])
        response = YieldPredictionResponse(
            success=total_insights_posted > 0,
            message=f"Successfully analyzed {recipes_analyzed} recipes, generated {total_insights_posted} insights",
            tenant_id=tenant_id,
            recipes_analyzed=recipes_analyzed,
            total_insights_generated=total_insights_generated,
            total_insights_posted=total_insights_posted,
            recipes_with_issues=recipes_with_issues,
            insights_by_recipe=insights_by_recipe,
            errors=errors
        )

        logger.info(
            "ML insights yield prediction complete",
            tenant_id=tenant_id,
            total_insights=total_insights_posted,
            recipes_with_issues=recipes_with_issues
        )

        return response

    except Exception as e:
        logger.error(
            "ML insights yield prediction failed",
            tenant_id=tenant_id,
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Yield prediction failed: {str(e)}"
        )

@router.get("/health")
|
||||
async def ml_insights_health():
|
||||
"""Health check for ML insights endpoints"""
|
||||
return {
|
||||
"status": "healthy",
|
||||
"service": "production-ml-insights",
|
||||
"endpoints": [
|
||||
"POST /ml/insights/predict-yields"
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
# ================================================================
# INTERNAL ENDPOINTS (for demo-session service)
# ================================================================

# Create a separate router for internal endpoints to avoid the tenant prefix
internal_router = APIRouter(
    tags=["ML Insights - Internal"]
)


@internal_router.post("/api/v1/tenants/{tenant_id}/production/internal/ml/generate-yield-insights")
async def generate_yield_insights_internal(
    tenant_id: str,
    request: Request,
    db: AsyncSession = Depends(get_db)
):
    """
    Internal endpoint to trigger yield insights generation for demo sessions.

    This endpoint is called by the demo-session service after cloning data.
    It uses the same ML logic as the public endpoint but with optimized defaults.

    Security: Protected by x-internal-service header check.

    Args:
        tenant_id: The tenant UUID
        request: FastAPI request object
        db: Database session

    Returns:
        {
            "insights_posted": int,
            "tenant_id": str,
            "status": str
        }
    """
    # Verify internal service header
    if not request or request.headers.get("x-internal-service") not in ["demo-session", "internal"]:
        logger.warning("Unauthorized internal API call", tenant_id=tenant_id)
        raise HTTPException(
            status_code=403,
            detail="This endpoint is for internal service use only"
        )

    logger.info("Internal yield insights generation triggered", tenant_id=tenant_id)

    try:
        # Use the existing yield prediction logic with sensible defaults
        request_data = YieldPredictionRequest(
            recipe_ids=None,  # Analyze all recipes
            lookback_days=90,  # 3 months of history
            min_history_runs=20  # Minimum 20 production runs required
        )

        # Call the existing yield prediction endpoint logic
        result = await trigger_yield_prediction(
            tenant_id=tenant_id,
            request_data=request_data,
            request=request,
            db=db
        )

        # Return simplified response for internal use
        return {
            "insights_posted": result.total_insights_posted,
            "tenant_id": tenant_id,
            "status": "success" if result.success else "failed",
            "message": result.message,
            "recipes_analyzed": result.recipes_analyzed,
            "recipes_with_issues": result.recipes_with_issues
        }

    except Exception as e:
        logger.error(
            "Internal yield insights generation failed",
            tenant_id=tenant_id,
            error=str(e),
            exc_info=True
        )
        raise HTTPException(
            status_code=500,
            detail=f"Internal yield insights generation failed: {str(e)}"
        )
241
services/production/app/api/orchestrator.py
Normal file
@@ -0,0 +1,241 @@
# ================================================================
# services/production/app/api/orchestrator.py
# ================================================================
"""
Production Orchestrator API - Endpoints for orchestrated production scheduling
Called by the Orchestrator Service to generate production schedules from forecast data
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Request
from typing import Optional, Dict, Any, List
from datetime import date
from uuid import UUID
from pydantic import BaseModel, Field
import structlog

from shared.routing import RouteBuilder
from app.services.production_service import ProductionService
from app.schemas.production import ProductionScheduleResponse
from app.core.config import settings

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-orchestrator"])


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


# ================================================================
# REQUEST/RESPONSE SCHEMAS
# ================================================================

class GenerateScheduleRequest(BaseModel):
    """
    Request to generate a production schedule (called by the Orchestrator)

    The Orchestrator calls the Forecasting Service first, then passes forecast data here.
    The Production Service uses this data to determine what to produce.

    NEW: Accepts cached data snapshots from the Orchestrator to eliminate duplicate API calls.
    """
    forecast_data: Dict[str, Any] = Field(..., description="Forecast data from Forecasting Service")
    target_date: Optional[date] = Field(None, description="Target production date")
    planning_horizon_days: int = Field(default=1, ge=1, le=7, description="Planning horizon in days")

    # NEW: Cached data from Orchestrator
    inventory_data: Optional[Dict[str, Any]] = Field(None, description="Cached inventory snapshot from Orchestrator")
    recipes_data: Optional[Dict[str, Any]] = Field(None, description="Cached recipes snapshot from Orchestrator")

    class Config:
        json_schema_extra = {
            "example": {
                "forecast_data": {
                    "forecasts": [
                        {
                            "product_id": "uuid-here",
                            "predicted_demand": 100.0,
                            "confidence_score": 0.85
                        }
                    ],
                    "forecast_id": "uuid-here",
                    "generated_at": "2025-01-30T10:00:00Z"
                },
                "target_date": "2025-01-31",
                "planning_horizon_days": 1
            }
        }


class GenerateScheduleResponse(BaseModel):
    """Response from the generate-schedule endpoint"""
    success: bool
    message: str
    schedule_id: Optional[UUID] = None
    schedule_number: Optional[str] = None
    batches_created: int = 0
    total_planned_quantity: float = 0.0
    warnings: List[str] = []
    errors: List[str] = []

    class Config:
        json_schema_extra = {
            "example": {
                "success": True,
                "message": "Production schedule generated successfully",
                "schedule_id": "uuid-here",
                "schedule_number": "PROD-2025-01-30-001",
                "batches_created": 5,
                "total_planned_quantity": 500.0,
                "warnings": [],
                "errors": []
            }
        }


# ================================================================
# ORCHESTRATOR ENTRY POINT
# ================================================================

@router.post(
    route_builder.build_operations_route("generate-schedule"),
    response_model=GenerateScheduleResponse
)
async def generate_production_schedule(
    tenant_id: UUID = Path(...),
    request_data: GenerateScheduleRequest = ...,
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Generate a production schedule from forecast data (called by the Orchestrator).

    This is the main entry point for orchestrated production planning.
    The Orchestrator calls the Forecasting Service first, then passes the forecast data here.

    Flow:
    1. Receive forecast data from the orchestrator
    2. Parse the forecast to extract product demands
    3. Check inventory levels for each product
    4. Calculate the production quantities needed
    5. Create the production schedule and batches
    6. Return a schedule summary

    Args:
        tenant_id: Tenant UUID
        request_data: Schedule generation request with forecast data

    Returns:
        GenerateScheduleResponse with schedule details and created batches
    """
    try:
        logger.info("Generate production schedule endpoint called",
                    tenant_id=str(tenant_id),
                    has_forecast_data=bool(request_data.forecast_data))

        target_date = request_data.target_date or date.today()
        forecast_data = request_data.forecast_data

        # Parse the forecast data received from the orchestrator
        forecasts = _parse_forecast_data(forecast_data)

        if not forecasts:
            return GenerateScheduleResponse(
                success=False,
                message="No forecast data provided",
                errors=["Forecast data is empty or invalid"]
            )

        # Generate the production schedule using the service (with cached data if available)
        result = await production_service.generate_production_schedule_from_forecast(
            tenant_id=tenant_id,
            target_date=target_date,
            forecasts=forecasts,
            planning_horizon_days=request_data.planning_horizon_days,
            inventory_data=request_data.inventory_data,  # pass the cached inventory snapshot if provided
            recipes_data=request_data.recipes_data  # pass the cached recipes snapshot if provided
        )

        logger.info("Production schedule generated successfully",
                    tenant_id=str(tenant_id),
                    schedule_id=str(result.get('schedule_id')) if result.get('schedule_id') else None,
                    batches_created=result.get('batches_created', 0))

        return GenerateScheduleResponse(
            success=True,
            message="Production schedule generated successfully",
            schedule_id=result.get('schedule_id'),
            schedule_number=result.get('schedule_number'),
            batches_created=result.get('batches_created', 0),
            total_planned_quantity=result.get('total_planned_quantity', 0.0),
            warnings=result.get('warnings', []),
            errors=[]
        )

    except Exception as e:
        logger.error("Error generating production schedule",
                     error=str(e), tenant_id=str(tenant_id))
        return GenerateScheduleResponse(
            success=False,
            message="Failed to generate production schedule",
            errors=[str(e)]
        )
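

# --- Illustrative only ------------------------------------------------------
# A minimal sketch of the orchestrator-side call into this endpoint. It
# assumes httpx is available, that the operations route resolves to
# ".../production/operations/generate-schedule", and that a bearer token is
# used for service-to-service auth; none of that is defined in this file.
import httpx


async def _example_orchestrator_call(base_url: str, tenant_id: str,
                                     forecast_payload: dict, token: str) -> dict:
    """Hypothetical helper showing the request shape generate_production_schedule expects."""
    url = f"{base_url}/api/v1/tenants/{tenant_id}/production/operations/generate-schedule"
    body = {
        "forecast_data": forecast_payload,   # output of the Forecasting Service
        "target_date": None,                 # defaults to today on the server
        "planning_horizon_days": 7,          # matches GenerateScheduleRequest above
        "inventory_data": None,              # optional cached inventory snapshot
        "recipes_data": None,                # optional cached recipes snapshot
    }
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(url, json=body,
                                     headers={"Authorization": f"Bearer {token}"})
        response.raise_for_status()
        return response.json()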


# ================================================================
# HELPER FUNCTIONS
# ================================================================

def _parse_forecast_data(forecast_data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Parse forecast data received from orchestrator

    Expected format from Forecasting Service via Orchestrator:
    {
        "forecasts": [
            {
                "product_id": "uuid",
                "inventory_product_id": "uuid",  # Alternative field name
                "predicted_demand": 100.0,
                "predicted_value": 100.0,  # Alternative field name
                "confidence_score": 0.85,
                ...
            }
        ],
        "forecast_id": "uuid",
        "generated_at": "2025-01-30T10:00:00Z"
    }
    """
    forecasts = []

    forecast_list = forecast_data.get('forecasts', [])
    for forecast_item in forecast_list:
        # Extract product ID (try multiple field names)
        product_id = (
            forecast_item.get('product_id') or
            forecast_item.get('inventory_product_id') or
            forecast_item.get('item_id')
        )

        # Extract predicted demand (try multiple field names)
        predicted_demand = (
            forecast_item.get('predicted_demand') or
            forecast_item.get('predicted_value') or
            forecast_item.get('demand') or
            0
        )

        if product_id and predicted_demand > 0:
            forecasts.append({
                'product_id': product_id,
                'predicted_demand': float(predicted_demand),
                'confidence_score': forecast_item.get('confidence_score', 0.8),
                'lower_bound': forecast_item.get('lower_bound', 0),
                'upper_bound': forecast_item.get('upper_bound', 0),
                'forecast_id': forecast_data.get('forecast_id'),
            })

    return forecasts
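

# Illustrative only: a tiny, self-contained check of how _parse_forecast_data
# normalizes the alternative field names documented above. The payload is
# hypothetical sample data, not output from a real Forecasting Service.
if __name__ == "__main__":  # pragma: no cover
    _sample = {
        "forecast_id": "f-123",
        "forecasts": [
            {"product_id": "p-1", "predicted_demand": 120.0, "confidence_score": 0.9},
            {"inventory_product_id": "p-2", "predicted_value": 45.5},  # alt field names
            {"item_id": "p-3", "demand": 0},                           # dropped: no demand
        ],
    }
    for entry in _parse_forecast_data(_sample):
        print(entry["product_id"], entry["predicted_demand"], entry["confidence_score"])
    # p-1 120.0 0.9
    # p-2 45.5 0.8   (confidence defaults to 0.8)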

357
services/production/app/api/production_batches.py
Normal file
@@ -0,0 +1,357 @@
# services/production/app/api/production_batches.py
"""
Production Batches API - ATOMIC CRUD operations on the ProductionBatch model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from datetime import date
from uuid import UUID
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.security import create_audit_logger, AuditSeverity, AuditAction
from app.core.database import get_db
from app.services.production_service import ProductionService
from app.models import AuditLog
from app.schemas.production import (
    ProductionBatchCreate,
    ProductionBatchUpdate,
    ProductionBatchStatusUpdate,
    ProductionBatchResponse,
    ProductionBatchListResponse,
    ProductionStatusEnum
)
from app.core.config import settings
from app.utils.cache import get_cached, set_cached, make_cache_key
from app.services.production_alert_service import ProductionAlertService

logger = structlog.get_logger()


async def get_production_alert_service(request: Request) -> Optional[ProductionAlertService]:
    """Dependency injection for the production alert service"""
    # The alert service lives in app state, where it is stored during app startup
    alert_service = getattr(request.app.state, 'production_alert_service', None)
    if not alert_service:
        logger.warning("Production alert service not available in app state")
        return None
    return alert_service


route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-batches"])

# Initialize the audit logger with the production service's AuditLog model
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for the production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


@router.get(
    route_builder.build_base_route("batches"),
    response_model=ProductionBatchListResponse
)
async def list_production_batches(
    tenant_id: UUID = Path(...),
    status: Optional[ProductionStatusEnum] = Query(None, description="Filter by status"),
    product_id: Optional[UUID] = Query(None, description="Filter by product"),
    order_id: Optional[UUID] = Query(None, description="Filter by order"),
    start_date: Optional[date] = Query(None, description="Filter from date"),
    end_date: Optional[date] = Query(None, description="Filter to date"),
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=100, description="Page size"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """List batches with filters (status, product, order_id, date range), Redis-cached with a 20s TTL"""
    try:
        # PERFORMANCE OPTIMIZATION: cache frequently accessed queries (status filter, first page)
        cache_key = None
        if page == 1 and product_id is None and order_id is None and start_date is None and end_date is None:
            # Cache simple status-filtered queries (common for dashboards)
            cache_key = make_cache_key(
                "production_batches",
                str(tenant_id),
                status=status.value if status else None,
                page_size=page_size
            )
            cached_result = await get_cached(cache_key)
            if cached_result is not None:
                logger.debug("Cache hit for production batches", cache_key=cache_key, tenant_id=str(tenant_id), status=status)
                return ProductionBatchListResponse(**cached_result)

        filters = {
            "status": status,
            "product_id": str(product_id) if product_id else None,
            "order_id": str(order_id) if order_id else None,
            "start_date": start_date,
            "end_date": end_date
        }

        batch_list = await production_service.get_production_batches_list(tenant_id, filters, page, page_size)

        # Cache the result if applicable (20s TTL for production batches)
        if cache_key:
            await set_cached(cache_key, batch_list.model_dump(), ttl=20)
            logger.debug("Cached production batches", cache_key=cache_key, ttl=20, tenant_id=str(tenant_id), status=status)

        logger.info("Retrieved production batches list",
                    tenant_id=str(tenant_id), filters=filters)

        return batch_list

    except Exception as e:
        logger.error("Error listing production batches",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to list production batches")


@router.post(
    route_builder.build_base_route("batches"),
    response_model=ProductionBatchResponse
)
async def create_production_batch(
    batch_data: ProductionBatchCreate,
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service),
    request: Request = None,
    alert_service: ProductionAlertService = Depends(get_production_alert_service)
):
    """Create a new production batch"""
    try:
        batch = await production_service.create_production_batch(tenant_id, batch_data)

        # Trigger the Start Production alert
        if alert_service:
            try:
                # Generate reasoning data for the batch
                reasoning_data = {
                    "type": "manual_creation",
                    "parameters": {
                        "product_name": batch.product_name,
                        "planned_quantity": batch.planned_quantity,
                        "priority": batch.priority.value if batch.priority else "MEDIUM"
                    },
                    "urgency": {
                        "level": "normal",
                        "ready_by_time": batch.planned_start_time.strftime('%H:%M') if batch.planned_start_time else "unknown"
                    },
                    "metadata": {
                        "trigger_source": "manual_creation",
                        "created_by": current_user.get("user_id", "unknown"),
                        "is_ai_assisted": False
                    }
                }

                # Update the batch with reasoning data
                # (get_db is an async generator dependency, so it must be advanced with anext, not next)
                from app.core.database import get_db
                db = await anext(get_db())
                batch.reasoning_data = reasoning_data
                await db.commit()

                # Emit the Start Production alert
                await alert_service.emit_start_production_alert(
                    tenant_id=tenant_id,
                    batch_id=batch.id,
                    product_name=batch.product_name,
                    batch_number=batch.batch_number,
                    reasoning_data=reasoning_data,
                    planned_start_time=batch.planned_start_time.isoformat() if batch.planned_start_time else None
                )

                logger.info("Start Production alert triggered for batch",
                            batch_id=str(batch.id), tenant_id=str(tenant_id))

            except Exception as alert_error:
                logger.error("Failed to trigger Start Production alert",
                             error=str(alert_error), batch_id=str(batch.id))
                # Don't fail the batch creation if the alert fails

        logger.info("Created production batch",
                    batch_id=str(batch.id), tenant_id=str(tenant_id))

        return ProductionBatchResponse.model_validate(batch)

    except ValueError as e:
        logger.warning("Invalid batch data", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error creating production batch",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to create production batch")


@router.get(
    route_builder.build_base_route("batches/active"),
    response_model=ProductionBatchListResponse
)
async def get_active_batches(
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db=Depends(get_db)
):
    """Get currently active production batches"""
    try:
        from app.repositories.production_batch_repository import ProductionBatchRepository
        batch_repo = ProductionBatchRepository(db)

        batches = await batch_repo.get_active_batches(str(tenant_id))
        batch_responses = [ProductionBatchResponse.model_validate(batch) for batch in batches]

        logger.info("Retrieved active production batches",
                    count=len(batches), tenant_id=str(tenant_id))

        return ProductionBatchListResponse(
            batches=batch_responses,
            total_count=len(batches),
            page=1,
            page_size=len(batches)
        )

    except Exception as e:
        logger.error("Error getting active batches",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get active batches")


@router.get(
    route_builder.build_resource_detail_route("batches", "batch_id"),
    response_model=ProductionBatchResponse
)
async def get_batch_details(
    tenant_id: UUID = Path(...),
    batch_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db=Depends(get_db)
):
    """Get detailed information about a production batch"""
    try:
        from app.repositories.production_batch_repository import ProductionBatchRepository
        batch_repo = ProductionBatchRepository(db)

        batch = await batch_repo.get_by_id(batch_id)
        if not batch or str(batch.tenant_id) != str(tenant_id):
            raise HTTPException(status_code=404, detail="Production batch not found")

        logger.info("Retrieved production batch details",
                    batch_id=str(batch_id), tenant_id=str(tenant_id))

        return ProductionBatchResponse.model_validate(batch)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error getting batch details",
                     error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get batch details")


@router.put(
    route_builder.build_nested_resource_route("batches", "batch_id", "status"),
    response_model=ProductionBatchResponse
)
async def update_batch_status(
    status_update: ProductionBatchStatusUpdate,
    tenant_id: UUID = Path(...),
    batch_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Update production batch status"""
    try:
        batch = await production_service.update_batch_status(tenant_id, batch_id, status_update)

        logger.info("Updated production batch status",
                    batch_id=str(batch_id),
                    new_status=status_update.status.value,
                    tenant_id=str(tenant_id))

        return ProductionBatchResponse.model_validate(batch)

    except ValueError as e:
        logger.warning("Invalid status update", error=str(e), batch_id=str(batch_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error updating batch status",
                     error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to update batch status")


@router.put(
    route_builder.build_resource_detail_route("batches", "batch_id"),
    response_model=ProductionBatchResponse
)
async def update_production_batch(
    batch_update: ProductionBatchUpdate,
    tenant_id: UUID = Path(...),
    batch_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Update batch (e.g., start time, notes, status)"""
    try:
        batch = await production_service.update_production_batch(tenant_id, batch_id, batch_update)

        logger.info("Updated production batch",
                    batch_id=str(batch_id), tenant_id=str(tenant_id))

        return ProductionBatchResponse.model_validate(batch)

    except ValueError as e:
        logger.warning("Invalid batch update", error=str(e), batch_id=str(batch_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error updating production batch",
                     error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to update production batch")


@router.delete(
    route_builder.build_resource_detail_route("batches", "batch_id")
)
@require_user_role(['admin', 'owner'])
async def delete_production_batch(
    tenant_id: UUID = Path(...),
    batch_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Cancel/delete a draft batch (Admin+ only, soft delete preferred)"""
    try:
        await production_service.delete_production_batch(tenant_id, batch_id)

        # Log an audit event for the batch deletion
        try:
            # get_db is an async generator dependency, so advance it with anext, not next
            db = await anext(get_db())
            await audit_logger.log_deletion(
                db_session=db,
                tenant_id=str(tenant_id),
                user_id=current_user["user_id"],
                resource_type="production_batch",
                resource_id=str(batch_id),
                description="Deleted production batch",
                endpoint=f"/batches/{batch_id}",
                method="DELETE"
            )
        except Exception as audit_error:
            logger.warning("Failed to log audit event", error=str(audit_error))

        logger.info("Deleted production batch",
                    batch_id=str(batch_id), tenant_id=str(tenant_id))

        return {"message": "Production batch deleted successfully"}

    except ValueError as e:
        logger.warning("Cannot delete batch", error=str(e), batch_id=str(batch_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error deleting production batch",
                     error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to delete production batch")

90
services/production/app/api/production_dashboard.py
Normal file
@@ -0,0 +1,90 @@
# services/production/app/api/production_dashboard.py
"""
Production Dashboard API - Dashboard endpoints for production overview
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from datetime import date, datetime
from uuid import UUID
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.routing import RouteBuilder
from app.services.production_service import ProductionService
from app.schemas.production import ProductionDashboardSummary
from app.core.config import settings
from app.utils.cache import get_cached, set_cached, make_cache_key

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-dashboard"])


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


@router.get(
    route_builder.build_dashboard_route("summary"),
    response_model=ProductionDashboardSummary
)
async def get_dashboard_summary(
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get production dashboard summary with caching (60s TTL)"""
    try:
        # PHASE 2: Check the cache first
        cache_key = make_cache_key("production_dashboard", str(tenant_id))
        cached_result = await get_cached(cache_key)
        if cached_result is not None:
            logger.debug("Cache hit for production dashboard", cache_key=cache_key, tenant_id=str(tenant_id))
            return ProductionDashboardSummary(**cached_result)

        # Cache miss - fetch from the database
        summary = await production_service.get_dashboard_summary(tenant_id)

        # PHASE 2: Cache the result (60s TTL for the dashboard summary)
        await set_cached(cache_key, summary.model_dump(), ttl=60)
        logger.debug("Cached production dashboard", cache_key=cache_key, ttl=60, tenant_id=str(tenant_id))

        logger.info("Retrieved production dashboard summary",
                    tenant_id=str(tenant_id))

        return summary

    except Exception as e:
        logger.error("Error getting dashboard summary",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get dashboard summary")


@router.get(
    route_builder.build_dashboard_route("requirements"),
    response_model=dict
)
async def get_production_requirements(
    tenant_id: UUID = Path(...),
    target_date: Optional[date] = Query(None, alias="date", description="Target date for production requirements"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get production requirements for procurement planning"""
    try:
        # The alias keeps the public query parameter as ?date=... while avoiding
        # shadowing the datetime.date type inside the function body
        target_date = target_date or datetime.now().date()
        requirements = await production_service.get_production_requirements(tenant_id, target_date)

        logger.info("Retrieved production requirements for procurement",
                    tenant_id=str(tenant_id), date=target_date.isoformat())

        return requirements

    except Exception as e:
        logger.error("Error getting production requirements",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get production requirements")

470
services/production/app/api/production_operations.py
Normal file
@@ -0,0 +1,470 @@
# services/production/app/api/production_operations.py
"""
Production Operations API - Business operations for production management
Includes: batch start/complete, schedule finalize/optimize, capacity management, transformations, stats
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request, status
from typing import Optional
from datetime import date, datetime, timedelta
from uuid import UUID
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.routing import RouteBuilder
from shared.monitoring.decorators import monitor_performance
from app.services.production_service import ProductionService
from app.schemas.production import (
    ProductionBatchResponse,
    ProductionScheduleResponse
)
from app.core.config import settings

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-operations"])


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


# ===== BATCH OPERATIONS =====

@router.post(
    route_builder.build_nested_resource_route("batches", "batch_id", "start"),
    response_model=ProductionBatchResponse
)
async def start_production_batch(
    tenant_id: UUID = Path(...),
    batch_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Mark batch as started (updates actual_start_time)"""
    try:
        batch = await production_service.start_production_batch(tenant_id, batch_id)

        logger.info("Started production batch",
                    batch_id=str(batch_id), tenant_id=str(tenant_id))

        return ProductionBatchResponse.model_validate(batch)

    except ValueError as e:
        logger.warning("Cannot start batch", error=str(e), batch_id=str(batch_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error starting production batch",
                     error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to start production batch")


@router.post(
    route_builder.build_nested_resource_route("batches", "batch_id", "complete"),
    response_model=ProductionBatchResponse
)
async def complete_production_batch(
    tenant_id: UUID = Path(...),
    batch_id: UUID = Path(...),
    completion_data: Optional[dict] = None,
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Complete a batch (auto-calculates yield, duration, and cost summary)"""
    try:
        batch = await production_service.complete_production_batch(tenant_id, batch_id, completion_data)

        logger.info("Completed production batch",
                    batch_id=str(batch_id), tenant_id=str(tenant_id))

        return ProductionBatchResponse.model_validate(batch)

    except ValueError as e:
        logger.warning("Cannot complete batch", error=str(e), batch_id=str(batch_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error completing production batch",
                     error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to complete production batch")


@router.get(
    route_builder.build_operations_route("batches/stats"),
    response_model=dict
)
async def get_production_batch_stats(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None, description="Start date for stats"),
    end_date: Optional[date] = Query(None, description="End date for stats"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Aggregated stats: completed vs failed, avg yield, on-time rate"""
    try:
        # Default to the last 30 days if no dates are provided
        if not start_date:
            start_date = (datetime.now() - timedelta(days=30)).date()
        if not end_date:
            end_date = datetime.now().date()

        stats = await production_service.get_batch_statistics(tenant_id, start_date, end_date)

        logger.info("Retrieved production batch statistics",
                    tenant_id=str(tenant_id), start_date=start_date.isoformat(), end_date=end_date.isoformat())

        return stats

    except Exception as e:
        logger.error("Error getting production batch stats",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get production batch stats")


# ===== SCHEDULE OPERATIONS =====

@router.post(
    route_builder.build_nested_resource_route("schedules", "schedule_id", "finalize"),
    response_model=ProductionScheduleResponse
)
async def finalize_production_schedule(
    tenant_id: UUID = Path(...),
    schedule_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Lock the schedule; prevents further changes"""
    try:
        schedule = await production_service.finalize_production_schedule(tenant_id, schedule_id)

        logger.info("Finalized production schedule",
                    schedule_id=str(schedule_id), tenant_id=str(tenant_id))

        return ProductionScheduleResponse.model_validate(schedule)

    except ValueError as e:
        logger.warning("Cannot finalize schedule", error=str(e), schedule_id=str(schedule_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error finalizing production schedule",
                     error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to finalize production schedule")


@router.get(
    route_builder.build_operations_route("schedules/optimize"),
    response_model=dict
)
async def optimize_production_schedule(
    tenant_id: UUID = Path(...),
    target_date: date = Query(..., description="Date to optimize"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Trigger an AI-based rescheduling suggestion based on demand and capacity"""
    try:
        optimization_result = await production_service.optimize_schedule(tenant_id, target_date)

        logger.info("Generated schedule optimization suggestions",
                    tenant_id=str(tenant_id), date=target_date.isoformat())

        return optimization_result

    except Exception as e:
        logger.error("Error optimizing production schedule",
                     error=str(e), tenant_id=str(tenant_id), date=target_date.isoformat())
        raise HTTPException(status_code=500, detail="Failed to optimize production schedule")


@router.get(
    route_builder.build_operations_route("schedules/capacity-usage"),
    response_model=dict
)
async def get_schedule_capacity_usage(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None),
    end_date: Optional[date] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get a capacity usage report for a scheduling period"""
    try:
        if not start_date:
            start_date = datetime.now().date()
        if not end_date:
            end_date = start_date + timedelta(days=7)

        usage_report = await production_service.get_capacity_usage_report(tenant_id, start_date, end_date)

        logger.info("Retrieved capacity usage report",
                    tenant_id=str(tenant_id),
                    start_date=start_date.isoformat(),
                    end_date=end_date.isoformat())

        return usage_report

    except Exception as e:
        logger.error("Error getting capacity usage",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get capacity usage")


# ===== CAPACITY MANAGEMENT =====

@router.get(
    route_builder.build_operations_route("capacity/status"),
    response_model=dict
)
async def get_capacity_status(
    tenant_id: UUID = Path(...),
    target_date: Optional[date] = Query(None),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get real-time capacity status"""
    try:
        if not target_date:
            target_date = datetime.now().date()

        # Use a local name that does not shadow the fastapi.status module imported above
        capacity_status = await production_service.get_capacity_status(tenant_id, target_date)

        logger.info("Retrieved capacity status",
                    tenant_id=str(tenant_id), date=target_date.isoformat())

        return capacity_status

    except Exception as e:
        logger.error("Error getting capacity status",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get capacity status")


@router.get(
    route_builder.build_operations_route("capacity/availability"),
    response_model=dict
)
async def check_resource_availability(
    tenant_id: UUID = Path(...),
    target_date: date = Query(...),
    required_capacity: float = Query(..., gt=0),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Check if capacity is available for scheduling"""
    try:
        availability = await production_service.check_resource_availability(
            tenant_id, target_date, required_capacity
        )

        logger.info("Checked resource availability",
                    tenant_id=str(tenant_id),
                    date=target_date.isoformat(),
                    required=required_capacity)

        return availability

    except Exception as e:
        logger.error("Error checking resource availability",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to check resource availability")


@router.post(
    route_builder.build_operations_route("capacity/reserve"),
    response_model=dict
)
async def reserve_capacity(
    tenant_id: UUID = Path(...),
    target_date: date = Query(...),
    capacity_amount: float = Query(..., gt=0),
    batch_id: UUID = Query(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Reserve capacity for a batch"""
    try:
        reservation = await production_service.reserve_capacity(
            tenant_id, target_date, capacity_amount, batch_id
        )

        logger.info("Reserved production capacity",
                    tenant_id=str(tenant_id),
                    date=target_date.isoformat(),
                    amount=capacity_amount,
                    batch_id=str(batch_id))

        return reservation

    except ValueError as e:
        logger.warning("Cannot reserve capacity", error=str(e))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error reserving capacity",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to reserve capacity")


@router.get(
    "/api/v1/tenants/{tenant_id}/production/capacity/date/{date}",
    response_model=list
)
async def get_capacity_by_date(
    tenant_id: UUID = Path(...),
    target_date: date = Path(..., alias="date", description="Date to retrieve capacity for (format: YYYY-MM-DD)"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Get capacity by date (a direct route is used here to support the date path parameter)"""
    try:
        capacity_data = await production_service.get_capacity_by_date(tenant_id, target_date)

        logger.info("Retrieved capacity by date",
                    tenant_id=str(tenant_id), date=target_date.isoformat())

        return capacity_data

    except Exception as e:
        logger.error("Error getting capacity by date",
                     error=str(e), tenant_id=str(tenant_id), date=target_date.isoformat())
        raise HTTPException(status_code=500, detail="Failed to get capacity by date")


@router.get(
    route_builder.build_operations_route("capacity/bottlenecks"),
    response_model=dict
)
async def get_capacity_bottlenecks(
    tenant_id: UUID = Path(...),
    days_ahead: int = Query(7, ge=1, le=30),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Identify capacity bottlenecks in the upcoming period"""
    try:
        bottlenecks = await production_service.predict_capacity_bottlenecks(tenant_id, days_ahead)

        logger.info("Retrieved capacity bottlenecks prediction",
                    tenant_id=str(tenant_id), days_ahead=days_ahead)

        return bottlenecks

    except Exception as e:
        logger.error("Error getting capacity bottlenecks",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get capacity bottlenecks")


# ===== TRANSFORMATION OPERATIONS =====

@router.post(
    route_builder.build_operations_route("batches/complete-with-transformation"),
    response_model=dict
)
async def complete_batch_with_transformation(
    tenant_id: UUID = Path(...),
    batch_id: UUID = Query(...),
    transformation_data: Optional[dict] = None,
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Complete a batch and create a product transformation record"""
    try:
        result = await production_service.complete_batch_with_transformation(
            tenant_id, batch_id, transformation_data
        )

        logger.info("Completed batch with transformation",
                    tenant_id=str(tenant_id),
                    batch_id=str(batch_id))

        return result

    except ValueError as e:
        logger.warning("Cannot complete batch with transformation", error=str(e))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error completing batch with transformation",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to complete batch with transformation")


@router.post(
    route_builder.build_operations_route("transform-par-baked"),
    response_model=dict
)
async def transform_par_baked_products(
    tenant_id: UUID = Path(...),
    source_batch_id: UUID = Query(...),
    target_quantity: float = Query(..., gt=0),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Transform par-baked products to fully baked"""
    try:
        result = await production_service.transform_par_baked_to_fresh(
            tenant_id, source_batch_id, target_quantity
        )

        logger.info("Transformed par-baked products",
                    tenant_id=str(tenant_id),
                    source_batch_id=str(source_batch_id),
                    quantity=target_quantity)

        return result

    except ValueError as e:
        logger.warning("Cannot transform products", error=str(e))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error transforming products",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to transform products")


# ===== SCHEDULER OPERATIONS =====

@router.post(
    route_builder.build_operations_route("scheduler/trigger")
)
@monitor_performance("trigger_production_scheduler")
async def trigger_production_scheduler(
    tenant_id: UUID = Path(...),
    request: Request = None
):
    """
    Manually trigger the production scheduler for the current tenant

    This endpoint is primarily for testing and development purposes.
    It triggers the production schedule generation process manually.
    """
    try:
        # Get the scheduler service from app state
        if hasattr(request.app.state, 'scheduler_service'):
            scheduler_service = request.app.state.scheduler_service
            await scheduler_service.test_production_schedule_generation()

            logger.info("Production scheduler triggered manually",
                        tenant_id=str(tenant_id))

            return {
                "success": True,
                "message": "Production scheduler executed successfully",
                "tenant_id": str(tenant_id)
            }
        else:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="Scheduler service is not available"
            )

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error triggering production scheduler",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error triggering production scheduler: {str(e)}"
        )
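

# Illustrative only: how scheduler_service ends up on app.state, which the
# endpoint above depends on. The stand-in class and lifespan function below
# are assumptions for the sketch; the app.state wiring mirrors how
# notification_service and production_alert_service are fetched elsewhere
# in this service.
from contextlib import asynccontextmanager
from fastapi import FastAPI


class _ExampleSchedulerService:
    """Hypothetical stand-in for the real scheduler service."""
    async def test_production_schedule_generation(self) -> None:
        ...


@asynccontextmanager
async def _example_lifespan(app: FastAPI):
    # Attributes set on app.state at startup are visible to every handler,
    # which is what the hasattr(request.app.state, 'scheduler_service')
    # check above relies on.
    app.state.scheduler_service = _ExampleSchedulerService()
    yield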

96
services/production/app/api/production_orders_operations.py
Normal file
@@ -0,0 +1,96 @@
# services/production/app/api/production_orders_operations.py
"""
Tenant Data Deletion Operations (Internal Service Only)
"""

from fastapi import APIRouter, Depends, HTTPException, Path
from sqlalchemy.ext.asyncio import AsyncSession
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import service_only_access
from shared.routing import RouteBuilder
from shared.services.tenant_deletion import TenantDataDeletionResult
from app.core.database import get_db
from app.services.tenant_deletion_service import ProductionTenantDeletionService

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-tenant-deletion"])


@router.delete(
    route_builder.build_base_route("tenant/{tenant_id}", include_tenant_prefix=False),
    response_model=dict
)
@service_only_access
async def delete_tenant_data(
    tenant_id: str = Path(..., description="Tenant ID to delete data for"),
    current_user: dict = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Delete all production data for a tenant (internal service only)
    """
    try:
        logger.info("production.tenant_deletion.api_called", tenant_id=tenant_id)

        deletion_service = ProductionTenantDeletionService(db)
        result = await deletion_service.safe_delete_tenant_data(tenant_id)

        if not result.success:
            raise HTTPException(
                status_code=500,
                detail=f"Tenant data deletion failed: {', '.join(result.errors)}"
            )

        return {
            "message": "Tenant data deletion completed successfully",
            "summary": result.to_dict()
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error("production.tenant_deletion.api_error", tenant_id=tenant_id, error=str(e), exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to delete tenant data: {str(e)}")


@router.get(
    route_builder.build_base_route("tenant/{tenant_id}/deletion-preview", include_tenant_prefix=False),
    response_model=dict
)
@service_only_access
async def preview_tenant_data_deletion(
    tenant_id: str = Path(..., description="Tenant ID to preview deletion for"),
    current_user: dict = Depends(get_current_user_dep),
    db: AsyncSession = Depends(get_db)
):
    """
    Preview what data would be deleted for a tenant (dry-run)
    """
    try:
        logger.info("production.tenant_deletion.preview_called", tenant_id=tenant_id)

        deletion_service = ProductionTenantDeletionService(db)
        preview_data = await deletion_service.get_tenant_data_preview(tenant_id)

        # A preview never deletes anything, so it always succeeds; the counts
        # are wrapped in TenantDataDeletionResult for a consistent shape.
        result = TenantDataDeletionResult(tenant_id=tenant_id, service_name=deletion_service.service_name)
        result.deleted_counts = preview_data
        result.success = True

        return {
            "tenant_id": tenant_id,
            "service": "production-service",
            "data_counts": result.deleted_counts,
            "total_items": sum(result.deleted_counts.values())
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error("production.tenant_deletion.preview_error", tenant_id=tenant_id, error=str(e), exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to preview tenant data deletion: {str(e)}")

223
services/production/app/api/production_schedules.py
Normal file
@@ -0,0 +1,223 @@
# services/production/app/api/production_schedules.py
"""
Production Schedules API - ATOMIC CRUD operations on ProductionSchedule model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from datetime import date, datetime, timedelta
from uuid import UUID
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.security import create_audit_logger, AuditSeverity, AuditAction
from app.core.database import get_db
from app.services.production_service import ProductionService
from app.models import AuditLog
from app.schemas.production import (
    ProductionScheduleCreate,
    ProductionScheduleUpdate,
    ProductionScheduleResponse
)
from app.core.config import settings

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-schedules"])

# Initialize audit logger with the production service's AuditLog model
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)


@router.get(
    route_builder.build_base_route("schedules"),
    response_model=dict
)
async def get_production_schedule(
    tenant_id: UUID = Path(...),
    start_date: Optional[date] = Query(None, description="Start date for schedule"),
    end_date: Optional[date] = Query(None, description="End date for schedule"),
    current_user: dict = Depends(get_current_user_dep),
    db=Depends(get_db)
):
    """Get production schedule for a date range"""
    try:
        # Default to the next 7 days if no dates are provided
        if not start_date:
            start_date = datetime.now().date()
        if not end_date:
            end_date = start_date + timedelta(days=7)

        from app.repositories.production_schedule_repository import ProductionScheduleRepository
        schedule_repo = ProductionScheduleRepository(db)

        schedules = await schedule_repo.get_schedules_by_date_range(
            str(tenant_id), start_date, end_date
        )

        schedule_data = {
            "start_date": start_date.isoformat(),
            "end_date": end_date.isoformat(),
            "schedules": [
                {
                    "id": str(schedule.id),
                    "date": schedule.schedule_date.isoformat(),
                    "shift_start": schedule.shift_start.isoformat(),
                    "shift_end": schedule.shift_end.isoformat(),
                    "capacity_utilization": schedule.utilization_percentage,
                    "batches_planned": schedule.total_batches_planned,
                    "is_finalized": schedule.is_finalized
                }
                for schedule in schedules
            ],
            "total_schedules": len(schedules)
        }

        logger.info("Retrieved production schedule",
                    tenant_id=str(tenant_id),
                    start_date=start_date.isoformat(),
                    end_date=end_date.isoformat(),
                    schedules_count=len(schedules))

        return schedule_data

    except Exception as e:
        logger.error("Error getting production schedule",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get production schedule")


@router.get(
    route_builder.build_resource_detail_route("schedules", "schedule_id"),
    response_model=ProductionScheduleResponse
)
async def get_production_schedule_details(
    tenant_id: UUID = Path(...),
    schedule_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db=Depends(get_db)
):
    """Retrieve full schedule details including assignments"""
    try:
        from app.repositories.production_schedule_repository import ProductionScheduleRepository
        schedule_repo = ProductionScheduleRepository(db)

        schedule = await schedule_repo.get(schedule_id)
        if not schedule or str(schedule.tenant_id) != str(tenant_id):
            raise HTTPException(status_code=404, detail="Production schedule not found")

        logger.info("Retrieved production schedule details",
                    schedule_id=str(schedule_id), tenant_id=str(tenant_id))

        return ProductionScheduleResponse.model_validate(schedule)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error getting production schedule details",
                     error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to get production schedule details")


@router.post(
    route_builder.build_base_route("schedules"),
    response_model=ProductionScheduleResponse
)
@require_user_role(['admin', 'owner'])
async def create_production_schedule(
    schedule_data: ProductionScheduleCreate,
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Generate or manually create a daily/shift schedule (Admin+ only)"""
    try:
        schedule = await production_service.create_production_schedule(tenant_id, schedule_data)

        logger.info("Created production schedule",
                    schedule_id=str(schedule.id), tenant_id=str(tenant_id))

        return ProductionScheduleResponse.model_validate(schedule)

    except ValueError as e:
        logger.warning("Invalid schedule data", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error creating production schedule",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to create production schedule")


@router.put(
    route_builder.build_resource_detail_route("schedules", "schedule_id"),
    response_model=ProductionScheduleResponse
)
@require_user_role(['admin', 'owner'])
async def update_production_schedule(
    schedule_update: ProductionScheduleUpdate,
    tenant_id: UUID = Path(...),
    schedule_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """Edit a schedule before finalizing (Admin+ only)"""
    try:
        schedule = await production_service.update_production_schedule(tenant_id, schedule_id, schedule_update)

        logger.info("Updated production schedule",
                    schedule_id=str(schedule_id), tenant_id=str(tenant_id))

        return ProductionScheduleResponse.model_validate(schedule)

    except ValueError as e:
        logger.warning("Invalid schedule update", error=str(e), schedule_id=str(schedule_id))
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error("Error updating production schedule",
                     error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to update production schedule")


@router.delete(
    route_builder.build_resource_detail_route("schedules", "schedule_id")
)
async def delete_production_schedule(
    tenant_id: UUID = Path(...),
    schedule_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db=Depends(get_db)
):
    """Delete a production schedule (if not finalized)"""
    try:
        from app.repositories.production_schedule_repository import ProductionScheduleRepository
        schedule_repo = ProductionScheduleRepository(db)

        schedule = await schedule_repo.get(schedule_id)
        if not schedule or str(schedule.tenant_id) != str(tenant_id):
            raise HTTPException(status_code=404, detail="Production schedule not found")

        if schedule.is_finalized:
            raise HTTPException(status_code=400, detail="Cannot delete finalized schedule")

        await schedule_repo.delete(schedule_id)

        logger.info("Deleted production schedule",
                    schedule_id=str(schedule_id), tenant_id=str(tenant_id))

        return {"message": "Production schedule deleted successfully"}

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error deleting production schedule",
                     error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to delete production schedule")

441
services/production/app/api/quality_templates.py
Normal file
@@ -0,0 +1,441 @@
# services/production/app/api/quality_templates.py
"""
Quality Check Templates API - CRUD operations on quality check templates
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, status
from typing import Optional
from uuid import UUID
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder, RouteCategory
from app.core.database import get_db
from app.services.quality_template_service import QualityTemplateService
from app.models.production import ProcessStage, QualityCheckTemplate
from app.schemas.quality_templates import (
    QualityCheckTemplateCreate,
    QualityCheckTemplateUpdate,
    QualityCheckTemplateResponse,
    QualityCheckTemplateList,
    QualityCheckType
)

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["quality-templates"])


# ===== Quality Template CRUD Endpoints =====

@router.get(
    route_builder.build_base_route("quality-templates"),
    response_model=QualityCheckTemplateList
)
async def list_quality_templates(
    tenant_id: UUID = Path(...),
    stage: Optional[ProcessStage] = Query(None, description="Filter by process stage"),
    check_type: Optional[QualityCheckType] = Query(None, description="Filter by check type"),
    is_active: Optional[bool] = Query(True, description="Filter by active status"),
    skip: int = Query(0, ge=0, description="Number of templates to skip"),
    limit: int = Query(100, ge=1, le=1000, description="Number of templates to return"),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """
    List quality check templates with filtering and pagination

    Filters:
    - stage: Filter by applicable process stage
    - check_type: Filter by type of quality check
    - is_active: Filter by active status (default: True)
    """
    try:
        service = QualityTemplateService(db)

        templates, total = await service.get_templates(
            tenant_id=str(tenant_id),
            stage=stage,
            check_type=check_type.value if check_type else None,
            is_active=is_active,
            skip=skip,
            limit=limit
        )

        logger.info("Retrieved quality templates",
                    tenant_id=str(tenant_id),
                    total=total,
                    filters={"stage": stage, "check_type": check_type, "is_active": is_active})

        # model_validate is the Pydantic v2 API used throughout this service
        return QualityCheckTemplateList(
            templates=[QualityCheckTemplateResponse.model_validate(t) for t in templates],
            total=total,
            skip=skip,
            limit=limit
        )

    except Exception as e:
        logger.error("Error listing quality templates",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve quality templates"
        )
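

# Illustrative only: how a client can page through this endpoint. The URL
# shape follows the base-route convention used above; httpx and the bearer
# token are assumptions, not part of this module.
import httpx


async def _example_list_all_templates(base_url: str, tenant_id: str, token: str) -> list:
    """Fetch every active template by walking the skip/limit pages."""
    url = f"{base_url}/api/v1/tenants/{tenant_id}/production/quality-templates"
    headers = {"Authorization": f"Bearer {token}"}
    collected = []
    skip = 0
    async with httpx.AsyncClient(timeout=10.0) as client:
        while True:
            resp = await client.get(
                url,
                params={"skip": skip, "limit": 100, "is_active": True},
                headers=headers,
            )
            resp.raise_for_status()
            page = resp.json()
            collected.extend(page["templates"])
            skip += 100
            if skip >= page["total"]:
                return collected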


@router.post(
    route_builder.build_base_route("quality-templates"),
    response_model=QualityCheckTemplateResponse,
    status_code=status.HTTP_201_CREATED
)
@require_user_role(['admin', 'owner', 'member'])
async def create_quality_template(
    template_data: QualityCheckTemplateCreate,
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """Create a new quality check template"""
    try:
        service = QualityTemplateService(db)

        # Add created_by from current user
        template_dict = template_data.dict()
        template_dict['created_by'] = UUID(current_user["user_id"])
        template_create = QualityCheckTemplateCreate(**template_dict)

        # Create template via service (handles validation and business rules)
        template = await service.create_template(
            tenant_id=str(tenant_id),
            template_data=template_create
        )

        # Commit the transaction to persist changes
        await db.commit()

        logger.info("Created quality template",
                    template_id=str(template.id),
                    template_name=template.name,
                    tenant_id=str(tenant_id))

        return QualityCheckTemplateResponse.from_orm(template)

    except ValueError as e:
        # Business rule validation errors
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except Exception as e:
        # Roll back the open transaction before reporting failure
        await db.rollback()
        logger.error("Error creating quality template",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to create quality template"
        )
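
# Illustrative request body for the creation endpoint above (field names are
# assumptions inferred from the validation rules later in this module, not a
# confirmed schema):
#
#   POST .../production/quality-templates
#   {
#       "name": "Dough temperature check",
#       "check_type": "temperature",
#       "min_value": 22.0,
#       "max_value": 26.0,
#       "weight": 2.0
#   }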


@router.get(
    route_builder.build_resource_detail_route("quality-templates", "template_id"),
    response_model=QualityCheckTemplateResponse
)
async def get_quality_template(
    tenant_id: UUID = Path(...),
    template_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """Get a specific quality check template"""
    try:
        service = QualityTemplateService(db)

        template = await service.get_template(
            tenant_id=str(tenant_id),
            template_id=template_id
        )

        if not template:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Quality template not found"
            )

        return QualityCheckTemplateResponse.from_orm(template)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error getting quality template",
                     error=str(e),
                     template_id=str(template_id),
                     tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve quality template"
        )


@router.put(
    route_builder.build_resource_detail_route("quality-templates", "template_id"),
    response_model=QualityCheckTemplateResponse
)
@require_user_role(['admin', 'owner', 'member'])
async def update_quality_template(
    template_data: QualityCheckTemplateUpdate,
    tenant_id: UUID = Path(...),
    template_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """Update a quality check template"""
    try:
        service = QualityTemplateService(db)

        # Update template via service (handles validation and business rules)
        template = await service.update_template(
            tenant_id=str(tenant_id),
            template_id=template_id,
            template_data=template_data
        )

        if not template:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Quality template not found"
            )

        # Commit the transaction to persist changes
        await db.commit()

        logger.info("Updated quality template",
                    template_id=str(template_id),
                    tenant_id=str(tenant_id))

        return QualityCheckTemplateResponse.from_orm(template)

    except ValueError as e:
        # Business rule validation errors
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except HTTPException:
        raise
    except Exception as e:
        # Roll back the open transaction before reporting failure
        await db.rollback()
        logger.error("Error updating quality template",
                     error=str(e),
                     template_id=str(template_id),
                     tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to update quality template"
        )


@router.delete(
    route_builder.build_resource_detail_route("quality-templates", "template_id"),
    status_code=status.HTTP_204_NO_CONTENT
)
@require_user_role(['admin', 'owner'])
async def delete_quality_template(
    tenant_id: UUID = Path(...),
    template_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """
    Delete a quality check template

    Note: Service layer determines whether to use soft or hard delete
    based on business rules (checking dependencies, etc.)
    """
    try:
        service = QualityTemplateService(db)

        # Delete template via service (handles business rules)
        success = await service.delete_template(
            tenant_id=str(tenant_id),
            template_id=template_id
        )

        if not success:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Quality template not found"
            )

        # Commit the transaction to persist changes
        await db.commit()

        logger.info("Deleted quality template",
                    template_id=str(template_id),
                    tenant_id=str(tenant_id))

    except HTTPException:
        raise
    except Exception as e:
        await db.rollback()
        logger.error("Error deleting quality template",
                     error=str(e),
                     template_id=str(template_id),
                     tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to delete quality template"
        )


# ===== Additional Quality Template Operations =====

@router.get(
    route_builder.build_custom_route(
        RouteCategory.BASE,
        ["quality-templates", "stages", "{stage}"]
    ),
    response_model=QualityCheckTemplateList
)
async def get_templates_for_stage(
    tenant_id: UUID = Path(...),
    stage: ProcessStage = Path(...),
    is_active: bool = Query(True, description="Filter by active status"),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """Get all quality templates applicable to a specific process stage"""
    try:
        service = QualityTemplateService(db)

        templates = await service.get_templates_for_stage(
            tenant_id=str(tenant_id),
            stage=stage,
            is_active=is_active
        )

        logger.info("Retrieved templates for stage",
                    tenant_id=str(tenant_id),
                    stage=stage,
                    count=len(templates))

        return QualityCheckTemplateList(
            templates=[QualityCheckTemplateResponse.from_orm(t) for t in templates],
            total=len(templates),
            skip=0,
            limit=len(templates)
        )

    except Exception as e:
        logger.error("Error getting templates for stage",
                     error=str(e),
                     stage=stage,
                     tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve templates for stage"
        )
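
# The custom route above resolves to a URL of roughly this shape (a sketch —
# the exact prefix comes from RouteBuilder('production')):
#
#   GET /api/v1/tenants/{tenant_id}/production/quality-templates/stages/{stage}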


@router.post(
    route_builder.build_resource_action_route("quality-templates", "template_id", "duplicate"),
    response_model=QualityCheckTemplateResponse,
    status_code=status.HTTP_201_CREATED
)
@require_user_role(['admin', 'owner', 'member'])
async def duplicate_quality_template(
    tenant_id: UUID = Path(...),
    template_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
    db = Depends(get_db)
):
    """Duplicate an existing quality check template"""
    try:
        service = QualityTemplateService(db)

        # Duplicate template via service (handles business rules)
        duplicate = await service.duplicate_template(
            tenant_id=str(tenant_id),
            template_id=template_id
        )

        if not duplicate:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Quality template not found"
            )

        # Commit the transaction to persist changes
        await db.commit()

        logger.info("Duplicated quality template",
                    original_id=str(template_id),
                    duplicate_id=str(duplicate.id),
                    tenant_id=str(tenant_id))

        return QualityCheckTemplateResponse.from_orm(duplicate)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error duplicating quality template",
                     error=str(e),
                     template_id=str(template_id),
                     tenant_id=str(tenant_id))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to duplicate quality template"
        )


@router.post(
    route_builder.build_operations_route("quality-templates/validate"),
    response_model=dict
)
@require_user_role(['admin', 'owner', 'member'])
async def validate_quality_template(
    template_data: dict,
    tenant_id: UUID = Path(...),
    current_user: dict = Depends(get_current_user_dep),
):
    """
    Validate quality template configuration without creating it

    Returns validation result with any errors found
    """
    try:
        errors = []

        # Basic validation
        if not template_data.get('name'):
            errors.append("Template name is required")

        if not template_data.get('check_type'):
            errors.append("Check type is required")

        # Validate measurement fields
        check_type = template_data.get('check_type')
        if check_type in ['measurement', 'temperature', 'weight']:
            if template_data.get('min_value') is not None and template_data.get('max_value') is not None:
                if template_data['min_value'] >= template_data['max_value']:
                    errors.append("Minimum value must be less than maximum value")

        # Validate weight
        weight = template_data.get('weight', 1.0)
        if weight < 0 or weight > 10:
            errors.append("Weight must be between 0 and 10")

        is_valid = len(errors) == 0

        logger.info("Validated quality template",
                    tenant_id=str(tenant_id),
                    valid=is_valid,
                    error_count=len(errors))

        return {
            "valid": is_valid,
            "errors": errors
        }

    except Exception as e:
        logger.error("Error validating quality template",
                     error=str(e), tenant_id=str(tenant_id))
        return {
            "valid": False,
            "errors": [f"Validation error: {str(e)}"]
        }
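
# Illustrative round trip for the validation endpoint above (path and values
# are examples only; the real path comes from build_operations_route):
#
#   POST .../production/operations/quality-templates/validate
#   {"name": "Oven temperature", "check_type": "temperature",
#    "min_value": 180, "max_value": 160}
#
#   -> {"valid": false,
#       "errors": ["Minimum value must be less than maximum value"]}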
293
services/production/app/api/sustainability.py
Normal file
293
services/production/app/api/sustainability.py
Normal file
@@ -0,0 +1,293 @@
"""
Production Service - Sustainability API
Exposes production-specific sustainability metrics, following the microservices
principle that each service owns its domain data.
"""

from datetime import datetime, timedelta
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Path, Query, Request
import structlog

from shared.auth.decorators import get_current_user_dep
from app.services.production_service import ProductionService
from shared.routing import RouteBuilder

logger = structlog.get_logger()

# Create route builder for consistent URL structure
route_builder = RouteBuilder('production')

router = APIRouter(tags=["production-sustainability"])


def get_production_service(request: Request) -> ProductionService:
    """Dependency injection for production service"""
    from app.core.database import database_manager
    from app.core.config import settings
    notification_service = getattr(request.app.state, 'notification_service', None)
    return ProductionService(database_manager, settings, notification_service)
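
# Note: the imports inside get_production_service are deferred to request time,
# presumably to avoid circular imports at module load (an assumption based on
# the structure, not documented behavior).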


@router.get(
    "/api/v1/tenants/{tenant_id}/production/sustainability/waste-metrics",
    response_model=dict,
    summary="Get production waste metrics",
    description="""
    Returns production-specific waste metrics for sustainability tracking.

    This endpoint is part of the microservices architecture where each service
    owns its domain data. The frontend aggregates data from multiple services.

    Metrics include:
    - Total production waste from batches (waste_quantity + defect_quantity)
    - Production volumes (planned vs actual)
    - Waste breakdown by defect type
    - AI-assisted batch tracking
    """
)
async def get_production_waste_metrics(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    start_date: Optional[datetime] = Query(None, description="Start date for metrics (default: 30 days ago)"),
    end_date: Optional[datetime] = Query(None, description="End date for metrics (default: now)"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get production waste metrics for the sustainability dashboard

    Returns production-specific metrics that the frontend aggregates with
    inventory metrics for the complete sustainability picture.
    """
    try:
        # Set default dates
        if not end_date:
            end_date = datetime.now()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Get waste analytics from production service
        waste_data = await production_service.get_waste_analytics(
            tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date
        )

        # Enrich with metadata
        response = {
            **waste_data,
            "service": "production",
            "period": {
                "start_date": start_date.isoformat(),
                "end_date": end_date.isoformat(),
                "days": (end_date - start_date).days
            },
            "metadata": {
                "data_source": "production_batches",
                "calculation_method": "SUM(waste_quantity + defect_quantity)",
                "filters_applied": {
                    "status": ["COMPLETED", "QUALITY_CHECK"],
                    "date_range": f"{start_date.date()} to {end_date.date()}"
                }
            }
        }

        logger.info(
            "Production waste metrics retrieved",
            tenant_id=str(tenant_id),
            total_waste_kg=waste_data.get('total_production_waste', 0),
            period_days=(end_date - start_date).days,
            user_id=current_user.get('user_id')
        )

        return response

    except Exception as e:
        logger.error(
            "Error getting production waste metrics",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise
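
# Sketch of the enriched response shape produced above (keys are taken from
# the code; concrete values are illustrative):
#
#   {
#       "total_production_waste": 42.5,
#       "service": "production",
#       "period": {"start_date": "...", "end_date": "...", "days": 30},
#       "metadata": {"data_source": "production_batches", ...}
#   }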


@router.get(
    "/api/v1/tenants/{tenant_id}/production/sustainability/baseline",
    response_model=dict,
    summary="Get production baseline metrics",
    description="""
    Returns baseline production metrics from the first 90 days of operation.

    Used by the frontend to calculate SDG 12.3 compliance (waste reduction targets).
    If the tenant has less than 90 days of data, returns an industry average baseline.
    """
)
async def get_production_baseline(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get baseline production metrics for SDG compliance calculations

    The frontend uses this to calculate:
    - Waste reduction percentage vs baseline
    - Progress toward SDG 12.3 targets
    - Grant eligibility based on improvement
    """
    try:
        baseline_data = await production_service.get_baseline_metrics(tenant_id)

        # Add metadata
        response = {
            **baseline_data,
            "service": "production",
            "metadata": {
                "baseline_period_days": 90,
                "calculation_method": "First 90 days of production data",
                "fallback": "Industry average (25%) if insufficient data"
            }
        }

        logger.info(
            "Production baseline metrics retrieved",
            tenant_id=str(tenant_id),
            has_baseline=baseline_data.get('has_baseline', False),
            baseline_waste_pct=baseline_data.get('waste_percentage'),
            user_id=current_user.get('user_id')
        )

        return response

    except Exception as e:
        logger.error(
            "Error getting production baseline",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise
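
# How the frontend would typically derive the reduction figure from this
# payload (a sketch; variable names are assumptions):
#
#   baseline_pct = response["waste_percentage"]            # e.g. 25.0
#   reduction = (baseline_pct - current_pct) / baseline_pct * 100
#   # SDG 12.3 targets halving waste, i.e. reduction >= 50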


@router.get(
    "/api/v1/tenants/{tenant_id}/production/sustainability/ai-impact",
    response_model=dict,
    summary="Get AI waste reduction impact",
    description="""
    Analyzes the impact of AI-assisted production on waste reduction.

    Compares waste rates between:
    - AI-assisted batches (is_ai_assisted=true)
    - Manual batches (is_ai_assisted=false)

    Shows the ROI of AI features for sustainability.
    """
)
async def get_ai_waste_impact(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    start_date: Optional[datetime] = Query(None, description="Start date (default: 30 days ago)"),
    end_date: Optional[datetime] = Query(None, description="End date (default: now)"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get AI impact on waste reduction

    The frontend uses this to showcase:
    - Value proposition of AI features
    - Waste avoided through AI assistance
    - Financial ROI of AI investment
    """
    try:
        # Set default dates
        if not end_date:
            end_date = datetime.now()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Get AI impact analytics from the production service
        ai_impact = await production_service.get_ai_waste_impact(
            tenant_id=tenant_id,
            start_date=start_date,
            end_date=end_date
        )

        logger.info(
            "AI waste impact retrieved",
            tenant_id=str(tenant_id),
            ai_waste_reduction_pct=ai_impact.get('waste_reduction_percentage'),
            user_id=current_user.get('user_id')
        )

        return ai_impact

    except Exception as e:
        logger.error(
            "Error getting AI waste impact",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise
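
# The logging above implies the payload carries at least
# "waste_reduction_percentage"; a minimal illustrative response:
#
#   {"waste_reduction_percentage": 12.4, "available": true}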


@router.get(
    "/api/v1/tenants/{tenant_id}/production/sustainability/summary",
    response_model=dict,
    summary="Get production sustainability summary",
    description="""
    Quick summary endpoint combining all production sustainability metrics.

    Useful for dashboard widgets that need overview data without multiple calls.
    """
)
async def get_production_sustainability_summary(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=7, le=365, description="Number of days to analyze"),
    current_user: dict = Depends(get_current_user_dep),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get comprehensive production sustainability summary

    Combines waste metrics, baseline, and AI impact in one response.
    Optimized for dashboard widgets.
    """
    try:
        end_date = datetime.now()
        start_date = end_date - timedelta(days=days)

        # Gather the individual metrics (sequential calls within the service)
        waste_data = await production_service.get_waste_analytics(tenant_id, start_date, end_date)
        baseline_data = await production_service.get_baseline_metrics(tenant_id)

        # Try to get AI impact (may not be available for all tenants)
        try:
            ai_impact = await production_service.get_ai_waste_impact(tenant_id, start_date, end_date)
        except Exception:
            ai_impact = {"available": False}

        summary = {
            "service": "production",
            "period_days": days,
            "waste_metrics": waste_data,
            "baseline": baseline_data,
            "ai_impact": ai_impact,
            "last_updated": datetime.now().isoformat()
        }

        logger.info(
            "Production sustainability summary retrieved",
            tenant_id=str(tenant_id),
            period_days=days,
            user_id=current_user.get('user_id')
        )

        return summary

    except Exception as e:
        logger.error(
            "Error getting production sustainability summary",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise
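
# If true concurrency is wanted here, the two independent calls above could be
# issued together (a sketch; assumes the service methods share no session state):
#
#   import asyncio
#   waste_data, baseline_data = await asyncio.gather(
#       production_service.get_waste_analytics(tenant_id, start_date, end_date),
#       production_service.get_baseline_metrics(tenant_id),
#   )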