Initial commit - production deployment
6
services/production/app/__init__.py
Normal file
@@ -0,0 +1,6 @@
# ================================================================
# services/production/app/__init__.py
# ================================================================
"""
Production service application package
"""
6
services/production/app/api/__init__.py
Normal file
@@ -0,0 +1,6 @@
# ================================================================
# services/production/app/api/__init__.py
# ================================================================
"""
API routes and endpoints for production service
"""
528
services/production/app/api/analytics.py
Normal file
@@ -0,0 +1,528 @@
# services/production/app/api/analytics.py
"""
Analytics API endpoints for Production Service
Following standardized URL structure: /api/v1/tenants/{tenant_id}/production/analytics/{operation}
Requires: Professional or Enterprise subscription tier
"""

from datetime import date, datetime, timedelta
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import analytics_tier_required
from app.services.production_service import ProductionService
from app.core.config import settings
from shared.routing import RouteBuilder

logger = structlog.get_logger()

# Create route builder for consistent URL structure
route_builder = RouteBuilder('production')

router = APIRouter(tags=["production-analytics"])


def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
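# NOTE (illustrative, not part of the original commit): RouteBuilder lives in
# shared.routing and its implementation is not included here. Based on the URL
# convention stated in the module docstring, build_analytics_route() is assumed
# to expand roughly as in this hypothetical sketch:
#
#     route_builder.build_analytics_route("equipment-efficiency")
#     # -> "/api/v1/tenants/{tenant_id}/production/analytics/equipment-efficiency"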
# ===== ANALYTICS ENDPOINTS (Professional/Enterprise Only) =====
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("equipment-efficiency"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_equipment_efficiency(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None, description="Start date for analysis"),
|
||||
end_date: Optional[date] = Query(None, description="End date for analysis"),
|
||||
equipment_id: Optional[UUID] = Query(None, description="Filter by equipment"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Analyze equipment efficiency (Professional/Enterprise only)
|
||||
|
||||
Metrics:
|
||||
- Overall Equipment Effectiveness (OEE)
|
||||
- Availability rate
|
||||
- Performance rate
|
||||
- Quality rate
|
||||
- Downtime analysis
|
||||
"""
|
||||
try:
|
||||
# Set default dates
|
||||
if not end_date:
|
||||
end_date = datetime.now().date()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Use existing method: get_equipment_efficiency_analytics
|
||||
efficiency_data = await production_service.get_equipment_efficiency_analytics(tenant_id)
|
||||
|
||||
logger.info("Equipment efficiency analyzed",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_id=str(equipment_id) if equipment_id else "all",
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return efficiency_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error analyzing equipment efficiency",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to analyze equipment efficiency"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("production-trends"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_production_trends(
|
||||
tenant_id: UUID = Path(...),
|
||||
days_back: int = Query(90, ge=7, le=365, description="Days to analyze"),
|
||||
product_id: Optional[UUID] = Query(None, description="Filter by product"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Analyze production trends (Professional/Enterprise only)
|
||||
|
||||
Provides:
|
||||
- Production volume trends
|
||||
- Batch completion rates
|
||||
- Cycle time analysis
|
||||
- Quality trends
|
||||
- Seasonal patterns
|
||||
"""
|
||||
try:
|
||||
# Use existing methods: get_performance_analytics + get_yield_trends_analytics
|
||||
end_date_calc = datetime.now().date()
|
||||
start_date_calc = end_date_calc - timedelta(days=days_back)
|
||||
|
||||
performance = await production_service.get_performance_analytics(
|
||||
tenant_id, start_date_calc, end_date_calc
|
||||
)
|
||||
|
||||
# Map days_back to period string for yield trends
|
||||
period = "weekly" if days_back <= 30 else "monthly"
|
||||
yield_trends = await production_service.get_yield_trends_analytics(tenant_id, period)
|
||||
|
||||
trends = {
|
||||
"performance_metrics": performance,
|
||||
"yield_trends": yield_trends,
|
||||
"days_analyzed": days_back,
|
||||
"product_filter": str(product_id) if product_id else None
|
||||
}
|
||||
|
||||
logger.info("Production trends analyzed",
|
||||
tenant_id=str(tenant_id),
|
||||
days_analyzed=days_back,
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return trends
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error analyzing production trends",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to analyze production trends"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("capacity-utilization"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_capacity_utilization(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None),
|
||||
end_date: Optional[date] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Analyze production capacity utilization (Professional/Enterprise only)
|
||||
|
||||
Metrics:
|
||||
- Capacity utilization percentage
|
||||
- Bottleneck identification
|
||||
- Resource allocation efficiency
|
||||
- Optimization recommendations
|
||||
"""
|
||||
try:
|
||||
if not end_date:
|
||||
end_date = datetime.now().date()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Use existing method: get_capacity_usage_report
|
||||
utilization = await production_service.get_capacity_usage_report(
|
||||
tenant_id, start_date, end_date
|
||||
)
|
||||
|
||||
logger.info("Capacity utilization analyzed",
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return utilization
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error analyzing capacity utilization",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to analyze capacity utilization"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("quality-metrics"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_quality_metrics(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None),
|
||||
end_date: Optional[date] = Query(None),
|
||||
product_id: Optional[UUID] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Analyze quality control metrics (Professional/Enterprise only)
|
||||
|
||||
Metrics:
|
||||
- First pass yield
|
||||
- Defect rates by type
|
||||
- Quality trends over time
|
||||
- Root cause analysis
|
||||
"""
|
||||
try:
|
||||
if not end_date:
|
||||
end_date = datetime.now().date()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Use existing methods: get_quality_trends + get_top_defects_analytics
|
||||
quality_trends = await production_service.get_quality_trends(
|
||||
tenant_id, start_date, end_date
|
||||
)
|
||||
top_defects = await production_service.get_top_defects_analytics(tenant_id)
|
||||
|
||||
quality_data = {
|
||||
"quality_trends": quality_trends,
|
||||
"top_defects": top_defects,
|
||||
"period": {
|
||||
"start_date": start_date.isoformat(),
|
||||
"end_date": end_date.isoformat()
|
||||
},
|
||||
"product_filter": str(product_id) if product_id else None
|
||||
}
|
||||
|
||||
logger.info("Quality metrics analyzed",
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return quality_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error analyzing quality metrics",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to analyze quality metrics"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("waste-analysis"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_production_waste_analysis(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None),
|
||||
end_date: Optional[date] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Analyze production waste (Professional/Enterprise only)
|
||||
|
||||
Provides:
|
||||
- Material waste percentages
|
||||
- Waste by category/product
|
||||
- Cost impact analysis
|
||||
- Reduction recommendations
|
||||
"""
|
||||
try:
|
||||
if not end_date:
|
||||
end_date = datetime.now().date()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Use existing method: get_batch_statistics to calculate waste from yield data
|
||||
batch_stats = await production_service.get_batch_statistics(
|
||||
tenant_id, start_date, end_date
|
||||
)
|
||||
|
||||
# Calculate waste metrics from batch statistics
|
||||
waste_analysis = {
|
||||
"batch_statistics": batch_stats,
|
||||
"waste_metrics": {
|
||||
"calculated_from": "yield_variance",
|
||||
"note": "Waste derived from planned vs actual quantity differences"
|
||||
},
|
||||
"period": {
|
||||
"start_date": start_date.isoformat(),
|
||||
"end_date": end_date.isoformat()
|
||||
}
|
||||
}
|
||||
|
||||
logger.info("Production waste analyzed",
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return waste_analysis
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error analyzing production waste",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to analyze production waste"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("cost-analysis"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_production_cost_analysis(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None),
|
||||
end_date: Optional[date] = Query(None),
|
||||
product_id: Optional[UUID] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Analyze production costs (Professional/Enterprise only)
|
||||
|
||||
Metrics:
|
||||
- Cost per unit
|
||||
- Direct vs indirect costs
|
||||
- Cost trends over time
|
||||
- Cost variance analysis
|
||||
- Profitability insights
|
||||
"""
|
||||
try:
|
||||
if not end_date:
|
||||
end_date = datetime.now().date()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Use existing method: get_batch_statistics for cost-related data
|
||||
batch_stats = await production_service.get_batch_statistics(
|
||||
tenant_id, start_date, end_date
|
||||
)
|
||||
|
||||
cost_analysis = {
|
||||
"batch_statistics": batch_stats,
|
||||
"cost_metrics": {
|
||||
"note": "Cost analysis requires additional cost tracking data",
|
||||
"available_metrics": ["batch_count", "production_volume", "efficiency"]
|
||||
},
|
||||
"period": {
|
||||
"start_date": start_date.isoformat(),
|
||||
"end_date": end_date.isoformat()
|
||||
},
|
||||
"product_filter": str(product_id) if product_id else None
|
||||
}
|
||||
|
||||
logger.info("Production cost analyzed",
|
||||
tenant_id=str(tenant_id),
|
||||
product_id=str(product_id) if product_id else "all",
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return cost_analysis
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error analyzing production costs",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to analyze production costs"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_analytics_route("predictive-maintenance"),
|
||||
response_model=dict
|
||||
)
|
||||
@analytics_tier_required
|
||||
async def get_predictive_maintenance_insights(
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: Optional[UUID] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get predictive maintenance insights (Professional/Enterprise only)
|
||||
|
||||
Provides:
|
||||
- Equipment failure predictions
|
||||
- Maintenance schedule recommendations
|
||||
- Parts replacement forecasts
|
||||
- Downtime risk assessment
|
||||
"""
|
||||
try:
|
||||
# Use existing method: predict_capacity_bottlenecks as proxy for maintenance insights
|
||||
days_ahead = 7 # Predict one week ahead
|
||||
bottlenecks = await production_service.predict_capacity_bottlenecks(
|
||||
tenant_id, days_ahead
|
||||
)
|
||||
|
||||
maintenance_insights = {
|
||||
"capacity_bottlenecks": bottlenecks,
|
||||
"maintenance_recommendations": {
|
||||
"note": "Derived from capacity predictions and bottleneck analysis",
|
||||
"days_predicted": days_ahead
|
||||
},
|
||||
"equipment_filter": str(equipment_id) if equipment_id else None
|
||||
}
|
||||
|
||||
logger.info("Predictive maintenance insights generated",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_id=str(equipment_id) if equipment_id else "all",
|
||||
user_id=current_user.get('user_id'))
|
||||
|
||||
return maintenance_insights
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error generating predictive maintenance insights",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to generate predictive maintenance insights"
|
||||
)
|
||||
|
||||
|
||||
# ===== SUSTAINABILITY / WASTE ANALYTICS ENDPOINT =====
|
||||
# Called by Inventory Service for sustainability metrics
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/waste-analytics",
|
||||
response_model=dict
|
||||
)
|
||||
async def get_waste_analytics_for_sustainability(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: datetime = Query(..., description="Start date for waste analysis"),
|
||||
end_date: datetime = Query(..., description="End date for waste analysis"),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get production waste analytics for sustainability tracking
|
||||
|
||||
This endpoint is called by the Inventory Service's sustainability module
|
||||
to calculate environmental impact and SDG 12.3 compliance.
|
||||
|
||||
Does NOT require analytics tier - this is core sustainability data.
|
||||
|
||||
Returns:
|
||||
- total_production_waste: Sum of waste_quantity from all batches
|
||||
- total_defects: Sum of defect_quantity from all batches
|
||||
- total_planned: Sum of planned_quantity
|
||||
- total_actual: Sum of actual_quantity
|
||||
"""
|
||||
try:
|
||||
waste_data = await production_service.get_waste_analytics(
|
||||
tenant_id,
|
||||
start_date,
|
||||
end_date
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Production waste analytics retrieved for sustainability",
|
||||
tenant_id=str(tenant_id),
|
||||
total_waste=waste_data.get('total_production_waste', 0),
|
||||
start_date=start_date.isoformat(),
|
||||
end_date=end_date.isoformat()
|
||||
)
|
||||
|
||||
return waste_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting waste analytics for sustainability",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e)
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to retrieve waste analytics: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/baseline",
|
||||
response_model=dict
|
||||
)
|
||||
async def get_baseline_metrics(
|
||||
tenant_id: UUID = Path(...),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get baseline production metrics from first 90 days
|
||||
|
||||
Used by sustainability service to establish waste baseline
|
||||
for SDG 12.3 compliance tracking.
|
||||
|
||||
Returns:
|
||||
- waste_percentage: Baseline waste percentage from first 90 days
|
||||
- total_production_kg: Total production in first 90 days
|
||||
- total_waste_kg: Total waste in first 90 days
|
||||
- period: Date range of baseline period
|
||||
"""
|
||||
try:
|
||||
baseline_data = await production_service.get_baseline_metrics(tenant_id)
|
||||
|
||||
logger.info(
|
||||
"Baseline metrics retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
baseline_percentage=baseline_data.get('waste_percentage', 0)
|
||||
)
|
||||
|
||||
return baseline_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting baseline metrics",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e)
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to retrieve baseline metrics: {str(e)}"
|
||||
)
|
||||
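A minimal sketch of how the Inventory Service's sustainability module might consume the waste-analytics endpoint above (hypothetical base URL; auth and error handling omitted; this client code is not part of the commit):

import httpx
from datetime import datetime, timedelta, timezone
from uuid import UUID

async def fetch_production_waste(tenant_id: UUID, base_url: str = "http://production-service:8000") -> dict:
    """Fetch production waste totals for the last 90 days from the production service."""
    end_date = datetime.now(timezone.utc)
    start_date = end_date - timedelta(days=90)
    async with httpx.AsyncClient(base_url=base_url) as client:
        resp = await client.get(
            f"/api/v1/tenants/{tenant_id}/production/waste-analytics",
            params={"start_date": start_date.isoformat(), "end_date": end_date.isoformat()},
        )
        resp.raise_for_status()
        # Expected keys per the endpoint docstring:
        # total_production_waste, total_defects, total_planned, total_actual
        return resp.json()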
237
services/production/app/api/audit.py
Normal file
@@ -0,0 +1,237 @@
# services/production/app/api/audit.py
"""
Audit Logs API - Retrieve audit trail for production service
"""

from fastapi import APIRouter, Depends, HTTPException, Query, Path, status
from typing import Optional, Dict, Any
from uuid import UUID
from datetime import datetime
import structlog
from sqlalchemy import select, func, and_
from sqlalchemy.ext.asyncio import AsyncSession

from app.models import AuditLog
from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.models.audit_log_schemas import (
    AuditLogResponse,
    AuditLogListResponse,
    AuditLogStatsResponse
)
from app.core.database import database_manager

route_builder = RouteBuilder('production')
router = APIRouter(tags=["audit-logs"])
logger = structlog.get_logger()


async def get_db():
|
||||
"""Database session dependency"""
|
||||
async with database_manager.get_session() as session:
|
||||
yield session
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("audit-logs"),
|
||||
response_model=AuditLogListResponse
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def get_audit_logs(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
start_date: Optional[datetime] = Query(None, description="Filter logs from this date"),
|
||||
end_date: Optional[datetime] = Query(None, description="Filter logs until this date"),
|
||||
user_id: Optional[UUID] = Query(None, description="Filter by user ID"),
|
||||
action: Optional[str] = Query(None, description="Filter by action type"),
|
||||
resource_type: Optional[str] = Query(None, description="Filter by resource type"),
|
||||
severity: Optional[str] = Query(None, description="Filter by severity level"),
|
||||
search: Optional[str] = Query(None, description="Search in description field"),
|
||||
limit: int = Query(100, ge=1, le=1000, description="Number of records to return"),
|
||||
offset: int = Query(0, ge=0, description="Number of records to skip"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Get audit logs for production service.
|
||||
Requires admin or owner role.
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"Retrieving audit logs",
|
||||
tenant_id=tenant_id,
|
||||
user_id=current_user.get("user_id"),
|
||||
filters={
|
||||
"start_date": start_date,
|
||||
"end_date": end_date,
|
||||
"action": action,
|
||||
"resource_type": resource_type,
|
||||
"severity": severity
|
||||
}
|
||||
)
|
||||
|
||||
# Build query filters
|
||||
filters = [AuditLog.tenant_id == tenant_id]
|
||||
|
||||
if start_date:
|
||||
filters.append(AuditLog.created_at >= start_date)
|
||||
if end_date:
|
||||
filters.append(AuditLog.created_at <= end_date)
|
||||
if user_id:
|
||||
filters.append(AuditLog.user_id == user_id)
|
||||
if action:
|
||||
filters.append(AuditLog.action == action)
|
||||
if resource_type:
|
||||
filters.append(AuditLog.resource_type == resource_type)
|
||||
if severity:
|
||||
filters.append(AuditLog.severity == severity)
|
||||
if search:
|
||||
filters.append(AuditLog.description.ilike(f"%{search}%"))
|
||||
|
||||
# Count total matching records
|
||||
count_query = select(func.count()).select_from(AuditLog).where(and_(*filters))
|
||||
total_result = await db.execute(count_query)
|
||||
total = total_result.scalar() or 0
|
||||
|
||||
# Fetch paginated results
|
||||
query = (
|
||||
select(AuditLog)
|
||||
.where(and_(*filters))
|
||||
.order_by(AuditLog.created_at.desc())
|
||||
.limit(limit)
|
||||
.offset(offset)
|
||||
)
|
||||
|
||||
result = await db.execute(query)
|
||||
audit_logs = result.scalars().all()
|
||||
|
||||
# Convert to response models
|
||||
items = [AuditLogResponse.from_orm(log) for log in audit_logs]
|
||||
|
||||
logger.info(
|
||||
"Successfully retrieved audit logs",
|
||||
tenant_id=tenant_id,
|
||||
total=total,
|
||||
returned=len(items)
|
||||
)
|
||||
|
||||
return AuditLogListResponse(
|
||||
items=items,
|
||||
total=total,
|
||||
limit=limit,
|
||||
offset=offset,
|
||||
has_more=(offset + len(items)) < total
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to retrieve audit logs",
|
||||
error=str(e),
|
||||
tenant_id=tenant_id
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to retrieve audit logs: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("audit-logs/stats"),
|
||||
response_model=AuditLogStatsResponse
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def get_audit_log_stats(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
start_date: Optional[datetime] = Query(None, description="Filter logs from this date"),
|
||||
end_date: Optional[datetime] = Query(None, description="Filter logs until this date"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Get audit log statistics for production service.
|
||||
Requires admin or owner role.
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"Retrieving audit log statistics",
|
||||
tenant_id=tenant_id,
|
||||
user_id=current_user.get("user_id")
|
||||
)
|
||||
|
||||
# Build base filters
|
||||
filters = [AuditLog.tenant_id == tenant_id]
|
||||
if start_date:
|
||||
filters.append(AuditLog.created_at >= start_date)
|
||||
if end_date:
|
||||
filters.append(AuditLog.created_at <= end_date)
|
||||
|
||||
# Total events
|
||||
count_query = select(func.count()).select_from(AuditLog).where(and_(*filters))
|
||||
total_result = await db.execute(count_query)
|
||||
total_events = total_result.scalar() or 0
|
||||
|
||||
# Events by action
|
||||
action_query = (
|
||||
select(AuditLog.action, func.count().label('count'))
|
||||
.where(and_(*filters))
|
||||
.group_by(AuditLog.action)
|
||||
)
|
||||
action_result = await db.execute(action_query)
|
||||
events_by_action = {row.action: row.count for row in action_result}
|
||||
|
||||
# Events by severity
|
||||
severity_query = (
|
||||
select(AuditLog.severity, func.count().label('count'))
|
||||
.where(and_(*filters))
|
||||
.group_by(AuditLog.severity)
|
||||
)
|
||||
severity_result = await db.execute(severity_query)
|
||||
events_by_severity = {row.severity: row.count for row in severity_result}
|
||||
|
||||
# Events by resource type
|
||||
resource_query = (
|
||||
select(AuditLog.resource_type, func.count().label('count'))
|
||||
.where(and_(*filters))
|
||||
.group_by(AuditLog.resource_type)
|
||||
)
|
||||
resource_result = await db.execute(resource_query)
|
||||
events_by_resource_type = {row.resource_type: row.count for row in resource_result}
|
||||
|
||||
# Date range
|
||||
date_range_query = (
|
||||
select(
|
||||
func.min(AuditLog.created_at).label('min_date'),
|
||||
func.max(AuditLog.created_at).label('max_date')
|
||||
)
|
||||
.where(and_(*filters))
|
||||
)
|
||||
date_result = await db.execute(date_range_query)
|
||||
date_row = date_result.one()
|
||||
|
||||
logger.info(
|
||||
"Successfully retrieved audit log statistics",
|
||||
tenant_id=tenant_id,
|
||||
total_events=total_events
|
||||
)
|
||||
|
||||
return AuditLogStatsResponse(
|
||||
total_events=total_events,
|
||||
events_by_action=events_by_action,
|
||||
events_by_severity=events_by_severity,
|
||||
events_by_resource_type=events_by_resource_type,
|
||||
date_range={
|
||||
"min": date_row.min_date,
|
||||
"max": date_row.max_date
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to retrieve audit log statistics",
|
||||
error=str(e),
|
||||
tenant_id=tenant_id
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to retrieve audit log statistics: {str(e)}"
|
||||
)
|
||||
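The AuditLogListResponse and AuditLogStatsResponse schemas are imported from shared.models.audit_log_schemas and are not part of this commit; judging from how the handlers above populate them, they are assumed to look roughly like this sketch (field types inferred):

from datetime import datetime
from typing import Dict, List, Optional
from pydantic import BaseModel

class AuditLogResponse(BaseModel):
    # Mirrors the AuditLog ORM model (fields not shown in this commit);
    # built via AuditLogResponse.from_orm(log) in the handler above.
    ...

class AuditLogListResponse(BaseModel):
    items: List[AuditLogResponse]
    total: int          # total rows matching the filters
    limit: int
    offset: int
    has_more: bool      # (offset + len(items)) < total

class AuditLogStatsResponse(BaseModel):
    total_events: int
    events_by_action: Dict[str, int]
    events_by_severity: Dict[str, int]
    events_by_resource_type: Dict[str, int]
    date_range: Dict[str, Optional[datetime]]   # {"min": ..., "max": ...}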
167
services/production/app/api/batch.py
Normal file
@@ -0,0 +1,167 @@
# services/production/app/api/batch.py
"""
Production Batch API - Batch operations for enterprise dashboards

Phase 2 optimization: Eliminate N+1 query patterns by fetching production data
for multiple tenants in a single request.
"""

from fastapi import APIRouter, Depends, HTTPException, Body
from typing import List, Dict, Any
from uuid import UUID
from pydantic import BaseModel, Field
import structlog
import asyncio

from fastapi import Request
from app.services.production_service import ProductionService
from app.core.config import settings
from shared.auth.decorators import get_current_user_dep

router = APIRouter(tags=["production-batch"])
logger = structlog.get_logger()


def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
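# NOTE (illustrative sketch, not part of the original file): because the service is
# wired in through get_production_service above, tests can swap it out with FastAPI's
# dependency_overrides mechanism instead of standing up a real database, e.g.:
#
#     from app.main import app  # hypothetical application module
#     app.dependency_overrides[get_production_service] = lambda: FakeProductionService()
#
# so the batch endpoint below can be exercised with canned dashboard summaries.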
class ProductionSummaryBatchRequest(BaseModel):
|
||||
"""Request model for batch production summary"""
|
||||
tenant_ids: List[str] = Field(..., description="List of tenant IDs", max_length=100)
|
||||
|
||||
|
||||
class ProductionSummary(BaseModel):
|
||||
"""Production summary for a single tenant"""
|
||||
tenant_id: str
|
||||
total_batches: int
|
||||
pending_batches: int
|
||||
in_progress_batches: int
|
||||
completed_batches: int
|
||||
on_hold_batches: int
|
||||
cancelled_batches: int
|
||||
total_planned_quantity: float
|
||||
total_actual_quantity: float
|
||||
efficiency_rate: float
|
||||
|
||||
|
||||
@router.post("/batch/production-summary", response_model=Dict[str, ProductionSummary])
|
||||
async def get_production_summary_batch(
|
||||
request: ProductionSummaryBatchRequest = Body(...),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get production summary for multiple tenants in a single request.
|
||||
|
||||
Optimized for enterprise dashboards to eliminate N+1 query patterns.
|
||||
Fetches production data for all tenants in parallel.
|
||||
|
||||
Args:
|
||||
request: Batch request with tenant IDs
|
||||
|
||||
Returns:
|
||||
Dictionary mapping tenant_id -> production summary
|
||||
|
||||
Example:
|
||||
POST /api/v1/production/batch/production-summary
|
||||
{
|
||||
"tenant_ids": ["tenant-1", "tenant-2", "tenant-3"]
|
||||
}
|
||||
|
||||
Response:
|
||||
{
|
||||
"tenant-1": {"tenant_id": "tenant-1", "total_batches": 25, ...},
|
||||
"tenant-2": {"tenant_id": "tenant-2", "total_batches": 18, ...},
|
||||
"tenant-3": {"tenant_id": "tenant-3", "total_batches": 32, ...}
|
||||
}
|
||||
"""
|
||||
try:
|
||||
if len(request.tenant_ids) > 100:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Maximum 100 tenant IDs allowed per batch request"
|
||||
)
|
||||
|
||||
if not request.tenant_ids:
|
||||
return {}
|
||||
|
||||
logger.info(
|
||||
"Batch fetching production summaries",
|
||||
tenant_count=len(request.tenant_ids)
|
||||
)
|
||||
|
||||
async def fetch_tenant_production(tenant_id: str) -> tuple[str, ProductionSummary]:
|
||||
"""Fetch production summary for a single tenant"""
|
||||
try:
|
||||
tenant_uuid = UUID(tenant_id)
|
||||
summary = await production_service.get_dashboard_summary(tenant_uuid)
|
||||
|
||||
# Calculate efficiency rate
|
||||
efficiency_rate = 0.0
|
||||
if summary.total_planned_quantity > 0 and summary.total_actual_quantity is not None:
|
||||
efficiency_rate = (summary.total_actual_quantity / summary.total_planned_quantity) * 100
|
||||
|
||||
return tenant_id, ProductionSummary(
|
||||
tenant_id=tenant_id,
|
||||
total_batches=int(summary.total_batches or 0),
|
||||
pending_batches=int(summary.pending_batches or 0),
|
||||
in_progress_batches=int(summary.in_progress_batches or 0),
|
||||
completed_batches=int(summary.completed_batches or 0),
|
||||
on_hold_batches=int(summary.on_hold_batches or 0),
|
||||
cancelled_batches=int(summary.cancelled_batches or 0),
|
||||
total_planned_quantity=float(summary.total_planned_quantity or 0),
|
||||
total_actual_quantity=float(summary.total_actual_quantity or 0),
|
||||
efficiency_rate=efficiency_rate
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Failed to fetch production for tenant in batch",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e)
|
||||
)
|
||||
return tenant_id, ProductionSummary(
|
||||
tenant_id=tenant_id,
|
||||
total_batches=0,
|
||||
pending_batches=0,
|
||||
in_progress_batches=0,
|
||||
completed_batches=0,
|
||||
on_hold_batches=0,
|
||||
cancelled_batches=0,
|
||||
total_planned_quantity=0.0,
|
||||
total_actual_quantity=0.0,
|
||||
efficiency_rate=0.0
|
||||
)
|
||||
|
||||
# Fetch all tenant production data in parallel
|
||||
tasks = [fetch_tenant_production(tid) for tid in request.tenant_ids]
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
# Build result dictionary
|
||||
result_dict = {}
|
||||
for result in results:
|
||||
if isinstance(result, Exception):
|
||||
logger.error("Exception in batch production fetch", error=str(result))
|
||||
continue
|
||||
tenant_id, summary = result
|
||||
result_dict[tenant_id] = summary
|
||||
|
||||
logger.info(
|
||||
"Batch production summaries retrieved",
|
||||
requested_count=len(request.tenant_ids),
|
||||
successful_count=len(result_dict)
|
||||
)
|
||||
|
||||
return result_dict
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error in batch production summary", error=str(e), exc_info=True)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to fetch batch production summaries: {str(e)}"
|
||||
)
|
||||
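A minimal sketch of how an enterprise dashboard might call the batch endpoint above, following the example path in its docstring (hypothetical gateway host and bearer token; not part of this commit):

import httpx

def fetch_production_summaries(tenant_ids: list[str], token: str) -> dict:
    """One POST for all tenants instead of N per-tenant calls."""
    resp = httpx.post(
        "https://gateway.example.com/api/v1/production/batch/production-summary",
        json={"tenant_ids": tenant_ids},
        headers={"Authorization": f"Bearer {token}"},
        timeout=30.0,
    )
    resp.raise_for_status()
    # Maps tenant_id -> ProductionSummary fields (total_batches, efficiency_rate, ...)
    return resp.json()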
580
services/production/app/api/equipment.py
Normal file
@@ -0,0 +1,580 @@
# services/production/app/api/equipment.py
"""
Equipment API - CRUD operations on Equipment model
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Any, Optional
from uuid import UUID
from datetime import datetime, timezone
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder
from shared.security import create_audit_logger, AuditSeverity, AuditAction
from app.core.database import get_db
from app.services.production_service import ProductionService
from app.models import AuditLog
from app.schemas.equipment import (
    EquipmentCreate,
    EquipmentUpdate,
    EquipmentResponse,
    EquipmentListResponse,
    EquipmentDeletionSummary
)
from app.models.production import EquipmentStatus, EquipmentType
from app.core.config import settings

logger = structlog.get_logger()
route_builder = RouteBuilder('production')
router = APIRouter(tags=["production-equipment"])

# Initialize audit logger with the production service's AuditLog model
audit_logger = create_audit_logger("production-service", AuditLog)


def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("equipment"),
|
||||
response_model=EquipmentListResponse
|
||||
)
|
||||
async def list_equipment(
|
||||
tenant_id: UUID = Path(...),
|
||||
status: Optional[EquipmentStatus] = Query(None, description="Filter by status"),
|
||||
type: Optional[EquipmentType] = Query(None, description="Filter by equipment type"),
|
||||
is_active: Optional[bool] = Query(None, description="Filter by active status"),
|
||||
page: int = Query(1, ge=1, description="Page number"),
|
||||
page_size: int = Query(50, ge=1, le=100, description="Page size"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""List equipment with filters: status, type, active status"""
|
||||
try:
|
||||
filters = {
|
||||
"status": status,
|
||||
"type": type,
|
||||
"is_active": is_active
|
||||
}
|
||||
|
||||
equipment_list = await production_service.get_equipment_list(tenant_id, filters, page, page_size)
|
||||
|
||||
logger.info("Retrieved equipment list",
|
||||
tenant_id=str(tenant_id), filters=filters)
|
||||
|
||||
return equipment_list
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error listing equipment",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to list equipment")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("equipment"),
|
||||
response_model=EquipmentResponse
|
||||
)
|
||||
async def create_equipment(
|
||||
equipment_data: EquipmentCreate,
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Create a new equipment item"""
|
||||
try:
|
||||
equipment = await production_service.create_equipment(tenant_id, equipment_data)
|
||||
|
||||
logger.info("Created equipment",
|
||||
equipment_id=str(equipment.id), tenant_id=str(tenant_id))
|
||||
|
||||
# Audit log the equipment creation
|
||||
await audit_logger.log_event(
|
||||
db_session=db,
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'),
|
||||
action=AuditAction.CREATE.value,
|
||||
resource_type="equipment",
|
||||
resource_id=str(equipment.id),
|
||||
severity=AuditSeverity.INFO.value,
|
||||
audit_metadata={"equipment_name": equipment.name, "equipment_type": equipment.type.value}
|
||||
)
|
||||
|
||||
return EquipmentResponse.model_validate(equipment)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Validation error creating equipment",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error creating equipment",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to create equipment")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("equipment/{equipment_id}"),
|
||||
response_model=EquipmentResponse
|
||||
)
|
||||
async def get_equipment(
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get a specific equipment item"""
|
||||
try:
|
||||
equipment = await production_service.get_equipment(tenant_id, equipment_id)
|
||||
|
||||
if not equipment:
|
||||
raise HTTPException(status_code=404, detail="Equipment not found")
|
||||
|
||||
logger.info("Retrieved equipment",
|
||||
equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
|
||||
return EquipmentResponse.model_validate(equipment)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error retrieving equipment",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to retrieve equipment")
|
||||
|
||||
|
||||
@router.put(
|
||||
route_builder.build_base_route("equipment/{equipment_id}"),
|
||||
response_model=EquipmentResponse
|
||||
)
|
||||
async def update_equipment(
|
||||
equipment_data: EquipmentUpdate,
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Update an equipment item"""
|
||||
try:
|
||||
equipment = await production_service.update_equipment(tenant_id, equipment_id, equipment_data)
|
||||
|
||||
if not equipment:
|
||||
raise HTTPException(status_code=404, detail="Equipment not found")
|
||||
|
||||
logger.info("Updated equipment",
|
||||
equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
|
||||
# Audit log the equipment update
|
||||
await audit_logger.log_event(
|
||||
db_session=db,
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'),
|
||||
action=AuditAction.UPDATE.value,
|
||||
resource_type="equipment",
|
||||
resource_id=str(equipment_id),
|
||||
severity=AuditSeverity.INFO.value,
|
||||
audit_metadata={"updates": equipment_data.model_dump(exclude_unset=True)}
|
||||
)
|
||||
|
||||
return EquipmentResponse.model_validate(equipment)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except ValueError as e:
|
||||
logger.warning("Validation error updating equipment",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error updating equipment",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to update equipment")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("equipment/{equipment_id}/deletion-summary"),
|
||||
response_model=EquipmentDeletionSummary
|
||||
)
|
||||
async def get_equipment_deletion_summary(
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get deletion summary for equipment (dependency check)"""
|
||||
try:
|
||||
summary = await production_service.get_equipment_deletion_summary(tenant_id, equipment_id)
|
||||
|
||||
logger.info("Retrieved equipment deletion summary",
|
||||
equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
|
||||
return EquipmentDeletionSummary(**summary)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting equipment deletion summary",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get deletion summary")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("equipment/{equipment_id}/report-failure"),
|
||||
response_model=EquipmentResponse
|
||||
)
|
||||
async def report_equipment_failure(
|
||||
failure_data: dict,
|
||||
request: Request,
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Report equipment failure and trigger maintenance workflow"""
|
||||
try:
|
||||
# Update equipment status and add failure record
|
||||
equipment = await production_service.report_equipment_failure(
|
||||
tenant_id,
|
||||
equipment_id,
|
||||
failure_data
|
||||
)
|
||||
|
||||
if not equipment:
|
||||
raise HTTPException(status_code=404, detail="Equipment not found")
|
||||
|
||||
logger.info("Reported equipment failure",
|
||||
equipment_id=str(equipment_id),
|
||||
tenant_id=str(tenant_id),
|
||||
failure_type=failure_data.get('failureType'))
|
||||
|
||||
# Audit log the failure report
|
||||
await audit_logger.log_event(
|
||||
db_session=db,
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'),
|
||||
action=AuditAction.UPDATE.value,
|
||||
resource_type="equipment",
|
||||
resource_id=str(equipment_id),
|
||||
severity=AuditSeverity.WARNING.value,
|
||||
audit_metadata={
|
||||
"action": "report_failure",
|
||||
"failure_type": failure_data.get('failureType'),
|
||||
"severity": failure_data.get('severity')
|
||||
}
|
||||
)
|
||||
|
||||
# Get notification service from app state
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
|
||||
# Trigger notifications if notification service is available
|
||||
if notification_service:
|
||||
try:
|
||||
await trigger_failure_notifications(
|
||||
notification_service,
|
||||
tenant_id,
|
||||
equipment,
|
||||
failure_data
|
||||
)
|
||||
|
||||
# Send primary notification to equipment support contact if available
|
||||
if equipment.support_contact and equipment.support_contact.get('email'):
|
||||
await send_support_contact_notification(
|
||||
notification_service,
|
||||
tenant_id,
|
||||
equipment,
|
||||
failure_data,
|
||||
equipment.support_contact['email']
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to send notifications", error=str(e), equipment_id=str(equipment_id))
|
||||
# Continue even if notifications fail
|
||||
|
||||
return EquipmentResponse.model_validate(equipment)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error reporting equipment failure",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to report equipment failure")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("equipment/{equipment_id}/mark-repaired"),
|
||||
response_model=EquipmentResponse
|
||||
)
|
||||
async def mark_equipment_repaired(
|
||||
repair_data: dict,
|
||||
request: Request,
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Mark equipment as repaired and update maintenance records"""
|
||||
try:
|
||||
# Update equipment status and add repair record
|
||||
equipment = await production_service.mark_equipment_repaired(
|
||||
tenant_id,
|
||||
equipment_id,
|
||||
repair_data
|
||||
)
|
||||
|
||||
if not equipment:
|
||||
raise HTTPException(status_code=404, detail="Equipment not found")
|
||||
|
||||
logger.info("Marked equipment as repaired",
|
||||
equipment_id=str(equipment_id),
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
# Audit log the repair completion
|
||||
await audit_logger.log_event(
|
||||
db_session=db,
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'),
|
||||
action=AuditAction.UPDATE.value,
|
||||
resource_type="equipment",
|
||||
resource_id=str(equipment_id),
|
||||
severity=AuditSeverity.INFO.value,
|
||||
audit_metadata={
|
||||
"action": "mark_repaired",
|
||||
"technician": repair_data.get('technicianName'),
|
||||
"cost": repair_data.get('cost')
|
||||
}
|
||||
)
|
||||
|
||||
# Get notification service from app state
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
|
||||
# Trigger notifications if notification service is available
|
||||
if notification_service:
|
||||
try:
|
||||
# Calculate downtime for notifications
|
||||
last_maintenance_date = equipment.last_maintenance_date or datetime.now(timezone.utc)
|
||||
repair_date_str = repair_data.get('repairDate')
|
||||
if repair_date_str:
|
||||
if 'T' in repair_date_str:
|
||||
repair_date = datetime.fromisoformat(repair_date_str.replace('Z', '+00:00'))
|
||||
else:
|
||||
repair_date = datetime.fromisoformat(f"{repair_date_str}T00:00:00+00:00")
|
||||
else:
|
||||
repair_date = datetime.now(timezone.utc)
|
||||
|
||||
downtime_hours = int((repair_date - last_maintenance_date).total_seconds() / 3600)
|
||||
|
||||
# Add downtime to repair_data for notification
|
||||
repair_data_with_downtime = {**repair_data, 'downtime': downtime_hours}
|
||||
|
||||
await trigger_repair_notifications(
|
||||
notification_service,
|
||||
tenant_id,
|
||||
equipment,
|
||||
repair_data_with_downtime
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to send notifications", error=str(e), equipment_id=str(equipment_id))
|
||||
# Continue even if notifications fail
|
||||
|
||||
return EquipmentResponse.model_validate(equipment)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error marking equipment as repaired",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to mark equipment as repaired")
|
||||
|
||||
|
||||
@router.delete(
|
||||
route_builder.build_base_route("equipment/{equipment_id}")
|
||||
)
|
||||
async def delete_equipment(
|
||||
tenant_id: UUID = Path(...),
|
||||
equipment_id: UUID = Path(...),
|
||||
permanent: bool = Query(False, description="Permanent delete (hard delete) if true"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Delete an equipment item. Use permanent=true for hard delete (requires admin role)"""
|
||||
try:
|
||||
# Hard delete requires admin role
|
||||
if permanent:
|
||||
user_role = current_user.get('role', '').lower()
|
||||
if user_role not in ['admin', 'owner']:
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Hard delete requires admin or owner role"
|
||||
)
|
||||
|
||||
success = await production_service.hard_delete_equipment(tenant_id, equipment_id)
|
||||
delete_type = "hard_delete"
|
||||
severity = AuditSeverity.CRITICAL.value
|
||||
else:
|
||||
success = await production_service.delete_equipment(tenant_id, equipment_id)
|
||||
delete_type = "soft_delete"
|
||||
severity = AuditSeverity.WARNING.value
|
||||
|
||||
if not success:
|
||||
raise HTTPException(status_code=404, detail="Equipment not found")
|
||||
|
||||
logger.info(f"{'Hard' if permanent else 'Soft'} deleted equipment",
|
||||
equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
|
||||
# Audit log the equipment deletion
|
||||
await audit_logger.log_event(
|
||||
db_session=db,
|
||||
tenant_id=str(tenant_id),
|
||||
user_id=current_user.get('user_id'),
|
||||
action=AuditAction.DELETE.value,
|
||||
resource_type="equipment",
|
||||
resource_id=str(equipment_id),
|
||||
severity=severity,
|
||||
audit_metadata={"action": delete_type, "permanent": permanent}
|
||||
)
|
||||
|
||||
return {"message": f"Equipment {'permanently deleted' if permanent else 'deleted'} successfully"}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error deleting equipment",
|
||||
error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to delete equipment")
|
||||
|
||||
|
||||
# Helper functions for notifications
|
||||
async def trigger_failure_notifications(notification_service: Any, tenant_id: UUID, equipment: Any, failure_data: dict):
|
||||
"""Trigger failure notifications via email - sends to bakery managers"""
|
||||
try:
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from pathlib import Path
|
||||
|
||||
# Load template from file
|
||||
template_dir = Path(__file__).parent.parent.parent / "notification" / "app" / "templates"
|
||||
env = Environment(loader=FileSystemLoader(str(template_dir)))
|
||||
template = env.get_template('equipment_failure_email.html')
|
||||
|
||||
# Prepare template variables
|
||||
template_vars = {
|
||||
"equipment_name": equipment.name,
|
||||
"equipment_type": equipment.type.value if hasattr(equipment.type, 'value') else equipment.type,
|
||||
"equipment_model": equipment.model or "N/A",
|
||||
"equipment_serial_number": equipment.serial_number or "N/A",
|
||||
"equipment_location": equipment.location or "N/A",
|
||||
"failure_type": failure_data.get('failureType', 'Unknown'),
|
||||
"severity": failure_data.get('severity', 'high'),
|
||||
"description": failure_data.get('description', ''),
|
||||
"reported_time": datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC'),
|
||||
"estimated_impact": "SÍ - Afecta producción" if failure_data.get('estimatedImpact') else "NO - Sin impacto en producción",
|
||||
"support_contact": equipment.support_contact or {},
|
||||
"equipment_link": f"https://app.bakeryia.com/equipment/{equipment.id}",
|
||||
"bakery_name": "BakeryIA",
|
||||
"current_year": datetime.now().year
|
||||
}
|
||||
|
||||
html_content = template.render(**template_vars)
|
||||
|
||||
# Send via notification service (which will handle the actual email sending)
|
||||
# This is a simplified approach - in production you'd want to get manager emails from DB
|
||||
logger.info("Failure notifications triggered (template rendered)",
|
||||
equipment_id=str(equipment.id),
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error triggering failure notifications",
|
||||
error=str(e), equipment_id=str(equipment.id), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
|
||||
async def trigger_repair_notifications(notification_service: Any, tenant_id: UUID, equipment: Any, repair_data: dict):
|
||||
"""Trigger repair completion notifications via email - sends to bakery managers"""
|
||||
try:
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from pathlib import Path
|
||||
|
||||
# Load template from file
|
||||
template_dir = Path(__file__).parent.parent.parent / "notification" / "app" / "templates"
|
||||
env = Environment(loader=FileSystemLoader(str(template_dir)))
|
||||
template = env.get_template('equipment_repaired_email.html')
|
||||
|
||||
# Prepare template variables
|
||||
template_vars = {
|
||||
"equipment_name": equipment.name,
|
||||
"equipment_type": equipment.type.value if hasattr(equipment.type, 'value') else equipment.type,
|
||||
"equipment_model": equipment.model or "N/A",
|
||||
"equipment_location": equipment.location or "N/A",
|
||||
"repair_date": repair_data.get('repairDate', datetime.now(timezone.utc).strftime('%Y-%m-%d')),
|
||||
"technician_name": repair_data.get('technicianName', 'Unknown'),
|
||||
"repair_description": repair_data.get('repairDescription', ''),
|
||||
"parts_replaced": repair_data.get('partsReplaced', []),
|
||||
"cost": repair_data.get('cost', 0),
|
||||
"downtime_hours": repair_data.get('downtime', 0),
|
||||
"test_results": repair_data.get('testResults', False),
|
||||
"equipment_link": f"https://app.bakeryia.com/equipment/{equipment.id}",
|
||||
"bakery_name": "BakeryIA",
|
||||
"current_year": datetime.now().year
|
||||
}
|
||||
|
||||
html_content = template.render(**template_vars)
|
||||
|
||||
# Send via notification service
|
||||
logger.info("Repair notifications triggered (template rendered)",
|
||||
equipment_id=str(equipment.id),
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error triggering repair notifications",
|
||||
error=str(e), equipment_id=str(equipment.id), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
|
||||
async def send_support_contact_notification(notification_service: Any, tenant_id: UUID, equipment: Any, failure_data: dict, support_email: str):
|
||||
"""Send direct notification to equipment support contact for repair request"""
|
||||
try:
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from pathlib import Path
|
||||
|
||||
# Load template from file
|
||||
template_dir = Path(__file__).parent.parent.parent / "notification" / "app" / "templates"
|
||||
env = Environment(loader=FileSystemLoader(str(template_dir)))
|
||||
template = env.get_template('equipment_failure_email.html')
|
||||
|
||||
# Prepare template variables
|
||||
template_vars = {
|
||||
"equipment_name": equipment.name,
|
||||
"equipment_type": equipment.type.value if hasattr(equipment.type, 'value') else equipment.type,
|
||||
"equipment_model": equipment.model or "N/A",
|
||||
"equipment_serial_number": equipment.serial_number or "N/A",
|
||||
"equipment_location": equipment.location or "N/A",
|
||||
"failure_type": failure_data.get('failureType', 'Unknown'),
|
||||
"severity": failure_data.get('severity', 'high'),
|
||||
"description": failure_data.get('description', ''),
|
||||
"reported_time": datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC'),
|
||||
"estimated_impact": "SÍ - Afecta producción" if failure_data.get('estimatedImpact') else "NO - Sin impacto en producción",
|
||||
"support_contact": equipment.support_contact or {},
|
||||
"equipment_link": f"https://app.bakeryia.com/equipment/{equipment.id}",
|
||||
"bakery_name": "BakeryIA",
|
||||
"current_year": datetime.now().year
|
||||
}
|
||||
|
||||
html_content = template.render(**template_vars)
|
||||
|
||||
# TODO: Actually send email via notification service
|
||||
# For now, just log that we would send to the support email
|
||||
logger.info("Support contact notification prepared (would send to support)",
|
||||
equipment_id=str(equipment.id),
|
||||
tenant_id=str(tenant_id),
|
||||
support_email=support_email,
|
||||
subject=f"🚨 URGENTE: Fallo de Equipo - {equipment.name}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error sending support contact notification",
|
||||
error=str(e), equipment_id=str(equipment.id), tenant_id=str(tenant_id))
|
||||
raise
|
||||
88
services/production/app/api/internal_alert_trigger.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# services/production/app/api/internal_alert_trigger.py
|
||||
"""
|
||||
Internal API for triggering production alerts.
|
||||
Used by demo session cloning to generate realistic production delay alerts.
|
||||
|
||||
URL Pattern: /api/v1/tenants/{tenant_id}/production/internal/alerts/trigger
|
||||
This follows the tenant-scoped pattern so gateway can proxy correctly.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request, Path
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# New URL pattern: tenant-scoped so gateway proxies to production service correctly
|
||||
@router.post("/api/v1/tenants/{tenant_id}/production/internal/alerts/trigger")
|
||||
async def trigger_production_alerts(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID to check production for"),
|
||||
request: Request = None
|
||||
) -> dict:
|
||||
"""
|
||||
Trigger comprehensive production alert checks for a specific tenant (internal use only).
|
||||
|
||||
This endpoint is called by the demo session cloning process after production
|
||||
batches are seeded to generate realistic production alerts including:
|
||||
- Production delays
|
||||
- Equipment maintenance alerts
|
||||
- Batch start delays
|
||||
|
||||
Security: Protected by x-internal-service header check.
|
||||
"""
|
||||
try:
|
||||
# Verify internal service header
|
||||
if not request or request.headers.get("x-internal-service") not in ["demo-session", "internal"]:
|
||||
logger.warning("Unauthorized internal API call", tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="This endpoint is for internal service use only"
|
||||
)
|
||||
|
||||
# Get production scheduler from app state
|
||||
production_scheduler = getattr(request.app.state, 'production_scheduler', None)
|
||||
|
||||
if not production_scheduler:
|
||||
logger.error("Production scheduler not initialized")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Production scheduler not available"
|
||||
)
|
||||
|
||||
# Trigger comprehensive production alert checks for the specific tenant
|
||||
logger.info("Triggering comprehensive production alert checks", tenant_id=str(tenant_id))
|
||||
|
||||
# Call the scheduler's manual trigger method
|
||||
result = await production_scheduler.trigger_manual_check(tenant_id)
|
||||
|
||||
if result.get("success", False):
|
||||
logger.info(
|
||||
"Production alert checks completed successfully",
|
||||
tenant_id=str(tenant_id),
|
||||
alerts_generated=result.get("alerts_generated", 0)
|
||||
)
|
||||
else:
|
||||
logger.error(
|
||||
"Production alert checks failed",
|
||||
tenant_id=str(tenant_id),
|
||||
error=result.get("error", "Unknown error")
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error triggering production alerts",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to trigger production alerts: {str(e)}"
|
||||
)
|
||||
798
services/production/app/api/internal_demo.py
Normal file
@@ -0,0 +1,798 @@
|
||||
"""
|
||||
Internal Demo Cloning API for Production Service
|
||||
Service-to-service endpoint for cloning production data
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Header
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, delete, func
|
||||
import structlog
|
||||
import uuid
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import Optional, Dict, Any
|
||||
import os
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from app.core.database import get_db
|
||||
from app.models.production import (
|
||||
ProductionBatch, ProductionSchedule, ProductionCapacity,
|
||||
QualityCheckTemplate, QualityCheck, Equipment,
|
||||
ProductionStatus, ProductionPriority, ProcessStage,
|
||||
EquipmentStatus, EquipmentType
|
||||
)
|
||||
from shared.utils.demo_dates import (
|
||||
adjust_date_for_demo, resolve_time_marker
|
||||
)
|
||||
|
||||
from app.core.config import settings
|
||||
|
||||
logger = structlog.get_logger()
|
||||
router = APIRouter(prefix="/internal/demo", tags=["internal"])
|
||||
|
||||
# Base demo tenant IDs
|
||||
DEMO_TENANT_PROFESSIONAL = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
|
||||
|
||||
|
||||
|
||||
@router.post("/clone")
|
||||
async def clone_demo_data(
|
||||
base_tenant_id: str,
|
||||
virtual_tenant_id: str,
|
||||
demo_account_type: str,
|
||||
session_id: Optional[str] = None,
|
||||
session_created_at: Optional[str] = None,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Clone production service data for a virtual demo tenant
|
||||
|
||||
Clones:
|
||||
- Production batches (historical production runs)
|
||||
- Production schedules (daily planning)
|
||||
- Production capacity records
|
||||
- Quality check templates
|
||||
- Quality checks (inspection records)
|
||||
- Equipment (machines and tools)
|
||||
|
||||
Args:
|
||||
base_tenant_id: Template tenant UUID to clone from
|
||||
virtual_tenant_id: Target virtual tenant UUID
|
||||
demo_account_type: Type of demo account
|
||||
session_id: Originating session ID for tracing
|
||||
session_created_at: Session creation timestamp for date adjustment
|
||||
|
||||
Returns:
|
||||
Cloning status and record counts
|
||||
"""
|
||||
start_time = datetime.now(timezone.utc)
|
||||
|
||||
# Parse session creation time for date adjustment
|
||||
if session_created_at:
|
||||
try:
|
||||
session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
|
||||
except (ValueError, AttributeError):
|
||||
session_time = start_time
|
||||
else:
|
||||
session_time = start_time
|
||||
|
||||
logger.info(
|
||||
"Starting production data cloning",
|
||||
base_tenant_id=base_tenant_id,
|
||||
virtual_tenant_id=virtual_tenant_id,
|
||||
demo_account_type=demo_account_type,
|
||||
session_id=session_id,
|
||||
session_created_at=session_created_at
|
||||
)
|
||||
|
||||
try:
|
||||
# Validate UUIDs
|
||||
virtual_uuid = uuid.UUID(virtual_tenant_id)
|
||||
|
||||
# Track cloning statistics
|
||||
stats = {
|
||||
"batches": 0,
|
||||
"production_schedules": 0,
|
||||
"production_capacity": 0,
|
||||
"quality_check_templates": 0,
|
||||
"quality_checks": 0,
|
||||
"equipment": 0,
|
||||
"alerts_generated": 0
|
||||
}
|
||||
|
||||
def parse_date_field(date_value, session_time, field_name="date"):
|
||||
"""Parse date field, handling both ISO strings and BASE_TS markers"""
|
||||
if not date_value:
|
||||
return None
|
||||
|
||||
# Check if it's a BASE_TS marker
|
||||
if isinstance(date_value, str) and date_value.startswith("BASE_TS"):
|
||||
try:
|
||||
return resolve_time_marker(date_value, session_time)
|
||||
except ValueError as e:
|
||||
logger.warning(
|
||||
f"Invalid BASE_TS marker in {field_name}",
|
||||
marker=date_value,
|
||||
error=str(e)
|
||||
)
|
||||
return None
|
||||
|
||||
# Handle regular ISO date strings
|
||||
try:
|
||||
return adjust_date_for_demo(
|
||||
datetime.fromisoformat(date_value.replace('Z', '+00:00')),
|
||||
session_time
|
||||
)
|
||||
except (ValueError, AttributeError) as e:
|
||||
logger.warning(
|
||||
f"Invalid date format in {field_name}",
|
||||
date_value=date_value,
|
||||
error=str(e)
|
||||
)
|
||||
return None
|
||||
|
||||
# Load seed data from JSON files
|
||||
from shared.utils.seed_data_paths import get_seed_data_path
|
||||
|
||||
if demo_account_type == "professional":
|
||||
json_file = get_seed_data_path("professional", "06-production.json")
|
||||
elif demo_account_type == "enterprise":
|
||||
json_file = get_seed_data_path("enterprise", "06-production.json")
|
||||
elif demo_account_type == "enterprise_child":
|
||||
json_file = get_seed_data_path("enterprise", "06-production.json", child_id=base_tenant_id)
|
||||
else:
|
||||
raise ValueError(f"Invalid demo account type: {demo_account_type}")
|
||||
|
||||
# Load JSON data
|
||||
with open(json_file, 'r', encoding='utf-8') as f:
|
||||
seed_data = json.load(f)
|
||||
|
||||
# Create Equipment first (no dependencies)
|
||||
for equipment_data in seed_data.get('equipment', []):
|
||||
# Transform equipment ID using XOR
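# The same deterministic transform is applied to every referenced ID below
# (recipes, orders, equipment_used, ...), so cross-references stay consistent
# within the virtual tenant while remaining distinct per demo session.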
|
||||
from shared.utils.demo_id_transformer import transform_id
|
||||
try:
|
||||
equipment_uuid = UUID(equipment_data['id'])
|
||||
transformed_id = transform_id(equipment_data['id'], virtual_uuid)
|
||||
except ValueError as e:
|
||||
logger.error("Failed to parse equipment UUID",
|
||||
equipment_id=equipment_data['id'],
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid UUID format in equipment data: {str(e)}"
|
||||
)
|
||||
|
||||
# Parse date fields (supports BASE_TS markers and ISO timestamps)
|
||||
adjusted_install_date = parse_date_field(
|
||||
equipment_data.get('install_date'),
|
||||
session_time,
|
||||
"install_date"
|
||||
)
|
||||
adjusted_last_maintenance = parse_date_field(
|
||||
equipment_data.get('last_maintenance_date'),
|
||||
session_time,
|
||||
"last_maintenance_date"
|
||||
)
|
||||
adjusted_next_maintenance = parse_date_field(
|
||||
equipment_data.get('next_maintenance_date'),
|
||||
session_time,
|
||||
"next_maintenance_date"
|
||||
)
|
||||
adjusted_created_at = parse_date_field(
|
||||
equipment_data.get('created_at'),
|
||||
session_time,
|
||||
"created_at"
|
||||
)
|
||||
adjusted_updated_at = parse_date_field(
|
||||
equipment_data.get('updated_at'),
|
||||
session_time,
|
||||
"updated_at"
|
||||
)
|
||||
|
||||
new_equipment = Equipment(
|
||||
id=str(transformed_id),
|
||||
tenant_id=virtual_uuid,
|
||||
name=equipment_data['name'],
|
||||
type=equipment_data['type'],
|
||||
model=equipment_data['model'],
|
||||
serial_number=equipment_data.get('serial_number'),
|
||||
location=equipment_data['location'],
|
||||
status=equipment_data['status'],
|
||||
install_date=adjusted_install_date,
|
||||
last_maintenance_date=adjusted_last_maintenance,
|
||||
next_maintenance_date=adjusted_next_maintenance,
|
||||
maintenance_interval_days=equipment_data.get('maintenance_interval_days'),
|
||||
efficiency_percentage=equipment_data.get('efficiency_percentage'),
|
||||
uptime_percentage=equipment_data.get('uptime_percentage'),
|
||||
energy_usage_kwh=equipment_data.get('energy_usage_kwh'),
|
||||
power_kw=equipment_data.get('power_kw'),
|
||||
capacity=equipment_data.get('capacity'),
|
||||
weight_kg=equipment_data.get('weight_kg'),
|
||||
current_temperature=equipment_data.get('current_temperature'),
|
||||
target_temperature=equipment_data.get('target_temperature'),
|
||||
is_active=equipment_data.get('is_active', True),
|
||||
notes=equipment_data.get('notes'),
|
||||
created_at=adjusted_created_at,
|
||||
updated_at=adjusted_updated_at
|
||||
)
|
||||
db.add(new_equipment)
|
||||
stats["equipment"] += 1
|
||||
|
||||
# Flush to get equipment IDs
|
||||
await db.flush()
|
||||
|
||||
# Clone Quality Check Templates from seed data
|
||||
template_id_map = {}
|
||||
|
||||
for template_data in seed_data.get('quality_check_templates', []):
|
||||
# Transform template ID using XOR
|
||||
from shared.utils.demo_id_transformer import transform_id
|
||||
try:
|
||||
template_uuid = UUID(template_data['id'])
|
||||
transformed_id = transform_id(template_data['id'], virtual_uuid)
|
||||
except ValueError as e:
|
||||
logger.error("Failed to parse template UUID",
|
||||
template_id=template_data['id'],
|
||||
error=str(e))
|
||||
continue
|
||||
|
||||
template_id_map[UUID(template_data['id'])] = transformed_id
|
||||
|
||||
# Parse date fields (supports BASE_TS markers and ISO timestamps)
|
||||
adjusted_created_at = parse_date_field(
|
||||
template_data.get('created_at'),
|
||||
session_time,
|
||||
"created_at"
|
||||
) or session_time
|
||||
adjusted_updated_at = parse_date_field(
|
||||
template_data.get('updated_at'),
|
||||
session_time,
|
||||
"updated_at"
|
||||
) or adjusted_created_at
|
||||
|
||||
new_template = QualityCheckTemplate(
|
||||
id=str(transformed_id),
|
||||
tenant_id=virtual_uuid,
|
||||
name=template_data.get('name'),
|
||||
template_code=template_data.get('template_code'),
|
||||
check_type=template_data.get('check_type'),
|
||||
category=template_data.get('category'),
|
||||
description=template_data.get('description'),
|
||||
instructions=template_data.get('instructions'),
|
||||
parameters=template_data.get('parameters'),
|
||||
thresholds=template_data.get('thresholds'),
|
||||
scoring_criteria=template_data.get('scoring_criteria'),
|
||||
is_active=template_data.get('is_active', True),
|
||||
is_required=template_data.get('is_required', False),
|
||||
is_critical=template_data.get('is_critical', False),
|
||||
weight=template_data.get('weight', 1.0),
|
||||
min_value=template_data.get('min_value'),
|
||||
max_value=template_data.get('max_value'),
|
||||
target_value=template_data.get('target_value'),
|
||||
unit=template_data.get('unit'),
|
||||
tolerance_percentage=template_data.get('tolerance_percentage'),
|
||||
applicable_stages=template_data.get('applicable_stages'),
|
||||
created_by=template_data.get('created_by'),
|
||||
created_at=adjusted_created_at,
|
||||
updated_at=adjusted_updated_at
|
||||
)
|
||||
db.add(new_template)
|
||||
stats["quality_check_templates"] += 1
|
||||
|
||||
# Flush to get template IDs
|
||||
await db.flush()
|
||||
|
||||
# Clone Production Batches from seed data
|
||||
batch_id_map = {}
|
||||
for batch_data in seed_data.get('batches', []):
|
||||
# Transform batch ID using XOR
|
||||
from shared.utils.demo_id_transformer import transform_id
|
||||
try:
|
||||
batch_uuid = UUID(batch_data['id'])
|
||||
transformed_id = transform_id(batch_data['id'], virtual_uuid)
|
||||
except ValueError as e:
|
||||
logger.error("Failed to parse batch UUID",
|
||||
batch_id=batch_data['id'],
|
||||
error=str(e))
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid UUID format in batch data: {str(e)}"
|
||||
)
|
||||
|
||||
batch_id_map[UUID(batch_data['id'])] = transformed_id
|
||||
|
||||
# Adjust dates relative to session creation time
|
||||
adjusted_planned_start = parse_date_field(batch_data.get('planned_start_time'), session_time, "planned_start_time")
|
||||
adjusted_planned_end = parse_date_field(batch_data.get('planned_end_time'), session_time, "planned_end_time")
|
||||
adjusted_actual_start = parse_date_field(batch_data.get('actual_start_time'), session_time, "actual_start_time")
|
||||
adjusted_actual_end = parse_date_field(batch_data.get('actual_end_time'), session_time, "actual_end_time")
|
||||
adjusted_completed = parse_date_field(batch_data.get('completed_at'), session_time, "completed_at")
|
||||
adjusted_created_at = parse_date_field(batch_data.get('created_at'), session_time, "created_at") or session_time
|
||||
adjusted_updated_at = parse_date_field(batch_data.get('updated_at'), session_time, "updated_at") or adjusted_created_at
|
||||
|
||||
# Map status and priority enums
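# Enum[...] indexes by member *name* (e.g. ProductionStatus["COMPLETED"]);
# unknown names fall back to PENDING / MEDIUM below instead of failing the clone.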
|
||||
status_value = batch_data.get('status', 'PENDING')
|
||||
if isinstance(status_value, str):
|
||||
try:
|
||||
status_value = ProductionStatus[status_value]
|
||||
except KeyError:
|
||||
status_value = ProductionStatus.PENDING
|
||||
|
||||
priority_value = batch_data.get('priority', 'MEDIUM')
|
||||
if isinstance(priority_value, str):
|
||||
try:
|
||||
priority_value = ProductionPriority[priority_value]
|
||||
except KeyError:
|
||||
priority_value = ProductionPriority.MEDIUM
|
||||
|
||||
# Map process stage enum
|
||||
process_stage_value = batch_data.get('current_process_stage')
|
||||
if process_stage_value and isinstance(process_stage_value, str):
|
||||
try:
|
||||
process_stage_value = ProcessStage[process_stage_value]
|
||||
except KeyError:
|
||||
process_stage_value = None
|
||||
|
||||
# Transform foreign key references (product_id, recipe_id, order_id, forecast_id)
|
||||
transformed_product_id = None
|
||||
if batch_data.get('product_id'):
|
||||
try:
|
||||
transformed_product_id = str(transform_id(batch_data['product_id'], virtual_uuid))
|
||||
except Exception as e:
|
||||
logger.warning("Failed to transform product_id",
|
||||
product_id=batch_data.get('product_id'),
|
||||
error=str(e))
|
||||
|
||||
transformed_recipe_id = None
|
||||
if batch_data.get('recipe_id'):
|
||||
try:
|
||||
transformed_recipe_id = str(transform_id(batch_data['recipe_id'], virtual_uuid))
|
||||
except Exception as e:
|
||||
logger.warning("Failed to transform recipe_id",
|
||||
recipe_id=batch_data.get('recipe_id'),
|
||||
error=str(e))
|
||||
|
||||
transformed_order_id = None
|
||||
if batch_data.get('order_id'):
|
||||
try:
|
||||
transformed_order_id = str(transform_id(batch_data['order_id'], virtual_uuid))
|
||||
except Exception as e:
|
||||
logger.warning("Failed to transform order_id",
|
||||
order_id=batch_data.get('order_id'),
|
||||
error=str(e))
|
||||
|
||||
transformed_forecast_id = None
|
||||
if batch_data.get('forecast_id'):
|
||||
try:
|
||||
transformed_forecast_id = str(transform_id(batch_data['forecast_id'], virtual_uuid))
|
||||
except Exception as e:
|
||||
logger.warning("Failed to transform forecast_id",
|
||||
forecast_id=batch_data.get('forecast_id'),
|
||||
error=str(e))
|
||||
|
||||
# Transform equipment_used array
|
||||
transformed_equipment = []
|
||||
if batch_data.get('equipment_used'):
|
||||
for equip_id in batch_data['equipment_used']:
|
||||
try:
|
||||
transformed_equipment.append(str(transform_id(equip_id, virtual_uuid)))
|
||||
except Exception as e:
|
||||
logger.warning("Failed to transform equipment_id",
|
||||
equipment_id=equip_id,
|
||||
error=str(e))
|
||||
|
||||
# staff_assigned contains user IDs - these should NOT be transformed
|
||||
# because they reference actual user accounts which are NOT cloned
|
||||
# The demo uses the same user accounts across all virtual tenants
|
||||
staff_assigned = batch_data.get('staff_assigned', [])
|
||||
|
||||
new_batch = ProductionBatch(
|
||||
id=str(transformed_id),
|
||||
tenant_id=virtual_uuid,
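# Prefixing with the session id keeps batch numbers unique when several demo
# sessions are cloned from the same seed data (assumed intent of the prefix).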
|
||||
batch_number=f"{session_id[:8]}-{batch_data.get('batch_number', f'BATCH-{uuid.uuid4().hex[:8].upper()}')}",
|
||||
product_id=transformed_product_id,
|
||||
product_name=batch_data.get('product_name'),
|
||||
recipe_id=transformed_recipe_id,
|
||||
planned_start_time=adjusted_planned_start,
|
||||
planned_end_time=adjusted_planned_end,
|
||||
planned_quantity=batch_data.get('planned_quantity'),
|
||||
planned_duration_minutes=batch_data.get('planned_duration_minutes'),
|
||||
actual_start_time=adjusted_actual_start,
|
||||
actual_end_time=adjusted_actual_end,
|
||||
actual_quantity=batch_data.get('actual_quantity'),
|
||||
actual_duration_minutes=batch_data.get('actual_duration_minutes'),
|
||||
status=status_value,
|
||||
priority=priority_value,
|
||||
current_process_stage=process_stage_value,
|
||||
process_stage_history=batch_data.get('process_stage_history'),
|
||||
pending_quality_checks=batch_data.get('pending_quality_checks'),
|
||||
completed_quality_checks=batch_data.get('completed_quality_checks'),
|
||||
estimated_cost=batch_data.get('estimated_cost'),
|
||||
actual_cost=batch_data.get('actual_cost'),
|
||||
labor_cost=batch_data.get('labor_cost'),
|
||||
material_cost=batch_data.get('material_cost'),
|
||||
overhead_cost=batch_data.get('overhead_cost'),
|
||||
yield_percentage=batch_data.get('yield_percentage'),
|
||||
quality_score=batch_data.get('quality_score'),
|
||||
waste_quantity=batch_data.get('waste_quantity'),
|
||||
defect_quantity=batch_data.get('defect_quantity'),
|
||||
waste_defect_type=batch_data.get('waste_defect_type'),
|
||||
equipment_used=transformed_equipment,
|
||||
staff_assigned=staff_assigned,
|
||||
station_id=batch_data.get('station_id'),
|
||||
order_id=transformed_order_id,
|
||||
forecast_id=transformed_forecast_id,
|
||||
is_rush_order=batch_data.get('is_rush_order', False),
|
||||
is_special_recipe=batch_data.get('is_special_recipe', False),
|
||||
is_ai_assisted=batch_data.get('is_ai_assisted', False),
|
||||
production_notes=batch_data.get('production_notes'),
|
||||
quality_notes=batch_data.get('quality_notes'),
|
||||
delay_reason=batch_data.get('delay_reason'),
|
||||
cancellation_reason=batch_data.get('cancellation_reason'),
|
||||
created_at=adjusted_created_at,
|
||||
updated_at=adjusted_updated_at,
|
||||
completed_at=adjusted_completed
|
||||
)
|
||||
db.add(new_batch)
|
||||
stats["batches"] += 1
|
||||
|
||||
# Flush to get batch IDs
|
||||
await db.flush()
|
||||
|
||||
# Clone Quality Checks from seed data (if any)
|
||||
for check_data in seed_data.get('quality_checks', []):
|
||||
# Transform IDs
|
||||
from shared.utils.demo_id_transformer import transform_id
|
||||
try:
|
||||
check_uuid = UUID(check_data['id'])
|
||||
transformed_id = transform_id(check_data['id'], virtual_uuid)
|
||||
except ValueError as e:
|
||||
logger.error("Failed to parse check UUID",
|
||||
check_id=check_data['id'],
|
||||
error=str(e))
|
||||
continue
|
||||
|
||||
# Map batch_id if it exists in our map
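# (falls back to the original UUID when the referenced batch/template is not
#  part of this seed file; the template_id handling below does the same)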
|
||||
batch_id_value = check_data.get('batch_id')
|
||||
if batch_id_value:
|
||||
batch_id_value = batch_id_map.get(UUID(batch_id_value), UUID(batch_id_value))
|
||||
|
||||
# Map template_id if it exists
|
||||
template_id_value = check_data.get('template_id')
|
||||
if template_id_value:
|
||||
template_id_value = template_id_map.get(UUID(template_id_value), UUID(template_id_value))
|
||||
|
||||
# Parse date fields (supports BASE_TS markers and ISO timestamps)
|
||||
adjusted_check_time = parse_date_field(
|
||||
check_data.get('check_time'),
|
||||
session_time,
|
||||
"check_time"
|
||||
)
|
||||
|
||||
adjusted_created_at = parse_date_field(
|
||||
check_data.get('created_at'),
|
||||
session_time,
|
||||
"created_at"
|
||||
)
|
||||
adjusted_updated_at = parse_date_field(
|
||||
check_data.get('updated_at'),
|
||||
session_time,
|
||||
"updated_at"
|
||||
) or adjusted_created_at
|
||||
|
||||
new_check = QualityCheck(
|
||||
id=str(transformed_id),
|
||||
tenant_id=virtual_uuid,
|
||||
batch_id=str(batch_id_value) if batch_id_value else None,
|
||||
template_id=str(template_id_value) if template_id_value else None,
|
||||
check_type=check_data.get('check_type'),
|
||||
process_stage=check_data.get('process_stage'),
|
||||
check_time=adjusted_check_time,
|
||||
checker_id=check_data.get('checker_id'),
|
||||
quality_score=check_data.get('quality_score'),
|
||||
pass_fail=check_data.get('pass_fail'),
|
||||
defect_count=check_data.get('defect_count'),
|
||||
defect_types=check_data.get('defect_types'),
|
||||
measured_weight=check_data.get('measured_weight'),
|
||||
measured_temperature=check_data.get('measured_temperature'),
|
||||
measured_moisture=check_data.get('measured_moisture'),
|
||||
measured_dimensions=check_data.get('measured_dimensions'),
|
||||
stage_specific_data=check_data.get('stage_specific_data'),
|
||||
target_weight=check_data.get('target_weight'),
|
||||
target_temperature=check_data.get('target_temperature'),
|
||||
target_moisture=check_data.get('target_moisture'),
|
||||
tolerance_percentage=check_data.get('tolerance_percentage'),
|
||||
within_tolerance=check_data.get('within_tolerance'),
|
||||
corrective_action_needed=check_data.get('corrective_action_needed'),
|
||||
corrective_actions=check_data.get('corrective_actions'),
|
||||
template_results=check_data.get('template_results'),
|
||||
criteria_scores=check_data.get('criteria_scores'),
|
||||
check_notes=check_data.get('check_notes'),
|
||||
photos_urls=check_data.get('photos_urls'),
|
||||
certificate_url=check_data.get('certificate_url'),
|
||||
created_at=adjusted_created_at,
|
||||
updated_at=adjusted_updated_at
|
||||
)
|
||||
db.add(new_check)
|
||||
stats["quality_checks"] += 1
|
||||
|
||||
# Clone Production Schedules from seed data (if any)
|
||||
for schedule_data in seed_data.get('production_schedules', []):
|
||||
# Transform IDs
|
||||
from shared.utils.demo_id_transformer import transform_id
|
||||
try:
|
||||
schedule_uuid = UUID(schedule_data['id'])
|
||||
transformed_id = transform_id(schedule_data['id'], virtual_uuid)
|
||||
except ValueError as e:
|
||||
logger.error("Failed to parse schedule UUID",
|
||||
schedule_id=schedule_data['id'],
|
||||
error=str(e))
|
||||
continue
|
||||
|
||||
# Parse date fields (supports BASE_TS markers and ISO timestamps)
|
||||
adjusted_schedule_date = parse_date_field(
|
||||
schedule_data.get('schedule_date'),
|
||||
session_time,
|
||||
"schedule_date"
|
||||
)
|
||||
adjusted_shift_start = parse_date_field(
|
||||
schedule_data.get('shift_start'),
|
||||
session_time,
|
||||
"shift_start"
|
||||
)
|
||||
adjusted_shift_end = parse_date_field(
|
||||
schedule_data.get('shift_end'),
|
||||
session_time,
|
||||
"shift_end"
|
||||
)
|
||||
adjusted_finalized = parse_date_field(
|
||||
schedule_data.get('finalized_at'),
|
||||
session_time,
|
||||
"finalized_at"
|
||||
)
|
||||
adjusted_created_at = parse_date_field(
|
||||
schedule_data.get('created_at'),
|
||||
session_time,
|
||||
"created_at"
|
||||
)
|
||||
adjusted_updated_at = parse_date_field(
|
||||
schedule_data.get('updated_at'),
|
||||
session_time,
|
||||
"updated_at"
|
||||
) or adjusted_created_at
|
||||
|
||||
new_schedule = ProductionSchedule(
|
||||
id=str(transformed_id),
|
||||
tenant_id=virtual_uuid,
|
||||
schedule_date=adjusted_schedule_date,
|
||||
shift_start=adjusted_shift_start,
|
||||
shift_end=adjusted_shift_end,
|
||||
total_capacity_hours=schedule_data.get('total_capacity_hours'),
|
||||
planned_capacity_hours=schedule_data.get('planned_capacity_hours'),
|
||||
actual_capacity_hours=schedule_data.get('actual_capacity_hours'),
|
||||
overtime_hours=schedule_data.get('overtime_hours', 0.0),
|
||||
staff_count=schedule_data.get('staff_count'),
|
||||
equipment_capacity=schedule_data.get('equipment_capacity'),
|
||||
station_assignments=schedule_data.get('station_assignments'),
|
||||
total_batches_planned=schedule_data.get('total_batches_planned', 0),
|
||||
total_batches_completed=schedule_data.get('total_batches_completed', 0),
|
||||
total_quantity_planned=schedule_data.get('total_quantity_planned', 0.0),
|
||||
total_quantity_produced=schedule_data.get('total_quantity_produced', 0.0),
|
||||
is_finalized=schedule_data.get('is_finalized', False),
|
||||
is_active=schedule_data.get('is_active', True),
|
||||
efficiency_percentage=schedule_data.get('efficiency_percentage'),
|
||||
utilization_percentage=schedule_data.get('utilization_percentage'),
|
||||
on_time_completion_rate=schedule_data.get('on_time_completion_rate'),
|
||||
schedule_notes=schedule_data.get('schedule_notes'),
|
||||
schedule_adjustments=schedule_data.get('schedule_adjustments'),
|
||||
created_at=adjusted_created_at,
|
||||
updated_at=adjusted_updated_at,
|
||||
finalized_at=adjusted_finalized
|
||||
)
|
||||
db.add(new_schedule)
|
||||
stats["production_schedules"] += 1
|
||||
|
||||
# Clone Production Capacity from seed data (if any)
|
||||
for capacity_data in seed_data.get('production_capacity', []):
|
||||
# Transform IDs
|
||||
from shared.utils.demo_id_transformer import transform_id
|
||||
try:
|
||||
capacity_uuid = UUID(capacity_data['id'])
|
||||
transformed_id = transform_id(capacity_data['id'], virtual_uuid)
|
||||
except ValueError as e:
|
||||
logger.error("Failed to parse capacity UUID",
|
||||
capacity_id=capacity_data['id'],
|
||||
error=str(e))
|
||||
continue
|
||||
|
||||
# Parse date fields (supports BASE_TS markers and ISO timestamps)
|
||||
adjusted_date = parse_date_field(
|
||||
capacity_data.get('date'),
|
||||
session_time,
|
||||
"date"
|
||||
)
|
||||
adjusted_start_time = parse_date_field(
|
||||
capacity_data.get('start_time'),
|
||||
session_time,
|
||||
"start_time"
|
||||
)
|
||||
adjusted_end_time = parse_date_field(
|
||||
capacity_data.get('end_time'),
|
||||
session_time,
|
||||
"end_time"
|
||||
)
|
||||
adjusted_last_maintenance = parse_date_field(
|
||||
capacity_data.get('last_maintenance_date'),
|
||||
session_time,
|
||||
"last_maintenance_date"
|
||||
)
|
||||
adjusted_created_at = parse_date_field(
|
||||
capacity_data.get('created_at'),
|
||||
session_time,
|
||||
"created_at"
|
||||
)
|
||||
adjusted_updated_at = parse_date_field(
|
||||
capacity_data.get('updated_at'),
|
||||
session_time,
|
||||
"updated_at"
|
||||
) or adjusted_created_at
|
||||
|
||||
new_capacity = ProductionCapacity(
|
||||
id=str(transformed_id),
|
||||
tenant_id=virtual_uuid,
|
||||
resource_type=capacity_data.get('resource_type'),
|
||||
resource_id=capacity_data.get('resource_id'),
|
||||
resource_name=capacity_data.get('resource_name'),
|
||||
date=adjusted_date,
|
||||
start_time=adjusted_start_time,
|
||||
end_time=adjusted_end_time,
|
||||
total_capacity_units=capacity_data.get('total_capacity_units'),
|
||||
allocated_capacity_units=capacity_data.get('allocated_capacity_units'),
|
||||
remaining_capacity_units=capacity_data.get('remaining_capacity_units'),
|
||||
is_available=capacity_data.get('is_available'),
|
||||
is_maintenance=capacity_data.get('is_maintenance'),
|
||||
is_reserved=capacity_data.get('is_reserved'),
|
||||
equipment_type=capacity_data.get('equipment_type'),
|
||||
max_batch_size=capacity_data.get('max_batch_size'),
|
||||
min_batch_size=capacity_data.get('min_batch_size'),
|
||||
setup_time_minutes=capacity_data.get('setup_time_minutes'),
|
||||
cleanup_time_minutes=capacity_data.get('cleanup_time_minutes'),
|
||||
efficiency_rating=capacity_data.get('efficiency_rating'),
|
||||
maintenance_status=capacity_data.get('maintenance_status'),
|
||||
last_maintenance_date=adjusted_last_maintenance,
|
||||
notes=capacity_data.get('notes'),
|
||||
restrictions=capacity_data.get('restrictions'),
|
||||
created_at=adjusted_created_at,
|
||||
updated_at=adjusted_updated_at
|
||||
)
|
||||
db.add(new_capacity)
|
||||
stats["production_capacity"] += 1
|
||||
|
||||
# Note: Edge cases are now handled exclusively through JSON seed data
|
||||
# The seed data files already contain comprehensive edge cases including:
|
||||
# - Overdue batches (should have started 2 hours ago)
|
||||
# - In-progress batches (currently being processed)
|
||||
# - Upcoming batches (scheduled for later today/tomorrow)
|
||||
# This ensures standardization and single source of truth for demo data
|
||||
|
||||
logger.info(
|
||||
"Edge cases handled by JSON seed data - no manual creation needed",
|
||||
seed_data_edge_cases="overdue_batches, in_progress_batches, upcoming_batches"
|
||||
)
|
||||
|
||||
# Commit cloned data
|
||||
await db.commit()
|
||||
|
||||
# NOTE: Alert generation removed - alerts are now generated automatically by the
|
||||
# production alert service which runs scheduled checks at appropriate intervals.
|
||||
# This eliminates duplicate alerts and provides a more realistic demo experience.
|
||||
stats["alerts_generated"] = 0
|
||||
|
||||
# Calculate total from non-alert stats
|
||||
total_records = (stats["equipment"] + stats["batches"] + stats["production_schedules"] +
|
||||
stats["quality_check_templates"] + stats["quality_checks"] +
|
||||
stats["production_capacity"])
|
||||
duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
|
||||
|
||||
logger.info(
|
||||
"Production data cloning completed",
|
||||
virtual_tenant_id=virtual_tenant_id,
|
||||
total_records=total_records,
|
||||
stats=stats,
|
||||
duration_ms=duration_ms
|
||||
)
|
||||
|
||||
return {
|
||||
"service": "production",
|
||||
"status": "completed",
|
||||
"records_cloned": total_records,
|
||||
"duration_ms": duration_ms,
|
||||
"details": stats
|
||||
}
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("Invalid UUID format", error=str(e))
|
||||
raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to clone production data",
|
||||
error=str(e),
|
||||
virtual_tenant_id=virtual_tenant_id,
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
# Rollback on error
|
||||
await db.rollback()
|
||||
|
||||
return {
|
||||
"service": "production",
|
||||
"status": "failed",
|
||||
"records_cloned": 0,
|
||||
"duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
|
||||
@router.get("/clone/health")
|
||||
async def clone_health_check():
|
||||
"""
|
||||
Health check for internal cloning endpoint
|
||||
Used by orchestrator to verify service availability
|
||||
"""
|
||||
return {
|
||||
"service": "production",
|
||||
"clone_endpoint": "available",
|
||||
"version": "2.0.0"
|
||||
}
|
||||
|
||||
|
||||
@router.delete("/tenant/{virtual_tenant_id}")
|
||||
async def delete_demo_data(
|
||||
virtual_tenant_id: str,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""Delete all production data for a virtual demo tenant"""
|
||||
logger.info("Deleting production data for virtual tenant", virtual_tenant_id=virtual_tenant_id)
|
||||
start_time = datetime.now(timezone.utc)
|
||||
|
||||
try:
|
||||
virtual_uuid = uuid.UUID(virtual_tenant_id)
|
||||
|
||||
# Count records
|
||||
batch_count = await db.scalar(select(func.count(ProductionBatch.id)).where(ProductionBatch.tenant_id == virtual_uuid))
|
||||
schedule_count = await db.scalar(select(func.count(ProductionSchedule.id)).where(ProductionSchedule.tenant_id == virtual_uuid))
|
||||
quality_count = await db.scalar(select(func.count(QualityCheck.id)).where(QualityCheck.tenant_id == virtual_uuid))
|
||||
equipment_count = await db.scalar(select(func.count(Equipment.id)).where(Equipment.tenant_id == virtual_uuid))
|
||||
|
||||
# Delete in order
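# Dependent rows (quality checks) go first, likely so foreign keys to batches
# and templates are released before their parents. Note the response counts
# below cover batches, schedules, quality checks and equipment only --
# templates and capacity rows are deleted but not included in the totals.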
|
||||
await db.execute(delete(QualityCheck).where(QualityCheck.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(ProductionBatch).where(ProductionBatch.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(ProductionSchedule).where(ProductionSchedule.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(Equipment).where(Equipment.tenant_id == virtual_uuid))
|
||||
await db.execute(delete(ProductionCapacity).where(ProductionCapacity.tenant_id == virtual_uuid))
|
||||
await db.commit()
|
||||
|
||||
duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
|
||||
logger.info("Production data deleted successfully", virtual_tenant_id=virtual_tenant_id, duration_ms=duration_ms)
|
||||
|
||||
return {
|
||||
"service": "production",
|
||||
"status": "deleted",
|
||||
"virtual_tenant_id": virtual_tenant_id,
|
||||
"records_deleted": {
|
||||
"batches": batch_count,
|
||||
"schedules": schedule_count,
|
||||
"quality_checks": quality_count,
|
||||
"equipment": equipment_count,
|
||||
"total": batch_count + schedule_count + quality_count + equipment_count
|
||||
},
|
||||
"duration_ms": duration_ms
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error("Failed to delete production data", error=str(e), exc_info=True)
|
||||
await db.rollback()
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
394
services/production/app/api/ml_insights.py
Normal file
@@ -0,0 +1,394 @@
|
||||
"""
|
||||
ML Insights API Endpoints for Production Service
|
||||
|
||||
Provides endpoints to trigger ML insight generation for:
|
||||
- Production yield predictions
|
||||
- Quality optimization
|
||||
- Process efficiency analysis
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional, List
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timedelta, timezone
|
||||
import structlog
|
||||
import pandas as pd
|
||||
|
||||
from app.core.database import get_db
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/api/v1/tenants/{tenant_id}/production/ml/insights",
|
||||
tags=["ML Insights"]
|
||||
)
|
||||
|
||||
|
||||
# ================================================================
|
||||
# REQUEST/RESPONSE SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class YieldPredictionRequest(BaseModel):
|
||||
"""Request schema for yield prediction"""
|
||||
recipe_ids: Optional[List[str]] = Field(
|
||||
None,
|
||||
description="Specific recipe IDs to analyze. If None, analyzes all recipes"
|
||||
)
|
||||
lookback_days: int = Field(
|
||||
90,
|
||||
description="Days of historical production to analyze",
|
||||
ge=30,
|
||||
le=365
|
||||
)
|
||||
min_history_runs: int = Field(
|
||||
30,
|
||||
description="Minimum production runs required",
|
||||
ge=10,
|
||||
le=100
|
||||
)
|
||||
|
||||
|
||||
class YieldPredictionResponse(BaseModel):
|
||||
"""Response schema for yield prediction"""
|
||||
success: bool
|
||||
message: str
|
||||
tenant_id: str
|
||||
recipes_analyzed: int
|
||||
total_insights_generated: int
|
||||
total_insights_posted: int
|
||||
recipes_with_issues: int
|
||||
insights_by_recipe: dict
|
||||
errors: List[str] = []
|
||||
|
||||
|
||||
# ================================================================
|
||||
# API ENDPOINTS
|
||||
# ================================================================
|
||||
|
||||
@router.post("/predict-yields", response_model=YieldPredictionResponse)
|
||||
async def trigger_yield_prediction(
|
||||
tenant_id: str,
|
||||
request_data: YieldPredictionRequest,
|
||||
request: Request,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Trigger yield prediction for production recipes.
|
||||
|
||||
This endpoint:
|
||||
1. Fetches historical production data for specified recipes
|
||||
2. Runs the YieldInsightsOrchestrator to predict yields
|
||||
3. Generates insights about yield optimization opportunities
|
||||
4. Posts insights to AI Insights Service
|
||||
5. Publishes recommendation events to RabbitMQ
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant UUID
|
||||
request_data: Prediction parameters
|
||||
request: FastAPI request (for app state access)
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
YieldPredictionResponse with prediction results
|
||||
"""
|
||||
logger.info(
|
||||
"ML insights yield prediction requested",
|
||||
tenant_id=tenant_id,
|
||||
recipe_ids=request_data.recipe_ids,
|
||||
lookback_days=request_data.lookback_days
|
||||
)
|
||||
|
||||
try:
|
||||
# Import ML orchestrator and clients
|
||||
from app.ml.yield_insights_orchestrator import YieldInsightsOrchestrator
|
||||
from shared.clients.recipes_client import RecipesServiceClient
|
||||
from app.core.config import settings
|
||||
|
||||
# Get event publisher from app state (if available)
|
||||
event_publisher = getattr(request.app.state, 'event_publisher', None) if hasattr(request, 'app') else None
|
||||
|
||||
# Initialize orchestrator and recipes client
|
||||
orchestrator = YieldInsightsOrchestrator(
|
||||
event_publisher=event_publisher
|
||||
)
|
||||
recipes_client = RecipesServiceClient(settings)
|
||||
|
||||
# Get recipes to analyze from recipes service via API
|
||||
if request_data.recipe_ids:
|
||||
# Fetch specific recipes
|
||||
recipes = []
|
||||
for recipe_id in request_data.recipe_ids:
|
||||
recipe = await recipes_client.get_recipe_by_id(
|
||||
recipe_id=recipe_id,
|
||||
tenant_id=tenant_id
|
||||
)
|
||||
if recipe:
|
||||
recipes.append(recipe)
|
||||
else:
|
||||
# Fetch all recipes for tenant (limit to 10)
|
||||
all_recipes = await recipes_client.get_all_recipes(tenant_id=tenant_id)
|
||||
recipes = all_recipes[:10] if all_recipes else [] # Limit to prevent timeout
|
||||
|
||||
if not recipes:
|
||||
return YieldPredictionResponse(
|
||||
success=False,
|
||||
message="No recipes found for analysis",
|
||||
tenant_id=tenant_id,
|
||||
recipes_analyzed=0,
|
||||
total_insights_generated=0,
|
||||
total_insights_posted=0,
|
||||
recipes_with_issues=0,
|
||||
insights_by_recipe={},
|
||||
errors=["No recipes found"]
|
||||
)
|
||||
|
||||
# Calculate date range for production history
|
||||
end_date = datetime.now(timezone.utc)  # timezone-aware, consistent with the rest of the service
|
||||
start_date = end_date - timedelta(days=request_data.lookback_days)
|
||||
|
||||
# Process each recipe
|
||||
total_insights_generated = 0
|
||||
total_insights_posted = 0
|
||||
recipes_with_issues = 0
|
||||
insights_by_recipe = {}
|
||||
errors = []
|
||||
|
||||
for recipe in recipes:
|
||||
try:
|
||||
recipe_id = str(recipe['id'])
|
||||
recipe_name = recipe.get('name', 'Unknown Recipe')
|
||||
logger.info(f"Analyzing yield for {recipe_name} ({recipe_id})")
|
||||
|
||||
# Fetch real production batch history from database
|
||||
from app.models.production import ProductionBatch, ProductionStatus
|
||||
from sqlalchemy import select
|
||||
|
||||
batch_query = select(ProductionBatch).where(
|
||||
ProductionBatch.tenant_id == UUID(tenant_id),
|
||||
ProductionBatch.recipe_id == UUID(recipe_id), # Use the extracted UUID
|
||||
ProductionBatch.actual_start_time >= start_date,
|
||||
ProductionBatch.actual_start_time <= end_date,
|
||||
ProductionBatch.status == ProductionStatus.COMPLETED,
|
||||
ProductionBatch.actual_quantity.isnot(None)
|
||||
).order_by(ProductionBatch.actual_start_time)
|
||||
|
||||
batch_result = await db.execute(batch_query)
|
||||
batches = batch_result.scalars().all()
|
||||
|
||||
if len(batches) < request_data.min_history_runs:
|
||||
logger.warning(
|
||||
f"Insufficient production history for recipe {recipe_id}: "
|
||||
f"{len(batches)} batches < {request_data.min_history_runs} required"
|
||||
)
|
||||
continue
|
||||
|
||||
# Create production history DataFrame from real batches
|
||||
production_data = []
|
||||
for batch in batches:
|
||||
# Calculate yield percentage
|
||||
if batch.planned_quantity and batch.actual_quantity:
|
||||
yield_pct = (batch.actual_quantity / batch.planned_quantity) * 100
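# e.g. planned 200 units and actual 185 produced -> yield_pct = 92.5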
|
||||
else:
|
||||
continue # Skip batches without complete data
|
||||
|
||||
production_data.append({
|
||||
'production_run_id': str(batch.id), # Required: unique identifier for each production run
|
||||
'recipe_id': str(batch.recipe_id), # Required: recipe identifier
|
||||
'started_at': batch.actual_start_time,
|
||||
'completed_at': batch.actual_end_time, # Optional but useful for duration analysis
|
||||
'batch_size': float(batch.planned_quantity), # Use planned_quantity as batch_size
|
||||
'planned_quantity': float(batch.planned_quantity),
|
||||
'actual_quantity': float(batch.actual_quantity),
|
||||
'yield_percentage': yield_pct,
|
||||
'staff_assigned': batch.staff_assigned if batch.staff_assigned else ['unknown'],
|
||||
'batch_number': batch.batch_number,
|
||||
'equipment_id': batch.equipment_used[0] if batch.equipment_used and len(batch.equipment_used) > 0 else None,
|
||||
'notes': batch.quality_notes # Optional quality notes
|
||||
})
|
||||
|
||||
if not production_data:
|
||||
logger.warning(
|
||||
f"No valid production data for recipe {recipe_id}"
|
||||
)
|
||||
continue
|
||||
|
||||
production_history = pd.DataFrame(production_data)
|
||||
|
||||
# Debug: Log DataFrame columns and sample data
|
||||
logger.debug(
|
||||
"Production history DataFrame created",
|
||||
recipe_id=recipe_id,
|
||||
columns=list(production_history.columns),
|
||||
sample_data=production_history.head(1).to_dict('records') if len(production_history) > 0 else None
|
||||
)
|
||||
|
||||
# Run yield analysis
|
||||
results = await orchestrator.analyze_and_post_insights(
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
production_history=production_history,
|
||||
min_history_runs=request_data.min_history_runs
|
||||
)
|
||||
|
||||
# Track results
|
||||
total_insights_generated += results['insights_generated']
|
||||
total_insights_posted += results['insights_posted']
|
||||
|
||||
baseline_stats = results.get('baseline_stats', {})
|
||||
mean_yield = baseline_stats.get('mean_yield', 100)
|
||||
if mean_yield < 90:
|
||||
recipes_with_issues += 1
|
||||
|
||||
insights_by_recipe[recipe_id] = {
|
||||
'recipe_name': recipe_name,
|
||||
'insights_posted': results['insights_posted'],
|
||||
'mean_yield': mean_yield,
|
||||
'patterns': len(results.get('patterns', []))
|
||||
}
|
||||
|
||||
logger.info(
|
||||
f"Recipe {recipe_id} analysis complete",
|
||||
insights_posted=results['insights_posted'],
|
||||
mean_yield=mean_yield
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Error analyzing recipe {recipe_id}: {str(e)}"
|
||||
logger.error(error_msg, exc_info=True)
|
||||
errors.append(error_msg)
|
||||
|
||||
# Close orchestrator
|
||||
await orchestrator.close()
|
||||
|
||||
# Build response
|
||||
response = YieldPredictionResponse(
|
||||
success=total_insights_posted > 0,
|
||||
message=f"Successfully analyzed {len([r for r in recipes if isinstance(r, dict)])} recipes, generated {total_insights_posted} insights",
|
||||
tenant_id=tenant_id,
|
||||
recipes_analyzed=len(recipes),
|
||||
total_insights_generated=total_insights_generated,
|
||||
total_insights_posted=total_insights_posted,
|
||||
recipes_with_issues=recipes_with_issues,
|
||||
insights_by_recipe=insights_by_recipe,
|
||||
errors=errors
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"ML insights yield prediction complete",
|
||||
tenant_id=tenant_id,
|
||||
total_insights=total_insights_posted,
|
||||
recipes_with_issues=recipes_with_issues
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"ML insights yield prediction failed",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Yield prediction failed: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/health")
|
||||
async def ml_insights_health():
|
||||
"""Health check for ML insights endpoints"""
|
||||
return {
|
||||
"status": "healthy",
|
||||
"service": "production-ml-insights",
|
||||
"endpoints": [
|
||||
"POST /ml/insights/predict-yields"
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
# ================================================================
|
||||
# INTERNAL ENDPOINTS (for demo-session service)
|
||||
# ================================================================
|
||||
|
||||
# Create a separate router for internal endpoints to avoid the tenant prefix
|
||||
internal_router = APIRouter(
|
||||
tags=["ML Insights - Internal"]
|
||||
)
|
||||
|
||||
|
||||
@internal_router.post("/api/v1/tenants/{tenant_id}/production/internal/ml/generate-yield-insights")
|
||||
async def generate_yield_insights_internal(
|
||||
tenant_id: str,
|
||||
request: Request,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Internal endpoint to trigger yield insights generation for demo sessions.
|
||||
|
||||
This endpoint is called by the demo-session service after cloning data.
|
||||
It uses the same ML logic as the public endpoint but with optimized defaults.
|
||||
|
||||
Security: Protected by x-internal-service header check.
|
||||
|
||||
Args:
|
||||
tenant_id: The tenant UUID
|
||||
request: FastAPI request object
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
{
|
||||
"insights_posted": int,
|
||||
"tenant_id": str,
|
||||
"status": str
|
||||
}
|
||||
"""
|
||||
# Verify internal service header
|
||||
if not request or request.headers.get("x-internal-service") not in ["demo-session", "internal"]:
|
||||
logger.warning("Unauthorized internal API call", tenant_id=tenant_id)
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="This endpoint is for internal service use only"
|
||||
)
|
||||
|
||||
logger.info("Internal yield insights generation triggered", tenant_id=tenant_id)
|
||||
|
||||
try:
|
||||
# Use the existing yield prediction logic with sensible defaults
|
||||
request_data = YieldPredictionRequest(
|
||||
recipe_ids=None, # Analyze all recipes
|
||||
lookback_days=90, # 3 months of history
|
||||
min_history_runs=20 # Minimum 20 production runs required
|
||||
)
|
||||
|
||||
# Call the existing yield prediction endpoint logic
|
||||
result = await trigger_yield_prediction(
|
||||
tenant_id=tenant_id,
|
||||
request_data=request_data,
|
||||
request=request,
|
||||
db=db
|
||||
)
|
||||
|
||||
# Return simplified response for internal use
|
||||
return {
|
||||
"insights_posted": result.total_insights_posted,
|
||||
"tenant_id": tenant_id,
|
||||
"status": "success" if result.success else "failed",
|
||||
"message": result.message,
|
||||
"recipes_analyzed": result.recipes_analyzed,
|
||||
"recipes_with_issues": result.recipes_with_issues
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Internal yield insights generation failed",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Internal yield insights generation failed: {str(e)}"
|
||||
)
|
||||
241
services/production/app/api/orchestrator.py
Normal file
@@ -0,0 +1,241 @@
|
||||
# ================================================================
|
||||
# services/production/app/api/orchestrator.py
|
||||
# ================================================================
|
||||
"""
|
||||
Production Orchestrator API - Endpoints for orchestrated production scheduling
|
||||
Called by the Orchestrator Service to generate production schedules from forecast data
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Request
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import date
|
||||
from uuid import UUID
|
||||
from pydantic import BaseModel, Field
|
||||
import structlog
|
||||
|
||||
from shared.routing import RouteBuilder
|
||||
from app.services.production_service import ProductionService
|
||||
from app.schemas.production import ProductionScheduleResponse
|
||||
from app.core.config import settings
|
||||
|
||||
logger = structlog.get_logger()
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["production-orchestrator"])
|
||||
|
||||
|
||||
def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
# ================================================================
|
||||
# REQUEST/RESPONSE SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class GenerateScheduleRequest(BaseModel):
|
||||
"""
|
||||
Request to generate production schedule (called by Orchestrator)
|
||||
|
||||
The Orchestrator calls Forecasting Service first, then passes forecast data here.
|
||||
Production Service uses this data to determine what to produce.
|
||||
|
||||
NEW: Accepts cached data snapshots from Orchestrator to eliminate duplicate API calls.
|
||||
"""
|
||||
forecast_data: Dict[str, Any] = Field(..., description="Forecast data from Forecasting Service")
|
||||
target_date: Optional[date] = Field(None, description="Target production date")
|
||||
planning_horizon_days: int = Field(default=1, ge=1, le=7, description="Planning horizon in days")
|
||||
|
||||
# NEW: Cached data from Orchestrator
|
||||
inventory_data: Optional[Dict[str, Any]] = Field(None, description="Cached inventory snapshot from Orchestrator")
|
||||
recipes_data: Optional[Dict[str, Any]] = Field(None, description="Cached recipes snapshot from Orchestrator")
|
||||
|
||||
class Config:
|
||||
json_schema_extra = {
|
||||
"example": {
|
||||
"forecast_data": {
|
||||
"forecasts": [
|
||||
{
|
||||
"product_id": "uuid-here",
|
||||
"predicted_demand": 100.0,
|
||||
"confidence_score": 0.85
|
||||
}
|
||||
],
|
||||
"forecast_id": "uuid-here",
|
||||
"generated_at": "2025-01-30T10:00:00Z"
|
||||
},
|
||||
"target_date": "2025-01-31",
|
||||
"planning_horizon_days": 1
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class GenerateScheduleResponse(BaseModel):
|
||||
"""Response from generate_schedule endpoint"""
|
||||
success: bool
|
||||
message: str
|
||||
schedule_id: Optional[UUID] = None
|
||||
schedule_number: Optional[str] = None
|
||||
batches_created: int = 0
|
||||
total_planned_quantity: float = 0.0
|
||||
warnings: List[str] = []
|
||||
errors: List[str] = []
|
||||
|
||||
class Config:
|
||||
json_schema_extra = {
|
||||
"example": {
|
||||
"success": True,
|
||||
"message": "Production schedule generated successfully",
|
||||
"schedule_id": "uuid-here",
|
||||
"schedule_number": "PROD-2025-01-30-001",
|
||||
"batches_created": 5,
|
||||
"total_planned_quantity": 500.0,
|
||||
"warnings": [],
|
||||
"errors": []
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# ================================================================
|
||||
# ORCHESTRATOR ENTRY POINT
|
||||
# ================================================================
|
||||
|
||||
@router.post(
|
||||
route_builder.build_operations_route("generate-schedule"),
|
||||
response_model=GenerateScheduleResponse
|
||||
)
|
||||
async def generate_production_schedule(
|
||||
tenant_id: UUID = Path(...),
|
||||
request_data: GenerateScheduleRequest = ...,
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Generate production schedule from forecast data (called by Orchestrator)
|
||||
|
||||
This is the main entry point for orchestrated production planning.
|
||||
The Orchestrator calls Forecasting Service first, then passes forecast data here.
|
||||
|
||||
Flow:
|
||||
1. Receive forecast data from orchestrator
|
||||
2. Parse forecast to extract product demands
|
||||
3. Check inventory levels for each product
|
||||
4. Calculate production quantities needed
|
||||
5. Create production schedule and batches
|
||||
6. Return schedule summary
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant UUID
|
||||
request_data: Schedule generation request with forecast data
|
||||
|
||||
Returns:
|
||||
GenerateScheduleResponse with schedule details and created batches
|
||||
"""
|
||||
try:
|
||||
logger.info("Generate production schedule endpoint called",
|
||||
tenant_id=str(tenant_id),
|
||||
has_forecast_data=bool(request_data.forecast_data))
|
||||
|
||||
target_date = request_data.target_date or date.today()
|
||||
forecast_data = request_data.forecast_data
|
||||
|
||||
# Parse forecast data from orchestrator
|
||||
forecasts = _parse_forecast_data(forecast_data)
|
||||
|
||||
if not forecasts:
|
||||
return GenerateScheduleResponse(
|
||||
success=False,
|
||||
message="No forecast data provided",
|
||||
errors=["Forecast data is empty or invalid"]
|
||||
)
|
||||
|
||||
# Generate production schedule using the service (with cached data if available)
|
||||
result = await production_service.generate_production_schedule_from_forecast(
|
||||
tenant_id=tenant_id,
|
||||
target_date=target_date,
|
||||
forecasts=forecasts,
|
||||
planning_horizon_days=request_data.planning_horizon_days,
|
||||
inventory_data=request_data.inventory_data, # NEW: Pass cached inventory
|
||||
recipes_data=request_data.recipes_data # NEW: Pass cached recipes
|
||||
)
|
||||
|
||||
logger.info("Production schedule generated successfully",
|
||||
tenant_id=str(tenant_id),
|
||||
schedule_id=str(result.get('schedule_id')) if result.get('schedule_id') else None,
|
||||
batches_created=result.get('batches_created', 0))
|
||||
|
||||
return GenerateScheduleResponse(
|
||||
success=True,
|
||||
message="Production schedule generated successfully",
|
||||
schedule_id=result.get('schedule_id'),
|
||||
schedule_number=result.get('schedule_number'),
|
||||
batches_created=result.get('batches_created', 0),
|
||||
total_planned_quantity=result.get('total_planned_quantity', 0.0),
|
||||
warnings=result.get('warnings', []),
|
||||
errors=[]
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error generating production schedule",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
return GenerateScheduleResponse(
|
||||
success=False,
|
||||
message="Failed to generate production schedule",
|
||||
errors=[str(e)]
|
||||
)
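# --- Illustrative example (not part of the original source) -----------------
# A minimal sketch of how the orchestrator might call this endpoint, assuming
# build_operations_route("generate-schedule") resolves to
# /api/v1/tenants/{tenant_id}/production/operations/generate-schedule and using
# only the request fields referenced above; all values are invented.
#
#     POST /api/v1/tenants/<tenant_id>/production/operations/generate-schedule
#     {
#         "target_date": "2025-01-30",
#         "planning_horizon_days": 7,
#         "forecast_data": {"forecast_id": "...", "forecasts": [...]},
#         "inventory_data": null,
#         "recipes_data": null
#     }
#
# On success the response mirrors GenerateScheduleResponse: schedule_id,
# schedule_number, batches_created, total_planned_quantity, warnings, errors.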
|
||||
|
||||
|
||||
# ================================================================
|
||||
# HELPER FUNCTIONS
|
||||
# ================================================================
|
||||
|
||||
def _parse_forecast_data(forecast_data: Dict[str, Any]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Parse forecast data received from orchestrator
|
||||
|
||||
Expected format from Forecasting Service via Orchestrator:
|
||||
{
|
||||
"forecasts": [
|
||||
{
|
||||
"product_id": "uuid",
|
||||
"inventory_product_id": "uuid", # Alternative field name
|
||||
"predicted_demand": 100.0,
|
||||
"predicted_value": 100.0, # Alternative field name
|
||||
"confidence_score": 0.85,
|
||||
...
|
||||
}
|
||||
],
|
||||
"forecast_id": "uuid",
|
||||
"generated_at": "2025-01-30T10:00:00Z"
|
||||
}
|
||||
"""
|
||||
forecasts = []
|
||||
|
||||
forecast_list = forecast_data.get('forecasts', [])
|
||||
for forecast_item in forecast_list:
|
||||
# Extract product ID (try multiple field names)
|
||||
product_id = (
|
||||
forecast_item.get('product_id') or
|
||||
forecast_item.get('inventory_product_id') or
|
||||
forecast_item.get('item_id')
|
||||
)
|
||||
|
||||
# Extract predicted demand (try multiple field names)
|
||||
predicted_demand = (
|
||||
forecast_item.get('predicted_demand') or
|
||||
forecast_item.get('predicted_value') or
|
||||
forecast_item.get('demand') or
|
||||
0
|
||||
)
|
||||
|
||||
if product_id and predicted_demand > 0:
|
||||
forecasts.append({
|
||||
'product_id': product_id,
|
||||
'predicted_demand': float(predicted_demand),
|
||||
'confidence_score': forecast_item.get('confidence_score', 0.8),
|
||||
'lower_bound': forecast_item.get('lower_bound', 0),
|
||||
'upper_bound': forecast_item.get('upper_bound', 0),
|
||||
'forecast_id': forecast_data.get('forecast_id'),
|
||||
})
|
||||
|
||||
return forecasts
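# --- Illustrative example (not part of the original source) -----------------
# A small sketch of the parser above applied to a payload shaped like the
# docstring; the identifiers are made up.
#
#     sample = {
#         "forecast_id": "f-0001",
#         "forecasts": [
#             {"product_id": "p-0001", "predicted_demand": 120.0,
#              "confidence_score": 0.9},
#             {"inventory_product_id": "p-0002", "predicted_value": 80.0},
#         ],
#     }
#     parsed = _parse_forecast_data(sample)
#     # -> two entries; the second falls back to "predicted_value" and the
#     #    default confidence_score of 0.8, and both carry the forecast_id.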
|
||||
357
services/production/app/api/production_batches.py
Normal file
@@ -0,0 +1,357 @@
|
||||
# services/production/app/api/production_batches.py
|
||||
"""
|
||||
Production Batches API - ATOMIC CRUD operations on ProductionBatch model
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
|
||||
from typing import Optional
|
||||
from datetime import date
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.auth.access_control import require_user_role
|
||||
from shared.routing import RouteBuilder
|
||||
from shared.security import create_audit_logger, AuditSeverity, AuditAction
|
||||
from app.core.database import get_db
|
||||
from app.services.production_service import ProductionService
|
||||
from app.models import AuditLog
|
||||
from app.schemas.production import (
|
||||
ProductionBatchCreate,
|
||||
ProductionBatchUpdate,
|
||||
ProductionBatchStatusUpdate,
|
||||
ProductionBatchResponse,
|
||||
ProductionBatchListResponse,
|
||||
ProductionStatusEnum
|
||||
)
|
||||
from app.core.config import settings
|
||||
from app.utils.cache import get_cached, set_cached, make_cache_key
|
||||
from app.services.production_alert_service import ProductionAlertService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
async def get_production_alert_service(request: Request) -> Optional[ProductionAlertService]:
|
||||
"""Dependency injection for production alert service"""
|
||||
# Get the alert service from app state, which is where it's stored during app startup
|
||||
alert_service = getattr(request.app.state, 'production_alert_service', None)
|
||||
if not alert_service:
|
||||
logger.warning("Production alert service not available in app state")
|
||||
return None
|
||||
return alert_service
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["production-batches"])
|
||||
|
||||
# Initialize audit logger with the production service's AuditLog model
|
||||
audit_logger = create_audit_logger("production-service", AuditLog)
|
||||
|
||||
|
||||
def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("batches"),
|
||||
response_model=ProductionBatchListResponse
|
||||
)
|
||||
async def list_production_batches(
|
||||
tenant_id: UUID = Path(...),
|
||||
status: Optional[ProductionStatusEnum] = Query(None, description="Filter by status"),
|
||||
product_id: Optional[UUID] = Query(None, description="Filter by product"),
|
||||
order_id: Optional[UUID] = Query(None, description="Filter by order"),
|
||||
start_date: Optional[date] = Query(None, description="Filter from date"),
|
||||
end_date: Optional[date] = Query(None, description="Filter to date"),
|
||||
page: int = Query(1, ge=1, description="Page number"),
|
||||
page_size: int = Query(50, ge=1, le=100, description="Page size"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""List batches with filters: date, status, product, order_id (with Redis caching - 20s TTL)"""
|
||||
try:
|
||||
# PERFORMANCE OPTIMIZATION: Cache frequently accessed queries (status filter, first page)
|
||||
cache_key = None
|
||||
if page == 1 and product_id is None and order_id is None and start_date is None and end_date is None:
|
||||
# Cache simple status-filtered queries (common for dashboards)
|
||||
cache_key = make_cache_key(
|
||||
"production_batches",
|
||||
str(tenant_id),
|
||||
status=status.value if status else None,
|
||||
page_size=page_size
|
||||
)
|
||||
cached_result = await get_cached(cache_key)
|
||||
if cached_result is not None:
|
||||
logger.debug("Cache hit for production batches", cache_key=cache_key, tenant_id=str(tenant_id), status=status)
|
||||
return ProductionBatchListResponse(**cached_result)
|
||||
|
||||
filters = {
|
||||
"status": status,
|
||||
"product_id": str(product_id) if product_id else None,
|
||||
"order_id": str(order_id) if order_id else None,
|
||||
"start_date": start_date,
|
||||
"end_date": end_date
|
||||
}
|
||||
|
||||
batch_list = await production_service.get_production_batches_list(tenant_id, filters, page, page_size)
|
||||
|
||||
# Cache the result if applicable (20s TTL for production batches)
|
||||
if cache_key:
|
||||
await set_cached(cache_key, batch_list.model_dump(), ttl=20)
|
||||
logger.debug("Cached production batches", cache_key=cache_key, ttl=20, tenant_id=str(tenant_id), status=status)
|
||||
|
||||
logger.info("Retrieved production batches list",
|
||||
tenant_id=str(tenant_id), filters=filters)
|
||||
|
||||
return batch_list
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error listing production batches",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to list production batches")
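# --- Illustrative example (not part of the original source) -----------------
# Rough sketch of a dashboard poll against this endpoint, assuming
# build_base_route("batches") resolves to
# /api/v1/tenants/{tenant_id}/production/batches (matching the hardcoded
# capacity-by-date route elsewhere in this service); the status value is a
# placeholder for whatever ProductionStatusEnum defines.
#
#     GET /api/v1/tenants/<tenant_id>/production/batches?status=<status>&page=1&page_size=50
#
# Because page == 1 and no product/order/date filters are set, the result is
# served from Redis for up to ~20 seconds under a key built from the tenant,
# status and page size.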
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("batches"),
|
||||
response_model=ProductionBatchResponse
|
||||
)
|
||||
async def create_production_batch(
|
||||
batch_data: ProductionBatchCreate,
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service),
|
||||
request: Request = None,
|
||||
alert_service: ProductionAlertService = Depends(get_production_alert_service)
|
||||
):
|
||||
"""Create a new production batch"""
|
||||
try:
|
||||
batch = await production_service.create_production_batch(tenant_id, batch_data)
|
||||
|
||||
# Trigger Start Production alert
|
||||
if alert_service:
|
||||
try:
|
||||
# Generate reasoning data for the batch
|
||||
reasoning_data = {
|
||||
"type": "manual_creation",
|
||||
"parameters": {
|
||||
"product_name": batch.product_name,
|
||||
"planned_quantity": batch.planned_quantity,
|
||||
"priority": batch.priority.value if batch.priority else "MEDIUM"
|
||||
},
|
||||
"urgency": {
|
||||
"level": "normal",
|
||||
"ready_by_time": batch.planned_start_time.strftime('%H:%M') if batch.planned_start_time else "unknown"
|
||||
},
|
||||
"metadata": {
|
||||
"trigger_source": "manual_creation",
|
||||
"created_by": current_user.get("user_id", "unknown"),
|
||||
"is_ai_assisted": False
|
||||
}
|
||||
}
|
||||
|
||||
# Update batch with reasoning data
# NOTE: get_db yields an AsyncSession from an async generator, so it cannot
# be consumed with next(); acquire a session explicitly and merge the batch
# (created in the service's own session) before committing the change.
from app.core.database import get_db
async for db in get_db():
    batch.reasoning_data = reasoning_data
    await db.merge(batch)
    await db.commit()
    break
|
||||
|
||||
# Emit Start Production alert
|
||||
await alert_service.emit_start_production_alert(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch.id,
|
||||
product_name=batch.product_name,
|
||||
batch_number=batch.batch_number,
|
||||
reasoning_data=reasoning_data,
|
||||
planned_start_time=batch.planned_start_time.isoformat() if batch.planned_start_time else None
|
||||
)
|
||||
|
||||
logger.info("Start Production alert triggered for batch",
|
||||
batch_id=str(batch.id), tenant_id=str(tenant_id))
|
||||
|
||||
except Exception as alert_error:
|
||||
logger.error("Failed to trigger Start Production alert",
|
||||
error=str(alert_error), batch_id=str(batch.id))
|
||||
# Don't fail the batch creation if alert fails
|
||||
|
||||
logger.info("Created production batch",
|
||||
batch_id=str(batch.id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchResponse.model_validate(batch)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Invalid batch data", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error creating production batch",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to create production batch")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("batches/active"),
|
||||
response_model=ProductionBatchListResponse
|
||||
)
|
||||
async def get_active_batches(
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db=Depends(get_db)
|
||||
):
|
||||
"""Get currently active production batches"""
|
||||
try:
|
||||
from app.repositories.production_batch_repository import ProductionBatchRepository
|
||||
batch_repo = ProductionBatchRepository(db)
|
||||
|
||||
batches = await batch_repo.get_active_batches(str(tenant_id))
|
||||
batch_responses = [ProductionBatchResponse.model_validate(batch) for batch in batches]
|
||||
|
||||
logger.info("Retrieved active production batches",
|
||||
count=len(batches), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchListResponse(
|
||||
batches=batch_responses,
|
||||
total_count=len(batches),
|
||||
page=1,
|
||||
page_size=len(batches)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting active batches",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get active batches")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_resource_detail_route("batches", "batch_id"),
|
||||
response_model=ProductionBatchResponse
|
||||
)
|
||||
async def get_batch_details(
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db=Depends(get_db)
|
||||
):
|
||||
"""Get detailed information about a production batch"""
|
||||
try:
|
||||
from app.repositories.production_batch_repository import ProductionBatchRepository
|
||||
batch_repo = ProductionBatchRepository(db)
|
||||
|
||||
batch = await batch_repo.get_by_id(batch_id)
|
||||
if not batch or str(batch.tenant_id) != str(tenant_id):
|
||||
raise HTTPException(status_code=404, detail="Production batch not found")
|
||||
|
||||
logger.info("Retrieved production batch details",
|
||||
batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchResponse.model_validate(batch)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error getting batch details",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get batch details")
|
||||
|
||||
|
||||
@router.put(
|
||||
route_builder.build_nested_resource_route("batches", "batch_id", "status"),
|
||||
response_model=ProductionBatchResponse
|
||||
)
|
||||
async def update_batch_status(
|
||||
status_update: ProductionBatchStatusUpdate,
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Update production batch status"""
|
||||
try:
|
||||
batch = await production_service.update_batch_status(tenant_id, batch_id, status_update)
|
||||
|
||||
logger.info("Updated production batch status",
|
||||
batch_id=str(batch_id),
|
||||
new_status=status_update.status.value,
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchResponse.model_validate(batch)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Invalid status update", error=str(e), batch_id=str(batch_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error updating batch status",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to update batch status")
|
||||
|
||||
|
||||
@router.put(
|
||||
route_builder.build_resource_detail_route("batches", "batch_id"),
|
||||
response_model=ProductionBatchResponse
|
||||
)
|
||||
async def update_production_batch(
|
||||
batch_update: ProductionBatchUpdate,
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Update batch (e.g., start time, notes, status)"""
|
||||
try:
|
||||
batch = await production_service.update_production_batch(tenant_id, batch_id, batch_update)
|
||||
|
||||
logger.info("Updated production batch",
|
||||
batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchResponse.model_validate(batch)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Invalid batch update", error=str(e), batch_id=str(batch_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error updating production batch",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to update production batch")
|
||||
|
||||
|
||||
@router.delete(
|
||||
route_builder.build_resource_detail_route("batches", "batch_id")
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def delete_production_batch(
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Cancel/delete draft batch (Admin+ only, soft delete preferred)"""
|
||||
try:
|
||||
await production_service.delete_production_batch(tenant_id, batch_id)
|
||||
|
||||
# Log audit event for batch deletion
|
||||
try:
    # NOTE: get_db yields an AsyncSession from an async generator, so it
    # cannot be consumed with next(); acquire a session from it explicitly.
    async for db in get_db():
        await audit_logger.log_deletion(
            db_session=db,
            tenant_id=str(tenant_id),
            user_id=current_user["user_id"],
            resource_type="production_batch",
            resource_id=str(batch_id),
            description="Deleted production batch",
            endpoint=f"/batches/{batch_id}",
            method="DELETE"
        )
        break
|
||||
except Exception as audit_error:
|
||||
logger.warning("Failed to log audit event", error=str(audit_error))
|
||||
|
||||
logger.info("Deleted production batch",
|
||||
batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
|
||||
return {"message": "Production batch deleted successfully"}
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot delete batch", error=str(e), batch_id=str(batch_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error deleting production batch",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to delete production batch")
|
||||
90
services/production/app/api/production_dashboard.py
Normal file
@@ -0,0 +1,90 @@
|
||||
# services/production/app/api/production_dashboard.py
|
||||
"""
|
||||
Production Dashboard API - Dashboard endpoints for production overview
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
|
||||
from typing import Optional
|
||||
from datetime import date, datetime
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.routing import RouteBuilder
|
||||
from app.services.production_service import ProductionService
|
||||
from app.schemas.production import ProductionDashboardSummary
|
||||
from app.core.config import settings
|
||||
from app.utils.cache import get_cached, set_cached, make_cache_key
|
||||
|
||||
logger = structlog.get_logger()
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["production-dashboard"])
|
||||
|
||||
|
||||
def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_dashboard_route("summary"),
|
||||
response_model=ProductionDashboardSummary
|
||||
)
|
||||
async def get_dashboard_summary(
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get production dashboard summary with caching (60s TTL)"""
|
||||
try:
|
||||
# PHASE 2: Check cache first
|
||||
cache_key = make_cache_key("production_dashboard", str(tenant_id))
|
||||
cached_result = await get_cached(cache_key)
|
||||
if cached_result is not None:
|
||||
logger.debug("Cache hit for production dashboard", cache_key=cache_key, tenant_id=str(tenant_id))
|
||||
return ProductionDashboardSummary(**cached_result)
|
||||
|
||||
# Cache miss - fetch from database
|
||||
summary = await production_service.get_dashboard_summary(tenant_id)
|
||||
|
||||
# PHASE 2: Cache the result (60s TTL for dashboard summary)
|
||||
await set_cached(cache_key, summary.model_dump(), ttl=60)
|
||||
logger.debug("Cached production dashboard", cache_key=cache_key, ttl=60, tenant_id=str(tenant_id))
|
||||
|
||||
logger.info("Retrieved production dashboard summary",
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return summary
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting dashboard summary",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get dashboard summary")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_dashboard_route("requirements"),
|
||||
response_model=dict
|
||||
)
|
||||
async def get_production_requirements(
|
||||
tenant_id: UUID = Path(...),
|
||||
date: Optional[date] = Query(None, description="Target date for production requirements"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get production requirements for procurement planning"""
|
||||
try:
|
||||
target_date = date or datetime.now().date()
|
||||
requirements = await production_service.get_production_requirements(tenant_id, target_date)
|
||||
|
||||
logger.info("Retrieved production requirements for procurement",
|
||||
tenant_id=str(tenant_id), date=target_date.isoformat())
|
||||
|
||||
return requirements
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting production requirements",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get production requirements")
|
||||
470
services/production/app/api/production_operations.py
Normal file
@@ -0,0 +1,470 @@
|
||||
# services/production/app/api/production_operations.py
|
||||
"""
|
||||
Production Operations API - Business operations for production management
|
||||
Includes: batch start/complete, schedule finalize/optimize, capacity management, transformations, stats
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request, status
|
||||
from typing import Optional
|
||||
from datetime import date, datetime, timedelta
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.routing import RouteBuilder
|
||||
from shared.monitoring.decorators import monitor_performance
|
||||
from app.services.production_service import ProductionService
|
||||
from app.schemas.production import (
|
||||
ProductionBatchResponse,
|
||||
ProductionScheduleResponse
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
logger = structlog.get_logger()
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["production-operations"])
|
||||
|
||||
|
||||
def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
# ===== BATCH OPERATIONS =====
|
||||
|
||||
@router.post(
|
||||
route_builder.build_nested_resource_route("batches", "batch_id", "start"),
|
||||
response_model=ProductionBatchResponse
|
||||
)
|
||||
async def start_production_batch(
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Mark batch as started (updates actual_start_time)"""
|
||||
try:
|
||||
batch = await production_service.start_production_batch(tenant_id, batch_id)
|
||||
|
||||
logger.info("Started production batch",
|
||||
batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchResponse.model_validate(batch)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot start batch", error=str(e), batch_id=str(batch_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error starting production batch",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to start production batch")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_nested_resource_route("batches", "batch_id", "complete"),
|
||||
response_model=ProductionBatchResponse
|
||||
)
|
||||
async def complete_production_batch(
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Path(...),
|
||||
completion_data: Optional[dict] = None,
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Complete batch — auto-calculates yield, duration, cost summary"""
|
||||
try:
|
||||
batch = await production_service.complete_production_batch(tenant_id, batch_id, completion_data)
|
||||
|
||||
logger.info("Completed production batch",
|
||||
batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionBatchResponse.model_validate(batch)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot complete batch", error=str(e), batch_id=str(batch_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error completing production batch",
|
||||
error=str(e), batch_id=str(batch_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to complete production batch")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_operations_route("batches/stats"),
|
||||
response_model=dict
|
||||
)
|
||||
async def get_production_batch_stats(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None, description="Start date for stats"),
|
||||
end_date: Optional[date] = Query(None, description="End date for stats"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Aggregated stats: completed vs failed, avg yield, on-time rate"""
|
||||
try:
|
||||
# Default to last 30 days if no dates provided
|
||||
if not start_date:
|
||||
start_date = (datetime.now() - timedelta(days=30)).date()
|
||||
if not end_date:
|
||||
end_date = datetime.now().date()
|
||||
|
||||
stats = await production_service.get_batch_statistics(tenant_id, start_date, end_date)
|
||||
|
||||
logger.info("Retrieved production batch statistics",
|
||||
tenant_id=str(tenant_id), start_date=start_date.isoformat(), end_date=end_date.isoformat())
|
||||
|
||||
return stats
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting production batch stats",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get production batch stats")
|
||||
|
||||
|
||||
# ===== SCHEDULE OPERATIONS =====
|
||||
|
||||
@router.post(
|
||||
route_builder.build_nested_resource_route("schedules", "schedule_id", "finalize"),
|
||||
response_model=ProductionScheduleResponse
|
||||
)
|
||||
async def finalize_production_schedule(
|
||||
tenant_id: UUID = Path(...),
|
||||
schedule_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Lock schedule; prevents further changes"""
|
||||
try:
|
||||
schedule = await production_service.finalize_production_schedule(tenant_id, schedule_id)
|
||||
|
||||
logger.info("Finalized production schedule",
|
||||
schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionScheduleResponse.model_validate(schedule)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot finalize schedule", error=str(e), schedule_id=str(schedule_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error finalizing production schedule",
|
||||
error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to finalize production schedule")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_operations_route("schedules/optimize"),
|
||||
response_model=dict
|
||||
)
|
||||
async def optimize_production_schedule(
|
||||
tenant_id: UUID = Path(...),
|
||||
target_date: date = Query(..., description="Date to optimize"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Trigger AI-based rescheduling suggestion based on demand/capacity"""
|
||||
try:
|
||||
optimization_result = await production_service.optimize_schedule(tenant_id, target_date)
|
||||
|
||||
logger.info("Generated schedule optimization suggestions",
|
||||
tenant_id=str(tenant_id), date=target_date.isoformat())
|
||||
|
||||
return optimization_result
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error optimizing production schedule",
|
||||
error=str(e), tenant_id=str(tenant_id), date=target_date.isoformat())
|
||||
raise HTTPException(status_code=500, detail="Failed to optimize production schedule")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_operations_route("schedules/capacity-usage"),
|
||||
response_model=dict
|
||||
)
|
||||
async def get_schedule_capacity_usage(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None),
|
||||
end_date: Optional[date] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get capacity usage report for scheduling period"""
|
||||
try:
|
||||
if not start_date:
|
||||
start_date = datetime.now().date()
|
||||
if not end_date:
|
||||
end_date = start_date + timedelta(days=7)
|
||||
|
||||
usage_report = await production_service.get_capacity_usage_report(tenant_id, start_date, end_date)
|
||||
|
||||
logger.info("Retrieved capacity usage report",
|
||||
tenant_id=str(tenant_id),
|
||||
start_date=start_date.isoformat(),
|
||||
end_date=end_date.isoformat())
|
||||
|
||||
return usage_report
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting capacity usage",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get capacity usage")
|
||||
|
||||
|
||||
# ===== CAPACITY MANAGEMENT =====
|
||||
|
||||
@router.get(
|
||||
route_builder.build_operations_route("capacity/status"),
|
||||
response_model=dict
|
||||
)
|
||||
async def get_capacity_status(
|
||||
tenant_id: UUID = Path(...),
|
||||
target_date: Optional[date] = Query(None),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get real-time capacity status"""
|
||||
try:
|
||||
if not target_date:
|
||||
target_date = datetime.now().date()
|
||||
|
||||
status = await production_service.get_capacity_status(tenant_id, target_date)
|
||||
|
||||
logger.info("Retrieved capacity status",
|
||||
tenant_id=str(tenant_id), date=target_date.isoformat())
|
||||
|
||||
return status
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting capacity status",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get capacity status")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_operations_route("capacity/availability"),
|
||||
response_model=dict
|
||||
)
|
||||
async def check_resource_availability(
|
||||
tenant_id: UUID = Path(...),
|
||||
target_date: date = Query(...),
|
||||
required_capacity: float = Query(..., gt=0),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Check if capacity is available for scheduling"""
|
||||
try:
|
||||
availability = await production_service.check_resource_availability(
|
||||
tenant_id, target_date, required_capacity
|
||||
)
|
||||
|
||||
logger.info("Checked resource availability",
|
||||
tenant_id=str(tenant_id),
|
||||
date=target_date.isoformat(),
|
||||
required=required_capacity)
|
||||
|
||||
return availability
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking resource availability",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to check resource availability")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_operations_route("capacity/reserve"),
|
||||
response_model=dict
|
||||
)
|
||||
async def reserve_capacity(
|
||||
tenant_id: UUID = Path(...),
|
||||
target_date: date = Query(...),
|
||||
capacity_amount: float = Query(..., gt=0),
|
||||
batch_id: UUID = Query(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Reserve capacity for a batch"""
|
||||
try:
|
||||
reservation = await production_service.reserve_capacity(
|
||||
tenant_id, target_date, capacity_amount, batch_id
|
||||
)
|
||||
|
||||
logger.info("Reserved production capacity",
|
||||
tenant_id=str(tenant_id),
|
||||
date=target_date.isoformat(),
|
||||
amount=capacity_amount,
|
||||
batch_id=str(batch_id))
|
||||
|
||||
return reservation
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot reserve capacity", error=str(e))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error reserving capacity",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to reserve capacity")
|
||||
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/capacity/date/{date}",
|
||||
response_model=list
|
||||
)
|
||||
async def get_capacity_by_date(
|
||||
tenant_id: UUID = Path(...),
|
||||
date: date = Path(..., description="Date to retrieve capacity for (format: YYYY-MM-DD)"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Get capacity by date (using direct route to support date path parameter)"""
|
||||
try:
|
||||
capacity_data = await production_service.get_capacity_by_date(tenant_id, date)
|
||||
|
||||
logger.info("Retrieved capacity by date",
|
||||
tenant_id=str(tenant_id), date=date.isoformat())
|
||||
|
||||
return capacity_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting capacity by date",
|
||||
error=str(e), tenant_id=str(tenant_id), date=date.isoformat())
|
||||
raise HTTPException(status_code=500, detail="Failed to get capacity by date")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_operations_route("capacity/bottlenecks"),
|
||||
response_model=dict
|
||||
)
|
||||
async def get_capacity_bottlenecks(
|
||||
tenant_id: UUID = Path(...),
|
||||
days_ahead: int = Query(7, ge=1, le=30),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Identify capacity bottlenecks in upcoming period"""
|
||||
try:
|
||||
bottlenecks = await production_service.predict_capacity_bottlenecks(tenant_id, days_ahead)
|
||||
|
||||
logger.info("Retrieved capacity bottlenecks prediction",
|
||||
tenant_id=str(tenant_id), days_ahead=days_ahead)
|
||||
|
||||
return bottlenecks
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting capacity bottlenecks",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get capacity bottlenecks")
|
||||
|
||||
|
||||
# ===== TRANSFORMATION OPERATIONS =====
|
||||
|
||||
@router.post(
|
||||
route_builder.build_operations_route("batches/complete-with-transformation"),
|
||||
response_model=dict
|
||||
)
|
||||
async def complete_batch_with_transformation(
|
||||
tenant_id: UUID = Path(...),
|
||||
batch_id: UUID = Query(...),
|
||||
transformation_data: Optional[dict] = None,
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Complete batch and create product transformation record"""
|
||||
try:
|
||||
result = await production_service.complete_batch_with_transformation(
|
||||
tenant_id, batch_id, transformation_data
|
||||
)
|
||||
|
||||
logger.info("Completed batch with transformation",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=str(batch_id))
|
||||
|
||||
return result
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot complete batch with transformation", error=str(e))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error completing batch with transformation",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to complete batch with transformation")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_operations_route("transform-par-baked"),
|
||||
response_model=dict
|
||||
)
|
||||
async def transform_par_baked_products(
|
||||
tenant_id: UUID = Path(...),
|
||||
source_batch_id: UUID = Query(...),
|
||||
target_quantity: float = Query(..., gt=0),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Transform par-baked products to fully baked"""
|
||||
try:
|
||||
result = await production_service.transform_par_baked_to_fresh(
|
||||
tenant_id, source_batch_id, target_quantity
|
||||
)
|
||||
|
||||
logger.info("Transformed par-baked products",
|
||||
tenant_id=str(tenant_id),
|
||||
source_batch_id=str(source_batch_id),
|
||||
quantity=target_quantity)
|
||||
|
||||
return result
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Cannot transform products", error=str(e))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error transforming products",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to transform products")
|
||||
|
||||
|
||||
# ===== SCHEDULER OPERATIONS =====
|
||||
|
||||
@router.post(
|
||||
route_builder.build_operations_route("scheduler/trigger")
|
||||
)
|
||||
@monitor_performance("trigger_production_scheduler")
|
||||
async def trigger_production_scheduler(
|
||||
tenant_id: UUID = Path(...),
|
||||
request: Request = None
|
||||
):
|
||||
"""
|
||||
Manually trigger the production scheduler for the current tenant
|
||||
|
||||
This endpoint is primarily for testing and development purposes.
|
||||
Triggers the production schedule generation process manually.
|
||||
"""
|
||||
try:
|
||||
# Get the scheduler service from app state
|
||||
if hasattr(request.app.state, 'scheduler_service'):
|
||||
scheduler_service = request.app.state.scheduler_service
|
||||
await scheduler_service.test_production_schedule_generation()
|
||||
|
||||
logger.info("Production scheduler triggered manually",
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Production scheduler executed successfully",
|
||||
"tenant_id": str(tenant_id)
|
||||
}
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
|
||||
detail="Scheduler service is not available"
|
||||
)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error triggering production scheduler",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Error triggering production scheduler: {str(e)}"
|
||||
)
|
||||
96
services/production/app/api/production_orders_operations.py
Normal file
@@ -0,0 +1,96 @@
|
||||
# services/production/app/api/production_orders_operations.py
|
||||
"""
|
||||
Tenant Data Deletion Operations (Internal Service Only)
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.auth.access_control import service_only_access
|
||||
from shared.routing import RouteBuilder
|
||||
from shared.services.tenant_deletion import TenantDataDeletionResult
|
||||
from app.core.database import get_db
|
||||
from app.services.tenant_deletion_service import ProductionTenantDeletionService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["production-tenant-deletion"])
|
||||
|
||||
|
||||
@router.delete(
|
||||
route_builder.build_base_route("tenant/{tenant_id}", include_tenant_prefix=False),
|
||||
response_model=dict
|
||||
)
|
||||
@service_only_access
|
||||
async def delete_tenant_data(
|
||||
tenant_id: str = Path(..., description="Tenant ID to delete data for"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Delete all production data for a tenant (Internal service only)
|
||||
"""
|
||||
try:
|
||||
logger.info("production.tenant_deletion.api_called", tenant_id=tenant_id)
|
||||
|
||||
deletion_service = ProductionTenantDeletionService(db)
|
||||
result = await deletion_service.safe_delete_tenant_data(tenant_id)
|
||||
|
||||
if not result.success:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Tenant data deletion failed: {', '.join(result.errors)}"
|
||||
)
|
||||
|
||||
return {
|
||||
"message": "Tenant data deletion completed successfully",
|
||||
"summary": result.to_dict()
|
||||
}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("production.tenant_deletion.api_error", tenant_id=tenant_id, error=str(e), exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to delete tenant data: {str(e)}")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("tenant/{tenant_id}/deletion-preview", include_tenant_prefix=False),
|
||||
response_model=dict
|
||||
)
|
||||
@service_only_access
|
||||
async def preview_tenant_data_deletion(
|
||||
tenant_id: str = Path(..., description="Tenant ID to preview deletion for"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Preview what data would be deleted for a tenant (dry-run)
|
||||
"""
|
||||
try:
|
||||
logger.info("production.tenant_deletion.preview_called", tenant_id=tenant_id)
|
||||
|
||||
deletion_service = ProductionTenantDeletionService(db)
|
||||
preview_data = await deletion_service.get_tenant_data_preview(tenant_id)
|
||||
result = TenantDataDeletionResult(tenant_id=tenant_id, service_name=deletion_service.service_name)
|
||||
result.deleted_counts = preview_data
|
||||
result.success = True
|
||||
|
||||
if not result.success:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Tenant deletion preview failed: {', '.join(result.errors)}"
|
||||
)
|
||||
|
||||
return {
|
||||
"tenant_id": tenant_id,
|
||||
"service": "production-service",
|
||||
"data_counts": result.deleted_counts,
|
||||
"total_items": sum(result.deleted_counts.values())
|
||||
}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("production.tenant_deletion.preview_error", tenant_id=tenant_id, error=str(e), exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to preview tenant data deletion: {str(e)}")
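# --- Illustrative example (not part of the original source) -----------------
# A sketch of the two internal-service calls above; the exact prefix produced
# by RouteBuilder with include_tenant_prefix=False is an assumption here.
#
#     GET    /api/v1/production/tenant/<tenant_id>/deletion-preview   # dry-run counts
#     DELETE /api/v1/production/tenant/<tenant_id>                    # actual deletion
#
# Both are guarded by @service_only_access, so they are reachable only with
# service-to-service credentials, not end-user tokens.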
|
||||
223
services/production/app/api/production_schedules.py
Normal file
@@ -0,0 +1,223 @@
|
||||
# services/production/app/api/production_schedules.py
|
||||
"""
|
||||
Production Schedules API - ATOMIC CRUD operations on ProductionSchedule model
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
|
||||
from typing import Optional
|
||||
from datetime import date, datetime, timedelta
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.auth.access_control import require_user_role
|
||||
from shared.routing import RouteBuilder
|
||||
from shared.security import create_audit_logger, AuditSeverity, AuditAction
|
||||
from app.core.database import get_db
|
||||
from app.services.production_service import ProductionService
|
||||
from app.models import AuditLog
|
||||
from app.schemas.production import (
|
||||
ProductionScheduleCreate,
|
||||
ProductionScheduleUpdate,
|
||||
ProductionScheduleResponse
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
logger = structlog.get_logger()
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["production-schedules"])
|
||||
|
||||
# Initialize audit logger with the production service's AuditLog model
|
||||
audit_logger = create_audit_logger("production-service", AuditLog)
|
||||
|
||||
|
||||
def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("schedules"),
|
||||
response_model=dict
|
||||
)
|
||||
async def get_production_schedule(
|
||||
tenant_id: UUID = Path(...),
|
||||
start_date: Optional[date] = Query(None, description="Start date for schedule"),
|
||||
end_date: Optional[date] = Query(None, description="End date for schedule"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db=Depends(get_db)
|
||||
):
|
||||
"""Get production schedule for a date range"""
|
||||
try:
|
||||
# Default to next 7 days if no dates provided
|
||||
if not start_date:
|
||||
start_date = datetime.now().date()
|
||||
if not end_date:
|
||||
end_date = start_date + timedelta(days=7)
|
||||
|
||||
from app.repositories.production_schedule_repository import ProductionScheduleRepository
|
||||
schedule_repo = ProductionScheduleRepository(db)
|
||||
|
||||
schedules = await schedule_repo.get_schedules_by_date_range(
|
||||
str(tenant_id), start_date, end_date
|
||||
)
|
||||
|
||||
schedule_data = {
|
||||
"start_date": start_date.isoformat(),
|
||||
"end_date": end_date.isoformat(),
|
||||
"schedules": [
|
||||
{
|
||||
"id": str(schedule.id),
|
||||
"date": schedule.schedule_date.isoformat(),
|
||||
"shift_start": schedule.shift_start.isoformat(),
|
||||
"shift_end": schedule.shift_end.isoformat(),
|
||||
"capacity_utilization": schedule.utilization_percentage,
|
||||
"batches_planned": schedule.total_batches_planned,
|
||||
"is_finalized": schedule.is_finalized
|
||||
}
|
||||
for schedule in schedules
|
||||
],
|
||||
"total_schedules": len(schedules)
|
||||
}
|
||||
|
||||
logger.info("Retrieved production schedule",
|
||||
tenant_id=str(tenant_id),
|
||||
start_date=start_date.isoformat(),
|
||||
end_date=end_date.isoformat(),
|
||||
schedules_count=len(schedules))
|
||||
|
||||
return schedule_data
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting production schedule",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get production schedule")
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_resource_detail_route("schedules", "schedule_id"),
|
||||
response_model=ProductionScheduleResponse
|
||||
)
|
||||
async def get_production_schedule_details(
|
||||
tenant_id: UUID = Path(...),
|
||||
schedule_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db=Depends(get_db)
|
||||
):
|
||||
"""Retrieve full schedule details including assignments"""
|
||||
try:
|
||||
from app.repositories.production_schedule_repository import ProductionScheduleRepository
|
||||
schedule_repo = ProductionScheduleRepository(db)
|
||||
|
||||
schedule = await schedule_repo.get(schedule_id)
|
||||
if not schedule or str(schedule.tenant_id) != str(tenant_id):
|
||||
raise HTTPException(status_code=404, detail="Production schedule not found")
|
||||
|
||||
logger.info("Retrieved production schedule details",
|
||||
schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionScheduleResponse.model_validate(schedule)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error getting production schedule details",
|
||||
error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get production schedule details")
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("schedules"),
|
||||
response_model=ProductionScheduleResponse
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def create_production_schedule(
|
||||
schedule_data: ProductionScheduleCreate,
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Generate or manually create a daily/shift schedule (Admin+ only)"""
|
||||
try:
|
||||
schedule = await production_service.create_production_schedule(tenant_id, schedule_data)
|
||||
|
||||
logger.info("Created production schedule",
|
||||
schedule_id=str(schedule.id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionScheduleResponse.model_validate(schedule)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Invalid schedule data", error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error creating production schedule",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to create production schedule")
|
||||
|
||||
|
||||
@router.put(
|
||||
route_builder.build_resource_detail_route("schedules", "schedule_id"),
|
||||
response_model=ProductionScheduleResponse
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def update_production_schedule(
|
||||
schedule_update: ProductionScheduleUpdate,
|
||||
tenant_id: UUID = Path(...),
|
||||
schedule_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""Edit schedule before finalizing (Admin+ only)"""
|
||||
try:
|
||||
schedule = await production_service.update_production_schedule(tenant_id, schedule_id, schedule_update)
|
||||
|
||||
logger.info("Updated production schedule",
|
||||
schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
|
||||
return ProductionScheduleResponse.model_validate(schedule)
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Invalid schedule update", error=str(e), schedule_id=str(schedule_id))
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Error updating production schedule",
|
||||
error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to update production schedule")
|
||||
|
||||
|
||||
@router.delete(
|
||||
route_builder.build_resource_detail_route("schedules", "schedule_id")
|
||||
)
|
||||
async def delete_production_schedule(
|
||||
tenant_id: UUID = Path(...),
|
||||
schedule_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db=Depends(get_db)
|
||||
):
|
||||
"""Delete a production schedule (if not finalized)"""
|
||||
try:
|
||||
from app.repositories.production_schedule_repository import ProductionScheduleRepository
|
||||
schedule_repo = ProductionScheduleRepository(db)
|
||||
|
||||
schedule = await schedule_repo.get(schedule_id)
|
||||
if not schedule or str(schedule.tenant_id) != str(tenant_id):
|
||||
raise HTTPException(status_code=404, detail="Production schedule not found")
|
||||
|
||||
if schedule.is_finalized:
|
||||
raise HTTPException(status_code=400, detail="Cannot delete finalized schedule")
|
||||
|
||||
await schedule_repo.delete(schedule_id)
|
||||
|
||||
logger.info("Deleted production schedule",
|
||||
schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
|
||||
return {"message": "Production schedule deleted successfully"}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error deleting production schedule",
|
||||
error=str(e), schedule_id=str(schedule_id), tenant_id=str(tenant_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to delete production schedule")
|
||||
441
services/production/app/api/quality_templates.py
Normal file
@@ -0,0 +1,441 @@
|
||||
# services/production/app/api/quality_templates.py
|
||||
"""
|
||||
Quality Check Templates API - CRUD operations on quality check templates
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Query, status
|
||||
from typing import Optional
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from shared.auth.access_control import require_user_role
|
||||
from shared.routing import RouteBuilder, RouteCategory
|
||||
from app.core.database import get_db
|
||||
from app.services.quality_template_service import QualityTemplateService
|
||||
from app.models.production import ProcessStage, QualityCheckTemplate
|
||||
from app.schemas.quality_templates import (
|
||||
QualityCheckTemplateCreate,
|
||||
QualityCheckTemplateUpdate,
|
||||
QualityCheckTemplateResponse,
|
||||
QualityCheckTemplateList,
|
||||
QualityCheckType
|
||||
)
|
||||
|
||||
logger = structlog.get_logger()
|
||||
route_builder = RouteBuilder('production')
|
||||
router = APIRouter(tags=["quality-templates"])
|
||||
|
||||
|
||||
# ===== Quality Template CRUD Endpoints =====
|
||||
|
||||
@router.get(
|
||||
route_builder.build_base_route("quality-templates"),
|
||||
response_model=QualityCheckTemplateList
|
||||
)
|
||||
async def list_quality_templates(
|
||||
tenant_id: UUID = Path(...),
|
||||
stage: Optional[ProcessStage] = Query(None, description="Filter by process stage"),
|
||||
check_type: Optional[QualityCheckType] = Query(None, description="Filter by check type"),
|
||||
is_active: Optional[bool] = Query(True, description="Filter by active status"),
|
||||
skip: int = Query(0, ge=0, description="Number of templates to skip"),
|
||||
limit: int = Query(100, ge=1, le=1000, description="Number of templates to return"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
List quality check templates with filtering and pagination
|
||||
|
||||
Filters:
|
||||
- stage: Filter by applicable process stage
|
||||
- check_type: Filter by type of quality check
|
||||
- is_active: Filter by active status (default: True)
|
||||
"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
templates, total = await service.get_templates(
|
||||
tenant_id=str(tenant_id),
|
||||
stage=stage,
|
||||
check_type=check_type.value if check_type else None,
|
||||
is_active=is_active,
|
||||
skip=skip,
|
||||
limit=limit
|
||||
)
|
||||
|
||||
logger.info("Retrieved quality templates",
|
||||
tenant_id=str(tenant_id),
|
||||
total=total,
|
||||
filters={"stage": stage, "check_type": check_type, "is_active": is_active})
|
||||
|
||||
return QualityCheckTemplateList(
|
||||
templates=[QualityCheckTemplateResponse.model_validate(t) for t in templates],
|
||||
total=total,
|
||||
skip=skip,
|
||||
limit=limit
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error listing quality templates",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve quality templates"
|
||||
)
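# --- Illustrative example (not part of the original source) -----------------
# Sketch of a filtered listing call, assuming the base route resolves to
# /api/v1/tenants/{tenant_id}/production/quality-templates; the stage and
# check_type values are placeholders, not confirmed enum members.
#
#     GET /api/v1/tenants/<tenant_id>/production/quality-templates
#         ?stage=<ProcessStage>&check_type=<QualityCheckType>&is_active=true&skip=0&limit=50
#
# The response follows QualityCheckTemplateList: a "templates" array plus
# "total", "skip" and "limit" echoing the pagination parameters.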
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_base_route("quality-templates"),
|
||||
response_model=QualityCheckTemplateResponse,
|
||||
status_code=status.HTTP_201_CREATED
|
||||
)
|
||||
@require_user_role(['admin', 'owner', 'member'])
|
||||
async def create_quality_template(
|
||||
template_data: QualityCheckTemplateCreate,
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Create a new quality check template"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
# Add created_by from current user
|
||||
template_dict = template_data.dict()
|
||||
template_dict['created_by'] = UUID(current_user["user_id"])
|
||||
template_create = QualityCheckTemplateCreate(**template_dict)
|
||||
|
||||
# Create template via service (handles validation and business rules)
|
||||
template = await service.create_template(
|
||||
tenant_id=str(tenant_id),
|
||||
template_data=template_create
|
||||
)
|
||||
|
||||
# Commit the transaction to persist changes
|
||||
await db.commit()
|
||||
|
||||
logger.info("Created quality template",
|
||||
template_id=str(template.id),
|
||||
template_name=template.name,
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return QualityCheckTemplateResponse.from_orm(template)
|
||||
|
||||
except ValueError as e:
|
||||
# Business rule validation errors
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error("Error creating quality template",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to create quality template"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
route_builder.build_resource_detail_route("quality-templates", "template_id"),
|
||||
response_model=QualityCheckTemplateResponse
|
||||
)
|
||||
async def get_quality_template(
|
||||
tenant_id: UUID = Path(...),
|
||||
template_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Get a specific quality check template"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
template = await service.get_template(
|
||||
tenant_id=str(tenant_id),
|
||||
template_id=template_id
|
||||
)
|
||||
|
||||
if not template:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="Quality template not found"
|
||||
)
|
||||
|
||||
return QualityCheckTemplateResponse.from_orm(template)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error getting quality template",
|
||||
error=str(e),
|
||||
template_id=str(template_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve quality template"
|
||||
)
|
||||
|
||||
|
||||
@router.put(
|
||||
route_builder.build_resource_detail_route("quality-templates", "template_id"),
|
||||
response_model=QualityCheckTemplateResponse
|
||||
)
|
||||
@require_user_role(['admin', 'owner', 'member'])
|
||||
async def update_quality_template(
|
||||
template_data: QualityCheckTemplateUpdate,
|
||||
tenant_id: UUID = Path(...),
|
||||
template_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Update a quality check template"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
# Update template via service (handles validation and business rules)
|
||||
template = await service.update_template(
|
||||
tenant_id=str(tenant_id),
|
||||
template_id=template_id,
|
||||
template_data=template_data
|
||||
)
|
||||
|
||||
if not template:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="Quality template not found"
|
||||
)
|
||||
|
||||
# Commit the transaction to persist changes
|
||||
await db.commit()
|
||||
|
||||
logger.info("Updated quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return QualityCheckTemplateResponse.from_orm(template)
|
||||
|
||||
except ValueError as e:
|
||||
# Business rule validation errors
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e)
|
||||
)
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error updating quality template",
|
||||
error=str(e),
|
||||
template_id=str(template_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to update quality template"
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
route_builder.build_resource_detail_route("quality-templates", "template_id"),
|
||||
status_code=status.HTTP_204_NO_CONTENT
|
||||
)
|
||||
@require_user_role(['admin', 'owner'])
|
||||
async def delete_quality_template(
|
||||
tenant_id: UUID = Path(...),
|
||||
template_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Delete a quality check template
|
||||
|
||||
Note: Service layer determines whether to use soft or hard delete
|
||||
based on business rules (checking dependencies, etc.)
|
||||
"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
# Delete template via service (handles business rules)
|
||||
success = await service.delete_template(
|
||||
tenant_id=str(tenant_id),
|
||||
template_id=template_id
|
||||
)
|
||||
|
||||
if not success:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="Quality template not found"
|
||||
)
|
||||
|
||||
# Commit the transaction to persist changes
|
||||
await db.commit()
|
||||
|
||||
logger.info("Deleted quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.error("Error deleting quality template",
|
||||
error=str(e),
|
||||
template_id=str(template_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to delete quality template"
|
||||
)
|
||||
|
||||
|
||||
# ===== Additional Quality Template Operations =====
|
||||
|
||||
@router.get(
|
||||
route_builder.build_custom_route(
|
||||
RouteCategory.BASE,
|
||||
["quality-templates", "stages", "{stage}"]
|
||||
),
|
||||
response_model=QualityCheckTemplateList
|
||||
)
|
||||
async def get_templates_for_stage(
|
||||
tenant_id: UUID = Path(...),
|
||||
stage: ProcessStage = Path(...),
|
||||
is_active: bool = Query(True, description="Filter by active status"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Get all quality templates applicable to a specific process stage"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
templates = await service.get_templates_for_stage(
|
||||
tenant_id=str(tenant_id),
|
||||
stage=stage,
|
||||
is_active=is_active
|
||||
)
|
||||
|
||||
logger.info("Retrieved templates for stage",
|
||||
tenant_id=str(tenant_id),
|
||||
stage=stage,
|
||||
count=len(templates))
|
||||
|
||||
return QualityCheckTemplateList(
|
||||
templates=[QualityCheckTemplateResponse.from_orm(t) for t in templates],
|
||||
total=len(templates),
|
||||
skip=0,
|
||||
limit=len(templates)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting templates for stage",
|
||||
error=str(e),
|
||||
stage=stage,
|
||||
tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to retrieve templates for stage"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_resource_action_route("quality-templates", "template_id", "duplicate"),
|
||||
response_model=QualityCheckTemplateResponse,
|
||||
status_code=status.HTTP_201_CREATED
|
||||
)
|
||||
@require_user_role(['admin', 'owner', 'member'])
|
||||
async def duplicate_quality_template(
|
||||
tenant_id: UUID = Path(...),
|
||||
template_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
db = Depends(get_db)
|
||||
):
|
||||
"""Duplicate an existing quality check template"""
|
||||
try:
|
||||
service = QualityTemplateService(db)
|
||||
|
||||
# Duplicate template via service (handles business rules)
|
||||
duplicate = await service.duplicate_template(
|
||||
tenant_id=str(tenant_id),
|
||||
template_id=template_id
|
||||
)
|
||||
|
||||
if not duplicate:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="Quality template not found"
|
||||
)
|
||||
|
||||
logger.info("Duplicated quality template",
|
||||
original_id=str(template_id),
|
||||
duplicate_id=str(duplicate.id),
|
||||
tenant_id=str(tenant_id))
|
||||
|
||||
return QualityCheckTemplateResponse.from_orm(duplicate)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error duplicating quality template",
|
||||
error=str(e),
|
||||
template_id=str(template_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to duplicate quality template"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
route_builder.build_operations_route("quality-templates/validate"),
|
||||
response_model=dict
|
||||
)
|
||||
@require_user_role(['admin', 'owner', 'member'])
|
||||
async def validate_quality_template(
|
||||
template_data: dict,
|
||||
tenant_id: UUID = Path(...),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
):
|
||||
"""
|
||||
Validate quality template configuration without creating it
|
||||
|
||||
Returns validation result with any errors found
|
||||
"""
|
||||
try:
|
||||
errors = []
|
||||
|
||||
# Basic validation
|
||||
if not template_data.get('name'):
|
||||
errors.append("Template name is required")
|
||||
|
||||
if not template_data.get('check_type'):
|
||||
errors.append("Check type is required")
|
||||
|
||||
# Validate measurement fields
|
||||
check_type = template_data.get('check_type')
|
||||
if check_type in ['measurement', 'temperature', 'weight']:
|
||||
if template_data.get('min_value') is not None and template_data.get('max_value') is not None:
|
||||
if template_data['min_value'] >= template_data['max_value']:
|
||||
errors.append("Minimum value must be less than maximum value")
|
||||
|
||||
# Validate weight
|
||||
weight = template_data.get('weight', 1.0)
|
||||
if weight < 0 or weight > 10:
|
||||
errors.append("Weight must be between 0 and 10")
|
||||
|
||||
is_valid = len(errors) == 0
|
||||
|
||||
logger.info("Validated quality template",
|
||||
tenant_id=str(tenant_id),
|
||||
valid=is_valid,
|
||||
error_count=len(errors))
|
||||
|
||||
return {
|
||||
"valid": is_valid,
|
||||
"errors": errors
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error validating quality template",
|
||||
error=str(e), tenant_id=str(tenant_id))
|
||||
return {
|
||||
"valid": False,
|
||||
"errors": [f"Validation error: {str(e)}"]
|
||||
}
|
||||
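The validate endpoint above only checks the payload (name, check type, min/max bounds, 0-10 weight) and never persists anything, so it suits a "check before save" flow in the UI. A minimal caller sketch, assuming the operations route resolves to .../production/operations/quality-templates/validate (the exact path produced by build_operations_route is not shown in this file) and that an auth token is already available:

# Hypothetical caller; the URL shape and token handling are assumptions for illustration.
import httpx

payload = {
    "name": "Crumb core temperature",
    "check_type": "temperature",
    "min_value": 92.0,   # must be strictly below max_value
    "max_value": 99.0,
    "weight": 2.0,       # validated to lie between 0 and 10
}

async def validate_template(tenant_id: str, token: str) -> dict:
    url = f"/api/v1/tenants/{tenant_id}/production/operations/quality-templates/validate"
    async with httpx.AsyncClient(base_url="http://production-service:8000") as client:
        resp = await client.post(url, json=payload, headers={"Authorization": f"Bearer {token}"})
        resp.raise_for_status()
        return resp.json()   # expected: {"valid": true, "errors": []}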
293
services/production/app/api/sustainability.py
Normal file
293
services/production/app/api/sustainability.py
Normal file
@@ -0,0 +1,293 @@
|
||||
"""
|
||||
Production Service - Sustainability API
|
||||
Exposes production-specific sustainability metrics following microservices principles
|
||||
Each service owns its domain data
|
||||
"""
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional
|
||||
from uuid import UUID
|
||||
from fastapi import APIRouter, Depends, Path, Query, Request
|
||||
import structlog
|
||||
|
||||
from shared.auth.decorators import get_current_user_dep
|
||||
from app.services.production_service import ProductionService
|
||||
from shared.routing import RouteBuilder
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
# Create route builder for consistent URL structure
|
||||
route_builder = RouteBuilder('production')
|
||||
|
||||
router = APIRouter(tags=["production-sustainability"])
|
||||
|
||||
|
||||
def get_production_service(request: Request) -> ProductionService:
|
||||
"""Dependency injection for production service"""
|
||||
from app.core.database import database_manager
|
||||
from app.core.config import settings
|
||||
notification_service = getattr(request.app.state, 'notification_service', None)
|
||||
return ProductionService(database_manager, settings, notification_service)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/sustainability/waste-metrics",
|
||||
response_model=dict,
|
||||
summary="Get production waste metrics",
|
||||
description="""
|
||||
Returns production-specific waste metrics for sustainability tracking.
|
||||
|
||||
This endpoint is part of the microservices architecture where each service
|
||||
owns its domain data. Frontend aggregates data from multiple services.
|
||||
|
||||
Metrics include:
|
||||
- Total production waste from batches (waste_quantity + defect_quantity)
|
||||
- Production volumes (planned vs actual)
|
||||
- Waste breakdown by defect type
|
||||
- AI-assisted batch tracking
|
||||
"""
|
||||
)
|
||||
async def get_production_waste_metrics(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
start_date: Optional[datetime] = Query(None, description="Start date for metrics (default: 30 days ago)"),
|
||||
end_date: Optional[datetime] = Query(None, description="End date for metrics (default: now)"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get production waste metrics for sustainability dashboard
|
||||
|
||||
Returns production-specific metrics that frontend will aggregate with
|
||||
inventory metrics for complete sustainability picture.
|
||||
"""
|
||||
try:
|
||||
# Set default dates
|
||||
if not end_date:
|
||||
end_date = datetime.now()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Get waste analytics from production service
|
||||
waste_data = await production_service.get_waste_analytics(
|
||||
tenant_id=tenant_id,
|
||||
start_date=start_date,
|
||||
end_date=end_date
|
||||
)
|
||||
|
||||
# Enrich with metadata
|
||||
response = {
|
||||
**waste_data,
|
||||
"service": "production",
|
||||
"period": {
|
||||
"start_date": start_date.isoformat(),
|
||||
"end_date": end_date.isoformat(),
|
||||
"days": (end_date - start_date).days
|
||||
},
|
||||
"metadata": {
|
||||
"data_source": "production_batches",
|
||||
"calculation_method": "SUM(waste_quantity + defect_quantity)",
|
||||
"filters_applied": {
|
||||
"status": ["COMPLETED", "QUALITY_CHECK"],
|
||||
"date_range": f"{start_date.date()} to {end_date.date()}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(
|
||||
"Production waste metrics retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
total_waste_kg=waste_data.get('total_production_waste', 0),
|
||||
period_days=(end_date - start_date).days,
|
||||
user_id=current_user.get('user_id')
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting production waste metrics",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e)
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/sustainability/baseline",
|
||||
response_model=dict,
|
||||
summary="Get production baseline metrics",
|
||||
description="""
|
||||
Returns baseline production metrics from the first 90 days of operation.
|
||||
|
||||
Used by frontend to calculate SDG 12.3 compliance (waste reduction targets).
|
||||
If tenant has less than 90 days of data, returns industry average baseline.
|
||||
"""
|
||||
)
|
||||
async def get_production_baseline(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get baseline production metrics for SDG compliance calculations
|
||||
|
||||
Frontend uses this to calculate:
|
||||
- Waste reduction percentage vs baseline
|
||||
- Progress toward SDG 12.3 targets
|
||||
- Grant eligibility based on improvement
|
||||
"""
|
||||
try:
|
||||
baseline_data = await production_service.get_baseline_metrics(tenant_id)
|
||||
|
||||
# Add metadata
|
||||
response = {
|
||||
**baseline_data,
|
||||
"service": "production",
|
||||
"metadata": {
|
||||
"baseline_period_days": 90,
|
||||
"calculation_method": "First 90 days of production data",
|
||||
"fallback": "Industry average (25%) if insufficient data"
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(
|
||||
"Production baseline metrics retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
has_baseline=baseline_data.get('has_baseline', False),
|
||||
baseline_waste_pct=baseline_data.get('waste_percentage'),
|
||||
user_id=current_user.get('user_id')
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting production baseline",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e)
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/sustainability/ai-impact",
|
||||
response_model=dict,
|
||||
summary="Get AI waste reduction impact",
|
||||
description="""
|
||||
Analyzes the impact of AI-assisted production on waste reduction.
|
||||
|
||||
Compares waste rates between:
|
||||
- AI-assisted batches (with is_ai_assisted=true)
|
||||
- Manual batches (is_ai_assisted=false)
|
||||
|
||||
Shows ROI of AI features for sustainability.
|
||||
"""
|
||||
)
|
||||
async def get_ai_waste_impact(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
start_date: Optional[datetime] = Query(None, description="Start date (default: 30 days ago)"),
|
||||
end_date: Optional[datetime] = Query(None, description="End date (default: now)"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get AI impact on waste reduction
|
||||
|
||||
Frontend uses this to showcase:
|
||||
- Value proposition of AI features
|
||||
- Waste avoided through AI assistance
|
||||
- Financial ROI of AI investment
|
||||
"""
|
||||
try:
|
||||
# Set default dates
|
||||
if not end_date:
|
||||
end_date = datetime.now()
|
||||
if not start_date:
|
||||
start_date = end_date - timedelta(days=30)
|
||||
|
||||
# Get AI impact analytics (we'll implement this)
|
||||
ai_impact = await production_service.get_ai_waste_impact(
|
||||
tenant_id=tenant_id,
|
||||
start_date=start_date,
|
||||
end_date=end_date
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"AI waste impact retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
ai_waste_reduction_pct=ai_impact.get('waste_reduction_percentage'),
|
||||
user_id=current_user.get('user_id')
|
||||
)
|
||||
|
||||
return ai_impact
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting AI waste impact",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e)
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
@router.get(
|
||||
"/api/v1/tenants/{tenant_id}/production/sustainability/summary",
|
||||
response_model=dict,
|
||||
summary="Get production sustainability summary",
|
||||
description="""
|
||||
Quick summary endpoint combining all production sustainability metrics.
|
||||
|
||||
Useful for dashboard widgets that need overview data without multiple calls.
|
||||
"""
|
||||
)
|
||||
async def get_production_sustainability_summary(
|
||||
tenant_id: UUID = Path(..., description="Tenant ID"),
|
||||
days: int = Query(30, ge=7, le=365, description="Number of days to analyze"),
|
||||
current_user: dict = Depends(get_current_user_dep),
|
||||
production_service: ProductionService = Depends(get_production_service)
|
||||
):
|
||||
"""
|
||||
Get comprehensive production sustainability summary
|
||||
|
||||
Combines waste metrics, baseline, and AI impact in one response.
|
||||
Optimized for dashboard widgets.
|
||||
"""
|
||||
try:
|
||||
end_date = datetime.now()
|
||||
start_date = end_date - timedelta(days=days)
|
||||
|
||||
# Get all metrics in parallel (within service)
|
||||
waste_data = await production_service.get_waste_analytics(tenant_id, start_date, end_date)
|
||||
baseline_data = await production_service.get_baseline_metrics(tenant_id)
|
||||
|
||||
# Try to get AI impact (may not be available for all tenants)
|
||||
        try:
            ai_impact = await production_service.get_ai_waste_impact(tenant_id, start_date, end_date)
        except Exception as e:
            logger.warning("AI waste impact unavailable for summary",
                           tenant_id=str(tenant_id), error=str(e))
            ai_impact = {"available": False}
|
||||
|
||||
summary = {
|
||||
"service": "production",
|
||||
"period_days": days,
|
||||
"waste_metrics": waste_data,
|
||||
"baseline": baseline_data,
|
||||
"ai_impact": ai_impact,
|
||||
"last_updated": datetime.now().isoformat()
|
||||
}
|
||||
|
||||
logger.info(
|
||||
"Production sustainability summary retrieved",
|
||||
tenant_id=str(tenant_id),
|
||||
period_days=days,
|
||||
user_id=current_user.get('user_id')
|
||||
)
|
||||
|
||||
return summary
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting production sustainability summary",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e)
|
||||
)
|
||||
raise
|
||||
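Since the frontend is expected to combine the waste-metrics and baseline endpoints above, here is a minimal sketch of the reduction calculation it might perform. The 50% target reflects SDG 12.3's goal of halving food waste; apart from the waste percentages, the field names are assumptions rather than the service's actual response schema:

# Illustrative client-side aggregation; not part of the service code.
def waste_reduction_vs_baseline(current_waste_pct: float, baseline_waste_pct: float) -> dict:
    """Compare the current waste rate against the baseline period."""
    if baseline_waste_pct <= 0:
        return {"reduction_pct": 0.0, "sdg_12_3_progress_pct": 0.0}
    reduction = (baseline_waste_pct - current_waste_pct) / baseline_waste_pct * 100
    return {
        "reduction_pct": round(reduction, 1),
        # SDG 12.3 target: halve waste, i.e. a 50% reduction vs baseline
        "sdg_12_3_progress_pct": round(min(reduction / 50 * 100, 100.0), 1),
    }

# Example: industry-average baseline of 25% waste improved to 15% today
# -> 40% reduction, i.e. 80% of the way to the SDG 12.3 target.
print(waste_reduction_vs_baseline(15.0, 25.0))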
6
services/production/app/core/__init__.py
Normal file
6
services/production/app/core/__init__.py
Normal file
@@ -0,0 +1,6 @@
# ================================================================
# services/production/app/core/__init__.py
# ================================================================
"""
Core configuration and database setup
"""
101
services/production/app/core/config.py
Normal file
101
services/production/app/core/config.py
Normal file
@@ -0,0 +1,101 @@
# ================================================================
# PRODUCTION SERVICE CONFIGURATION
# services/production/app/core/config.py
# ================================================================

"""
Production service configuration
Production planning and batch management
"""

from shared.config.base import BaseServiceSettings
import os

class ProductionSettings(BaseServiceSettings):
    """Production service specific settings"""

    # Service Identity
    APP_NAME: str = "Production Service"
    SERVICE_NAME: str = "production-service"
    VERSION: str = "1.0.0"
    DESCRIPTION: str = "Production planning and batch management"

    # Database configuration (secure approach - build from components)
    @property
    def DATABASE_URL(self) -> str:
        """Build database URL from secure components"""
        # Try complete URL first (for backward compatibility)
        complete_url = os.getenv("PRODUCTION_DATABASE_URL")
        if complete_url:
            return complete_url

        # Build from components (secure approach)
        user = os.getenv("PRODUCTION_DB_USER", "production_user")
        password = os.getenv("PRODUCTION_DB_PASSWORD", "production_pass123")
        host = os.getenv("PRODUCTION_DB_HOST", "localhost")
        port = os.getenv("PRODUCTION_DB_PORT", "5432")
        name = os.getenv("PRODUCTION_DB_NAME", "production_db")

        return f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{name}"

    # Redis Database (for production queues and caching)
    REDIS_DB: int = 3

    # Service URLs for communication
    GATEWAY_URL: str = os.getenv("GATEWAY_URL", "http://gateway-service:8000")
    ORDERS_SERVICE_URL: str = os.getenv("ORDERS_SERVICE_URL", "http://orders-service:8000")
    INVENTORY_SERVICE_URL: str = os.getenv("INVENTORY_SERVICE_URL", "http://inventory-service:8000")
    RECIPES_SERVICE_URL: str = os.getenv("RECIPES_SERVICE_URL", "http://recipes-service:8000")
    SALES_SERVICE_URL: str = os.getenv("SALES_SERVICE_URL", "http://sales-service:8000")
    FORECASTING_SERVICE_URL: str = os.getenv("FORECASTING_SERVICE_URL", "http://forecasting-service:8000")

    # Production Planning Configuration
    PLANNING_HORIZON_DAYS: int = int(os.getenv("PLANNING_HORIZON_DAYS", "7"))
    MINIMUM_BATCH_SIZE: float = float(os.getenv("MINIMUM_BATCH_SIZE", "1.0"))
    MAXIMUM_BATCH_SIZE: float = float(os.getenv("MAXIMUM_BATCH_SIZE", "100.0"))
    PRODUCTION_BUFFER_PERCENTAGE: float = float(os.getenv("PRODUCTION_BUFFER_PERCENTAGE", "10.0"))

    # Capacity Management
    DEFAULT_WORKING_HOURS_PER_DAY: int = int(os.getenv("DEFAULT_WORKING_HOURS_PER_DAY", "12"))
    MAX_OVERTIME_HOURS: int = int(os.getenv("MAX_OVERTIME_HOURS", "4"))
    CAPACITY_UTILIZATION_TARGET: float = float(os.getenv("CAPACITY_UTILIZATION_TARGET", "0.85"))
    CAPACITY_WARNING_THRESHOLD: float = float(os.getenv("CAPACITY_WARNING_THRESHOLD", "0.95"))

    # Quality Control
    QUALITY_CHECK_ENABLED: bool = os.getenv("QUALITY_CHECK_ENABLED", "true").lower() == "true"
    MINIMUM_YIELD_PERCENTAGE: float = float(os.getenv("MINIMUM_YIELD_PERCENTAGE", "85.0"))
    QUALITY_SCORE_THRESHOLD: float = float(os.getenv("QUALITY_SCORE_THRESHOLD", "8.0"))

    # Batch Management
    BATCH_AUTO_NUMBERING: bool = os.getenv("BATCH_AUTO_NUMBERING", "true").lower() == "true"
    BATCH_NUMBER_PREFIX: str = os.getenv("BATCH_NUMBER_PREFIX", "PROD")
    BATCH_TRACKING_ENABLED: bool = os.getenv("BATCH_TRACKING_ENABLED", "true").lower() == "true"

    # Production Scheduling
    SCHEDULE_OPTIMIZATION_ENABLED: bool = os.getenv("SCHEDULE_OPTIMIZATION_ENABLED", "true").lower() == "true"
    PREP_TIME_BUFFER_MINUTES: int = int(os.getenv("PREP_TIME_BUFFER_MINUTES", "30"))
    CLEANUP_TIME_BUFFER_MINUTES: int = int(os.getenv("CLEANUP_TIME_BUFFER_MINUTES", "15"))

    # Business Rules for Bakery Operations
    BUSINESS_HOUR_START: int = 6    # 6 AM - early start for fresh bread
    BUSINESS_HOUR_END: int = 22     # 10 PM
    PEAK_PRODUCTION_HOURS_START: int = 4    # 4 AM
    PEAK_PRODUCTION_HOURS_END: int = 10     # 10 AM

    # Weekend and Holiday Adjustments
    WEEKEND_PRODUCTION_FACTOR: float = float(os.getenv("WEEKEND_PRODUCTION_FACTOR", "0.7"))
    HOLIDAY_PRODUCTION_FACTOR: float = float(os.getenv("HOLIDAY_PRODUCTION_FACTOR", "0.3"))
    SPECIAL_EVENT_PRODUCTION_FACTOR: float = float(os.getenv("SPECIAL_EVENT_PRODUCTION_FACTOR", "1.5"))

    # Cost Management
    COST_TRACKING_ENABLED: bool = os.getenv("COST_TRACKING_ENABLED", "true").lower() == "true"
    LABOR_COST_PER_HOUR: float = float(os.getenv("LABOR_COST_PER_HOUR", "15.0"))
    OVERHEAD_COST_PERCENTAGE: float = float(os.getenv("OVERHEAD_COST_PERCENTAGE", "20.0"))

    # Integration Settings
    INVENTORY_INTEGRATION_ENABLED: bool = os.getenv("INVENTORY_INTEGRATION_ENABLED", "true").lower() == "true"
    AUTOMATIC_INGREDIENT_RESERVATION: bool = os.getenv("AUTOMATIC_INGREDIENT_RESERVATION", "true").lower() == "true"
    REAL_TIME_INVENTORY_UPDATES: bool = os.getenv("REAL_TIME_INVENTORY_UPDATES", "true").lower() == "true"

settings = ProductionSettings()
|
||||
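A quick illustration of the precedence implemented by the DATABASE_URL property: the component variables are only consulted when PRODUCTION_DATABASE_URL is unset. The values below are made up, and instantiating ProductionSettings may require whatever additional variables BaseServiceSettings expects:

# Illustrative only; run in an environment where BaseServiceSettings can load.
import os
from app.core.config import ProductionSettings

os.environ["PRODUCTION_DB_USER"] = "prod_user"
os.environ["PRODUCTION_DB_PASSWORD"] = "change-me"
os.environ["PRODUCTION_DB_HOST"] = "db.internal"

print(ProductionSettings().DATABASE_URL)
# -> postgresql+asyncpg://prod_user:change-me@db.internal:5432/production_db
# Setting PRODUCTION_DATABASE_URL instead bypasses the component-based build entirely.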
51
services/production/app/core/database.py
Normal file
51
services/production/app/core/database.py
Normal file
@@ -0,0 +1,51 @@
# ================================================================
# services/production/app/core/database.py
# ================================================================
"""
Database configuration for production service
"""

import structlog
from shared.database import DatabaseManager, create_database_manager
from shared.database.base import Base
from shared.database.transactions import TransactionManager
from app.core.config import settings

logger = structlog.get_logger()

# Create database manager following shared pattern
database_manager = create_database_manager(
    settings.DATABASE_URL,
    settings.SERVICE_NAME
)

# Transaction manager for the service
transaction_manager = TransactionManager(database_manager)

# Use exactly the same pattern as training/forecasting services
async def get_db():
    """Database dependency"""
    async with database_manager.get_session() as db:
        yield db

def get_db_transaction():
    """Get database transaction manager"""
    return database_manager.get_transaction()

async def get_db_health():
    """Check database health"""
    try:
        health_status = await database_manager.health_check()
        return health_status.get("healthy", False)
    except Exception as e:
        logger.error(f"Database health check failed: {e}")
        return False

async def init_database():
    """Initialize database tables"""
    try:
        await database_manager.create_tables()
        logger.info("Production service database initialized successfully")
    except Exception as e:
        logger.error(f"Failed to initialize database: {e}")
        raise
|
||||
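A minimal sketch of how these helpers are typically wired up. The standalone app object and route paths are illustrative only, since the real service builds its application through StandardFastAPIService:

# Hypothetical wiring example; not part of the service code.
from fastapi import Depends, FastAPI
from app.core.database import get_db, get_db_health, init_database

app = FastAPI()

@app.on_event("startup")
async def startup() -> None:
    await init_database()  # create tables if they do not exist yet

@app.get("/healthz/db")
async def db_health() -> dict:
    return {"database": "up" if await get_db_health() else "down"}

@app.get("/example")
async def example(db=Depends(get_db)):
    # `db` is the async session yielded by the dependency above
    return {"ok": True}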
236
services/production/app/main.py
Normal file
236
services/production/app/main.py
Normal file
@@ -0,0 +1,236 @@
|
||||
# ================================================================
|
||||
# services/production/app/main.py
|
||||
# ================================================================
|
||||
"""
|
||||
Production Service - FastAPI Application
|
||||
Production planning and batch management service
|
||||
"""
|
||||
|
||||
import time
|
||||
from fastapi import FastAPI, Request
|
||||
from sqlalchemy import text
|
||||
from app.core.config import settings
|
||||
from app.core.database import database_manager
|
||||
from app.services.production_alert_service import ProductionAlertService
|
||||
from app.services.production_scheduler import ProductionScheduler
|
||||
from app.services.production_notification_service import ProductionNotificationService
|
||||
from shared.service_base import StandardFastAPIService
|
||||
|
||||
# Import standardized routers
|
||||
from app.api import (
|
||||
internal_demo,
|
||||
production_batches,
|
||||
production_schedules,
|
||||
production_operations,
|
||||
production_dashboard,
|
||||
analytics,
|
||||
quality_templates,
|
||||
equipment,
|
||||
orchestrator, # NEW: Orchestrator integration endpoint
|
||||
production_orders_operations, # Tenant deletion endpoints
|
||||
audit,
|
||||
ml_insights, # ML insights endpoint
|
||||
batch,
|
||||
sustainability # Sustainability metrics endpoints
|
||||
)
|
||||
from app.api.internal_alert_trigger import router as internal_alert_trigger_router
|
||||
|
||||
|
||||
class ProductionService(StandardFastAPIService):
|
||||
"""Production Service with standardized setup"""
|
||||
|
||||
expected_migration_version = "001_initial_schema"
|
||||
|
||||
|
||||
async def verify_migrations(self):
|
||||
"""Verify database schema matches the latest migrations."""
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
result = await session.execute(text("SELECT version_num FROM alembic_version"))
|
||||
version = result.scalar()
|
||||
if version != self.expected_migration_version:
|
||||
self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}")
|
||||
raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}")
|
||||
self.logger.info(f"Migration verification successful: {version}")
|
||||
except Exception as e:
|
||||
self.logger.error(f"Migration verification failed: {e}")
|
||||
raise
|
||||
|
||||
def __init__(self):
|
||||
# Define expected database tables for health checks
|
||||
production_expected_tables = [
|
||||
'production_batches', 'production_schedules', 'production_capacity',
|
||||
'quality_check_templates', 'quality_checks', 'equipment'
|
||||
]
|
||||
|
||||
self.alert_service = None
|
||||
self.notification_service = None
|
||||
self.rabbitmq_client = None
|
||||
self.event_publisher = None
|
||||
# REMOVED: scheduler_service (replaced by Orchestrator Service)
|
||||
|
||||
# Create custom checks for services
|
||||
async def check_alert_service():
|
||||
"""Check production alert service health"""
|
||||
try:
|
||||
return bool(self.alert_service) if self.alert_service else False
|
||||
except Exception as e:
|
||||
self.logger.error("Alert service health check failed", error=str(e))
|
||||
return False
|
||||
|
||||
super().__init__(
|
||||
service_name=settings.SERVICE_NAME,
|
||||
app_name=settings.APP_NAME,
|
||||
description=settings.DESCRIPTION,
|
||||
version=settings.VERSION,
|
||||
api_prefix="", # Empty because RouteBuilder already includes /api/v1
|
||||
database_manager=database_manager,
|
||||
expected_tables=production_expected_tables,
|
||||
custom_health_checks={
|
||||
"alert_service": check_alert_service
|
||||
},
|
||||
enable_messaging=True # Enable messaging support
|
||||
)
|
||||
|
||||
async def _setup_messaging(self):
|
||||
"""Setup messaging for production service using unified messaging"""
|
||||
from shared.messaging import UnifiedEventPublisher, RabbitMQClient
|
||||
try:
|
||||
self.rabbitmq_client = RabbitMQClient(settings.RABBITMQ_URL, service_name="production-service")
|
||||
await self.rabbitmq_client.connect()
|
||||
# Create unified event publisher
|
||||
self.event_publisher = UnifiedEventPublisher(self.rabbitmq_client, "production-service")
|
||||
self.logger.info("Production service unified messaging setup completed")
|
||||
except Exception as e:
|
||||
self.logger.error("Failed to setup production unified messaging", error=str(e))
|
||||
raise
|
||||
|
||||
async def _cleanup_messaging(self):
|
||||
"""Cleanup messaging for production service"""
|
||||
try:
|
||||
if self.rabbitmq_client:
|
||||
await self.rabbitmq_client.disconnect()
|
||||
self.logger.info("Production service messaging cleanup completed")
|
||||
except Exception as e:
|
||||
self.logger.error("Error during production messaging cleanup", error=str(e))
|
||||
|
||||
async def on_startup(self, app: FastAPI):
|
||||
"""Custom startup logic for production service"""
|
||||
# Initialize messaging
|
||||
await self._setup_messaging()
|
||||
|
||||
# Initialize alert service with EventPublisher and database manager
|
||||
self.alert_service = ProductionAlertService(self.event_publisher, self.database_manager)
|
||||
await self.alert_service.start()
|
||||
self.logger.info("Production alert service started")
|
||||
|
||||
# Initialize notification service with EventPublisher
|
||||
self.notification_service = ProductionNotificationService(self.event_publisher)
|
||||
self.logger.info("Production notification service initialized")
|
||||
|
||||
# Initialize production scheduler with alert service and database manager
|
||||
self.production_scheduler = ProductionScheduler(self.alert_service, self.database_manager)
|
||||
await self.production_scheduler.start()
|
||||
self.logger.info("Production scheduler started")
|
||||
|
||||
# Store services in app state
|
||||
app.state.alert_service = self.alert_service
|
||||
app.state.production_alert_service = self.alert_service # Also store with this name for internal trigger
|
||||
app.state.notification_service = self.notification_service # Notification service for state change events
|
||||
app.state.production_scheduler = self.production_scheduler # Store scheduler for manual triggering
|
||||
app.state.event_publisher = self.event_publisher # Store event publisher for ML insights
|
||||
|
||||
async def on_shutdown(self, app: FastAPI):
|
||||
"""Custom shutdown logic for production service"""
|
||||
# Stop production scheduler
|
||||
if hasattr(self, 'production_scheduler') and self.production_scheduler:
|
||||
await self.production_scheduler.stop()
|
||||
self.logger.info("Production scheduler stopped")
|
||||
|
||||
# Stop alert service
|
||||
if self.alert_service:
|
||||
await self.alert_service.stop()
|
||||
self.logger.info("Alert service stopped")
|
||||
|
||||
# Cleanup messaging
|
||||
await self._cleanup_messaging()
|
||||
|
||||
def get_service_features(self):
|
||||
"""Return production-specific features"""
|
||||
return [
|
||||
"production_planning",
|
||||
"batch_management",
|
||||
"production_scheduling",
|
||||
"orchestrator_integration", # NEW: Orchestrator-driven scheduling
|
||||
"quality_control",
|
||||
"equipment_management",
|
||||
"capacity_planning",
|
||||
"alert_notifications"
|
||||
]
|
||||
|
||||
def setup_custom_middleware(self):
|
||||
"""Setup custom middleware for production service"""
|
||||
@self.app.middleware("http")
|
||||
async def logging_middleware(request: Request, call_next):
|
||||
"""Add request logging middleware"""
|
||||
start_time = time.time()
|
||||
response = await call_next(request)
|
||||
process_time = time.time() - start_time
|
||||
|
||||
self.logger.info("HTTP request processed",
|
||||
method=request.method,
|
||||
url=str(request.url),
|
||||
status_code=response.status_code,
|
||||
process_time=round(process_time, 4))
|
||||
|
||||
return response
|
||||
|
||||
|
||||
# Create service instance
|
||||
service = ProductionService()
|
||||
|
||||
# Create FastAPI app with standardized setup
|
||||
app = service.create_app()
|
||||
|
||||
# Setup standard endpoints
|
||||
service.setup_standard_endpoints()
|
||||
|
||||
# Setup custom middleware
|
||||
service.setup_custom_middleware()
|
||||
|
||||
# Include standardized routers
|
||||
# NOTE: Register more specific routes before generic parameterized routes
|
||||
# IMPORTANT: Register audit router FIRST to avoid route matching conflicts
|
||||
service.add_router(audit.router)
|
||||
service.add_router(batch.router)
|
||||
service.add_router(orchestrator.router) # NEW: Orchestrator integration endpoint
|
||||
service.add_router(production_orders_operations.router) # Tenant deletion endpoints
|
||||
service.add_router(quality_templates.router) # Register first to avoid route conflicts
|
||||
service.add_router(equipment.router)
|
||||
service.add_router(production_batches.router)
|
||||
service.add_router(production_schedules.router)
|
||||
service.add_router(production_operations.router)
|
||||
service.add_router(production_dashboard.router)
|
||||
service.add_router(analytics.router)
|
||||
service.add_router(sustainability.router) # Sustainability metrics endpoints
|
||||
service.add_router(internal_demo.router, tags=["internal-demo"])
|
||||
service.add_router(ml_insights.router) # ML insights endpoint
|
||||
service.add_router(ml_insights.internal_router) # Internal ML insights endpoint for demo cloning
|
||||
service.add_router(internal_alert_trigger_router) # Internal alert trigger for demo cloning
|
||||
|
||||
# REMOVED: test_production_scheduler endpoint
|
||||
# Production scheduling is now triggered by the Orchestrator Service
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(
|
||||
"main:app",
|
||||
host="0.0.0.0",
|
||||
port=8000,
|
||||
reload=settings.DEBUG
|
||||
)
|
||||
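The registration-order comments above matter because FastAPI (via Starlette) tries routes in the order they were added. A small sketch of the failure mode that ordering avoids, with illustrative paths rather than the exact URLs produced by RouteBuilder:

# Illustrative only: why specific routes are registered before parameterized ones.
from uuid import UUID
from fastapi import FastAPI

app = FastAPI()

# Parameterized route registered FIRST (the ordering main.py deliberately avoids):
@app.get("/production/{batch_id}")
async def get_batch(batch_id: UUID):
    return {"batch_id": str(batch_id)}

@app.get("/production/quality-templates")
async def list_templates():
    return {"templates": []}

# GET /production/quality-templates now matches get_batch and fails with 422,
# because "quality-templates" cannot be parsed as a UUID. Registering the static
# quality-templates routes first, as main.py does, avoids the shadowing.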
516
services/production/app/ml/yield_insights_orchestrator.py
Normal file
516
services/production/app/ml/yield_insights_orchestrator.py
Normal file
@@ -0,0 +1,516 @@
|
||||
"""
|
||||
Yield Insights Orchestrator
|
||||
Coordinates yield prediction and insight posting
|
||||
"""
|
||||
|
||||
import pandas as pd
|
||||
from typing import Dict, List, Any, Optional
|
||||
import structlog
|
||||
from datetime import datetime
|
||||
from uuid import UUID
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add shared clients to path
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
|
||||
from shared.clients.ai_insights_client import AIInsightsClient
|
||||
from shared.messaging import UnifiedEventPublisher
|
||||
|
||||
from app.ml.yield_predictor import YieldPredictor
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class YieldInsightsOrchestrator:
|
||||
"""
|
||||
Orchestrates yield prediction and insight generation workflow.
|
||||
|
||||
Workflow:
|
||||
1. Predict yield for upcoming production run or analyze historical performance
|
||||
2. Generate insights for yield optimization opportunities
|
||||
3. Post insights to AI Insights Service
|
||||
4. Publish recommendation events to RabbitMQ
|
||||
5. Provide yield predictions for production planning
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
ai_insights_base_url: str = "http://ai-insights-service:8000",
|
||||
event_publisher: Optional[UnifiedEventPublisher] = None
|
||||
):
|
||||
self.predictor = YieldPredictor()
|
||||
self.ai_insights_client = AIInsightsClient(ai_insights_base_url)
|
||||
self.event_publisher = event_publisher
|
||||
|
||||
async def predict_and_post_insights(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipe_id: str,
|
||||
production_history: pd.DataFrame,
|
||||
production_context: Dict[str, Any],
|
||||
min_history_runs: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Complete workflow: Predict yield and post insights.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
recipe_id: Recipe identifier
|
||||
production_history: Historical production runs
|
||||
production_context: Upcoming production context:
|
||||
- staff_assigned (list of staff IDs)
|
||||
- planned_start_time
|
||||
- batch_size
|
||||
- planned_quantity
|
||||
- unit_cost (optional)
|
||||
- equipment_id (optional)
|
||||
min_history_runs: Minimum production runs required
|
||||
|
||||
Returns:
|
||||
Workflow results with prediction and posted insights
|
||||
"""
|
||||
logger.info(
|
||||
"Starting yield prediction workflow",
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
history_runs=len(production_history)
|
||||
)
|
||||
|
||||
# Step 1: Predict yield
|
||||
prediction_results = await self.predictor.predict_yield(
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
production_history=production_history,
|
||||
production_context=production_context,
|
||||
min_history_runs=min_history_runs
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Yield prediction complete",
|
||||
recipe_id=recipe_id,
|
||||
predicted_yield=prediction_results.get('predicted_yield'),
|
||||
insights_generated=len(prediction_results.get('insights', []))
|
||||
)
|
||||
|
||||
# Step 2: Enrich insights with tenant_id and recipe context
|
||||
enriched_insights = self._enrich_insights(
|
||||
prediction_results.get('insights', []),
|
||||
tenant_id,
|
||||
recipe_id
|
||||
)
|
||||
|
||||
# Step 3: Post insights to AI Insights Service
|
||||
if enriched_insights:
|
||||
post_results = await self.ai_insights_client.create_insights_bulk(
|
||||
tenant_id=UUID(tenant_id),
|
||||
insights=enriched_insights
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Yield insights posted to AI Insights Service",
|
||||
recipe_id=recipe_id,
|
||||
total=post_results['total'],
|
||||
successful=post_results['successful'],
|
||||
failed=post_results['failed']
|
||||
)
|
||||
|
||||
# Step 4: Publish recommendation events to RabbitMQ
|
||||
created_insights = post_results.get('created_insights', [])
|
||||
if created_insights:
|
||||
recipe_context = production_context.copy() if production_context else {}
|
||||
recipe_context['recipe_id'] = recipe_id
|
||||
await self._publish_insight_events(
|
||||
tenant_id=tenant_id,
|
||||
insights=created_insights,
|
||||
recipe_context=recipe_context
|
||||
)
|
||||
else:
|
||||
post_results = {'total': 0, 'successful': 0, 'failed': 0}
|
||||
logger.info("No insights to post for recipe", recipe_id=recipe_id)
|
||||
|
||||
        # Step 5: Return comprehensive results
|
||||
return {
|
||||
'tenant_id': tenant_id,
|
||||
'recipe_id': recipe_id,
|
||||
'predicted_at': prediction_results['predicted_at'],
|
||||
'history_runs': prediction_results['history_runs'],
|
||||
'baseline_yield': prediction_results.get('baseline_yield'),
|
||||
'predicted_yield': prediction_results.get('predicted_yield'),
|
||||
'prediction_range': prediction_results.get('prediction_range'),
|
||||
'expected_waste': prediction_results.get('expected_waste'),
|
||||
'confidence': prediction_results['confidence'],
|
||||
'factor_analysis': prediction_results.get('factor_analysis'),
|
||||
'patterns': prediction_results.get('patterns', []),
|
||||
'insights_generated': len(enriched_insights),
|
||||
'insights_posted': post_results['successful'],
|
||||
'insights_failed': post_results['failed'],
|
||||
'created_insights': post_results.get('created_insights', [])
|
||||
}
|
||||
|
||||
async def analyze_and_post_insights(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipe_id: str,
|
||||
production_history: pd.DataFrame,
|
||||
min_history_runs: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Analyze historical yield performance and post insights (no prediction).
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
recipe_id: Recipe identifier
|
||||
production_history: Historical production runs
|
||||
min_history_runs: Minimum production runs required
|
||||
|
||||
Returns:
|
||||
Workflow results with analysis and posted insights
|
||||
"""
|
||||
logger.info(
|
||||
"Starting yield analysis workflow",
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
history_runs=len(production_history)
|
||||
)
|
||||
|
||||
# Step 1: Analyze historical yield
|
||||
analysis_results = await self.predictor.analyze_recipe_yield_history(
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
production_history=production_history,
|
||||
min_history_runs=min_history_runs
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Yield analysis complete",
|
||||
recipe_id=recipe_id,
|
||||
baseline_yield=analysis_results.get('baseline_stats', {}).get('mean_yield'),
|
||||
insights_generated=len(analysis_results.get('insights', []))
|
||||
)
|
||||
|
||||
# Step 2: Enrich insights
|
||||
enriched_insights = self._enrich_insights(
|
||||
analysis_results.get('insights', []),
|
||||
tenant_id,
|
||||
recipe_id
|
||||
)
|
||||
|
||||
# Step 3: Post insights
|
||||
if enriched_insights:
|
||||
post_results = await self.ai_insights_client.create_insights_bulk(
|
||||
tenant_id=UUID(tenant_id),
|
||||
insights=enriched_insights
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Yield analysis insights posted",
|
||||
recipe_id=recipe_id,
|
||||
total=post_results['total'],
|
||||
successful=post_results['successful']
|
||||
)
|
||||
|
||||
# Step 4: Publish recommendation events to RabbitMQ
|
||||
created_insights = post_results.get('created_insights', [])
|
||||
if created_insights:
|
||||
await self._publish_insight_events(
|
||||
tenant_id=tenant_id,
|
||||
insights=created_insights,
|
||||
recipe_context={'recipe_id': recipe_id}
|
||||
)
|
||||
else:
|
||||
post_results = {'total': 0, 'successful': 0, 'failed': 0}
|
||||
|
||||
return {
|
||||
'tenant_id': tenant_id,
|
||||
'recipe_id': recipe_id,
|
||||
'analyzed_at': analysis_results['analyzed_at'],
|
||||
'history_runs': analysis_results['history_runs'],
|
||||
'baseline_stats': analysis_results.get('baseline_stats'),
|
||||
'factor_analysis': analysis_results.get('factor_analysis'),
|
||||
'patterns': analysis_results.get('patterns', []),
|
||||
'insights_generated': len(enriched_insights),
|
||||
'insights_posted': post_results['successful'],
|
||||
'created_insights': post_results.get('created_insights', [])
|
||||
}
|
||||
|
||||
def _enrich_insights(
|
||||
self,
|
||||
insights: List[Dict[str, Any]],
|
||||
tenant_id: str,
|
||||
recipe_id: str
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Enrich insights with required fields for AI Insights Service.
|
||||
|
||||
Args:
|
||||
insights: Raw insights from predictor
|
||||
tenant_id: Tenant identifier
|
||||
recipe_id: Recipe identifier
|
||||
|
||||
Returns:
|
||||
Enriched insights ready for posting
|
||||
"""
|
||||
enriched = []
|
||||
|
||||
for insight in insights:
|
||||
# Add required tenant_id
|
||||
enriched_insight = insight.copy()
|
||||
enriched_insight['tenant_id'] = tenant_id
|
||||
|
||||
# Add recipe context to metrics
|
||||
if 'metrics_json' not in enriched_insight:
|
||||
enriched_insight['metrics_json'] = {}
|
||||
|
||||
enriched_insight['metrics_json']['recipe_id'] = recipe_id
|
||||
|
||||
# Add source metadata
|
||||
enriched_insight['source_service'] = 'production'
|
||||
enriched_insight['source_model'] = 'yield_predictor'
|
||||
enriched_insight['detected_at'] = datetime.utcnow().isoformat()
|
||||
|
||||
enriched.append(enriched_insight)
|
||||
|
||||
return enriched
|
||||
|
||||
async def _publish_insight_events(
|
||||
self,
|
||||
tenant_id: str,
|
||||
insights: List[Dict[str, Any]],
|
||||
recipe_context: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
"""
|
||||
Publish recommendation events to RabbitMQ for each insight.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
insights: List of created insights (with insight_id from AI Insights Service)
|
||||
recipe_context: Optional recipe context (name, id, etc.)
|
||||
"""
|
||||
if not self.event_publisher:
|
||||
logger.warning("Event publisher not configured, skipping event publication")
|
||||
return
|
||||
|
||||
for insight in insights:
|
||||
try:
|
||||
# Determine severity based on confidence and priority
|
||||
confidence = insight.get('confidence', 0)
|
||||
priority = insight.get('priority', 'medium')
|
||||
|
||||
if priority == 'urgent' or confidence >= 90:
|
||||
severity = 'urgent'
|
||||
elif priority == 'high' or confidence >= 70:
|
||||
severity = 'high'
|
||||
elif priority == 'medium' or confidence >= 50:
|
||||
severity = 'medium'
|
||||
else:
|
||||
severity = 'low'
|
||||
|
||||
# Build event metadata
|
||||
event_metadata = {
|
||||
'insight_id': insight.get('id'), # From AI Insights Service response
|
||||
'insight_type': insight.get('insight_type'),
|
||||
'recipe_id': insight.get('metrics_json', {}).get('recipe_id'),
|
||||
'recipe_name': recipe_context.get('recipe_name') if recipe_context else None,
|
||||
'predicted_yield': insight.get('metrics_json', {}).get('predicted_yield'),
|
||||
'confidence': confidence,
|
||||
'recommendation': insight.get('recommendation'),
|
||||
'impact_type': insight.get('impact_type'),
|
||||
'impact_value': insight.get('impact_value'),
|
||||
'source_service': 'production',
|
||||
'source_model': 'yield_predictor'
|
||||
}
|
||||
|
||||
# Remove None values
|
||||
event_metadata = {k: v for k, v in event_metadata.items() if v is not None}
|
||||
|
||||
# Publish recommendation event
|
||||
await self.event_publisher.publish_recommendation(
|
||||
event_type='ai_yield_prediction',
|
||||
tenant_id=tenant_id,
|
||||
severity=severity,
|
||||
data=event_metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Published yield insight recommendation event",
|
||||
tenant_id=tenant_id,
|
||||
insight_id=insight.get('id'),
|
||||
insight_type=insight.get('insight_type'),
|
||||
severity=severity
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to publish insight event",
|
||||
tenant_id=tenant_id,
|
||||
insight_id=insight.get('id'),
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
# Don't raise - we don't want to fail the whole workflow if event publishing fails
|
||||
|
||||
async def analyze_all_recipes(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipes_data: Dict[str, pd.DataFrame],
|
||||
min_history_runs: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Analyze yield performance for all recipes for a tenant.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
recipes_data: Dict of {recipe_id: production_history_df}
|
||||
min_history_runs: Minimum production runs required
|
||||
|
||||
Returns:
|
||||
Comprehensive analysis results
|
||||
"""
|
||||
logger.info(
|
||||
"Analyzing yield for all recipes",
|
||||
tenant_id=tenant_id,
|
||||
recipes=len(recipes_data)
|
||||
)
|
||||
|
||||
all_results = []
|
||||
total_insights_posted = 0
|
||||
recipes_with_issues = []
|
||||
|
||||
# Analyze each recipe
|
||||
for recipe_id, production_history in recipes_data.items():
|
||||
try:
|
||||
results = await self.analyze_and_post_insights(
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
production_history=production_history,
|
||||
min_history_runs=min_history_runs
|
||||
)
|
||||
|
||||
all_results.append(results)
|
||||
total_insights_posted += results['insights_posted']
|
||||
|
||||
# Check for low baseline yield
|
||||
baseline_stats = results.get('baseline_stats')
|
||||
if baseline_stats and baseline_stats.get('mean_yield', 100) < 90:
|
||||
recipes_with_issues.append({
|
||||
'recipe_id': recipe_id,
|
||||
'mean_yield': baseline_stats['mean_yield'],
|
||||
'std_yield': baseline_stats['std_yield']
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error analyzing recipe",
|
||||
recipe_id=recipe_id,
|
||||
error=str(e)
|
||||
)
|
||||
|
||||
# Generate portfolio summary insight if there are yield issues
|
||||
if len(recipes_with_issues) > 0:
|
||||
summary_insight = self._generate_portfolio_summary_insight(
|
||||
tenant_id, recipes_with_issues, all_results
|
||||
)
|
||||
|
||||
if summary_insight:
|
||||
enriched_summary = self._enrich_insights(
|
||||
[summary_insight], tenant_id, 'all_recipes'
|
||||
)
|
||||
|
||||
post_results = await self.ai_insights_client.create_insights_bulk(
|
||||
tenant_id=UUID(tenant_id),
|
||||
insights=enriched_summary
|
||||
)
|
||||
|
||||
total_insights_posted += post_results['successful']
|
||||
|
||||
logger.info(
|
||||
"All recipes yield analysis complete",
|
||||
tenant_id=tenant_id,
|
||||
recipes_analyzed=len(all_results),
|
||||
total_insights_posted=total_insights_posted,
|
||||
recipes_with_issues=len(recipes_with_issues)
|
||||
)
|
||||
|
||||
return {
|
||||
'tenant_id': tenant_id,
|
||||
'analyzed_at': datetime.utcnow().isoformat(),
|
||||
'recipes_analyzed': len(all_results),
|
||||
'recipe_results': all_results,
|
||||
'total_insights_posted': total_insights_posted,
|
||||
'recipes_with_issues': recipes_with_issues
|
||||
}
|
||||
|
||||
def _generate_portfolio_summary_insight(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipes_with_issues: List[Dict[str, Any]],
|
||||
all_results: List[Dict[str, Any]]
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Generate portfolio-level summary insight.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
recipes_with_issues: Recipes with low yield
|
||||
all_results: All recipe analysis results
|
||||
|
||||
Returns:
|
||||
Summary insight or None
|
||||
"""
|
||||
if len(recipes_with_issues) == 0:
|
||||
return None
|
||||
|
||||
# Calculate average yield and potential improvement
|
||||
total_recipes = len(all_results)
|
||||
issues_count = len(recipes_with_issues)
|
||||
avg_low_yield = sum(r['mean_yield'] for r in recipes_with_issues) / issues_count
|
||||
|
||||
# Estimate waste reduction potential
|
||||
# Assuming each recipe produces 1000 units/month, €5/unit cost
|
||||
monthly_production = 1000 * issues_count
|
||||
current_waste_pct = 100 - avg_low_yield
|
||||
target_waste_pct = 5 # Target 95% yield
|
||||
|
||||
if current_waste_pct > target_waste_pct:
|
||||
waste_reduction_units = monthly_production * ((current_waste_pct - target_waste_pct) / 100)
|
||||
annual_savings = waste_reduction_units * 12 * 5 # €5 per unit
|
||||
|
||||
return {
|
||||
'type': 'opportunity',
|
||||
'priority': 'high' if issues_count > 3 else 'medium',
|
||||
'category': 'production',
|
||||
'title': f'Production Yield Optimization: {issues_count} Recipes Below 90%',
|
||||
'description': f'{issues_count} of {total_recipes} recipes have average yield below 90% (average {avg_low_yield:.1f}%). Improving to 95% target would reduce waste by {waste_reduction_units:.0f} units/month, saving €{annual_savings:.0f}/year.',
|
||||
'impact_type': 'cost_savings',
|
||||
'impact_value': annual_savings,
|
||||
'impact_unit': 'euros_per_year',
|
||||
'confidence': 75,
|
||||
'metrics_json': {
|
||||
'recipes_analyzed': total_recipes,
|
||||
'recipes_with_issues': issues_count,
|
||||
'avg_low_yield': round(avg_low_yield, 2),
|
||||
'potential_annual_savings': round(annual_savings, 2),
|
||||
'waste_reduction_units_monthly': round(waste_reduction_units, 2)
|
||||
},
|
||||
'actionable': True,
|
||||
'recommendation_actions': [
|
||||
{
|
||||
'label': 'Review Low-Yield Recipes',
|
||||
'action': 'review_yield_insights',
|
||||
'params': {'tenant_id': tenant_id}
|
||||
},
|
||||
{
|
||||
'label': 'Implement Yield Improvements',
|
||||
'action': 'apply_yield_recommendations',
|
||||
'params': {'tenant_id': tenant_id}
|
||||
}
|
||||
],
|
||||
'source_service': 'production',
|
||||
'source_model': 'yield_predictor'
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
async def close(self):
|
||||
"""Close HTTP client connections."""
|
||||
await self.ai_insights_client.close()
|
||||
813
services/production/app/ml/yield_predictor.py
Normal file
@@ -0,0 +1,813 @@
|
||||
"""
|
||||
Production Yield Predictor
|
||||
Predicts actual vs planned yield and identifies waste reduction opportunities
|
||||
"""
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from typing import Dict, List, Any, Optional, Tuple
|
||||
from datetime import datetime, timedelta
|
||||
import structlog
|
||||
from scipy import stats
|
||||
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
|
||||
from sklearn.linear_model import LinearRegression
|
||||
from sklearn.preprocessing import StandardScaler
|
||||
import warnings
|
||||
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class YieldPredictor:
|
||||
"""
|
||||
Predicts production yield based on historical data and production factors.
|
||||
|
||||
Key Features:
|
||||
- Multi-factor yield prediction (recipe, worker, time-of-day, equipment, batch size)
|
||||
- Identifies low-yield patterns and root causes
|
||||
- Waste categorization (spoilage, measurement error, process inefficiency)
|
||||
- Actionable recommendations for yield improvement
|
||||
- Statistical validation of learned patterns
|
||||
|
||||
Methodology:
|
||||
1. Feature Engineering: Extract worker skill, time factors, batch size effects
|
||||
2. Statistical Analysis: Identify significant yield loss factors
|
||||
3. ML Prediction: Ensemble of Random Forest + Gradient Boosting
|
||||
4. Pattern Detection: Find recurring low-yield situations
|
||||
5. Insight Generation: Actionable recommendations with confidence scores
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.model_cache = {} # Cache trained models per recipe
|
||||
self.baseline_yields = {} # Cache baseline yields per recipe
|
||||
|
||||
async def predict_yield(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipe_id: str,
|
||||
production_history: pd.DataFrame,
|
||||
production_context: Dict[str, Any],
|
||||
min_history_runs: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Predict yield for upcoming production run and generate insights.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
recipe_id: Recipe identifier
|
||||
production_history: Historical production runs with columns:
|
||||
- production_run_id
|
||||
- recipe_id
|
||||
- planned_quantity
|
||||
- actual_quantity
|
||||
- yield_percentage
|
||||
- staff_assigned (list of staff IDs)
|
||||
- started_at
|
||||
- completed_at
|
||||
- batch_size
|
||||
- equipment_id (optional)
|
||||
- notes (optional)
|
||||
production_context: Upcoming production context:
|
||||
- staff_assigned (list of staff IDs)
|
||||
- planned_start_time
|
||||
- batch_size
|
||||
- equipment_id (optional)
|
||||
min_history_runs: Minimum production runs required for learning
|
||||
|
||||
Returns:
|
||||
Prediction results with yield forecast, confidence, and insights
|
||||
"""
|
||||
logger.info(
|
||||
"Predicting production yield",
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
history_runs=len(production_history)
|
||||
)
|
||||
|
||||
# Validate production history
|
||||
if len(production_history) < min_history_runs:
|
||||
return self._insufficient_data_response(
|
||||
recipe_id, production_context, len(production_history), min_history_runs
|
||||
)
|
||||
|
||||
# Step 1: Calculate baseline statistics
|
||||
baseline_stats = self._calculate_baseline_statistics(production_history)
|
||||
|
||||
# Step 2: Feature engineering
|
||||
feature_df = self._engineer_features(production_history)
|
||||
|
||||
# Step 3: Analyze yield factors
|
||||
factor_analysis = self._analyze_yield_factors(feature_df)
|
||||
|
||||
# Step 4: Train predictive model
|
||||
model_results = self._train_yield_model(feature_df)
|
||||
|
||||
# Step 5: Make prediction for upcoming run
|
||||
prediction = self._predict_upcoming_run(
|
||||
production_context, model_results, baseline_stats, feature_df
|
||||
)
|
||||
|
||||
# Step 6: Identify low-yield patterns
|
||||
patterns = self._identify_yield_patterns(feature_df, factor_analysis)
|
||||
|
||||
# Step 7: Generate insights
|
||||
insights = self._generate_yield_insights(
|
||||
tenant_id, recipe_id, baseline_stats, factor_analysis,
|
||||
patterns, prediction, production_context
|
||||
)
|
||||
|
||||
# Step 8: Calculate confidence
|
||||
confidence = self._calculate_prediction_confidence(
|
||||
production_history, model_results, factor_analysis
|
||||
)
|
||||
|
||||
return {
|
||||
'recipe_id': recipe_id,
|
||||
'predicted_at': datetime.utcnow().isoformat(),
|
||||
'history_runs': len(production_history),
|
||||
'baseline_yield': baseline_stats['mean_yield'],
|
||||
'baseline_std': baseline_stats['std_yield'],
|
||||
'predicted_yield': prediction['predicted_yield'],
|
||||
'prediction_range': prediction['prediction_range'],
|
||||
'expected_waste': prediction['expected_waste_units'],  # _predict_upcoming_run returns expected_waste_units / expected_waste_pct
|
||||
'confidence': confidence,
|
||||
'factor_analysis': factor_analysis,
|
||||
'patterns': patterns,
|
||||
'model_performance': model_results['performance'],
|
||||
'insights': insights
|
||||
}
|
||||
|
||||
def _insufficient_data_response(
|
||||
self, recipe_id: str, production_context: Dict[str, Any],
|
||||
current_runs: int, required_runs: int
|
||||
) -> Dict[str, Any]:
|
||||
"""Return response when insufficient historical data."""
|
||||
return {
|
||||
'recipe_id': recipe_id,
|
||||
'predicted_at': datetime.utcnow().isoformat(),
|
||||
'history_runs': current_runs,
|
||||
'status': 'insufficient_data',
|
||||
'required_runs': required_runs,
|
||||
'baseline_yield': None,
|
||||
'predicted_yield': None,
|
||||
'confidence': 0,
|
||||
'insights': [{
|
||||
'type': 'warning',
|
||||
'priority': 'low',
|
||||
'category': 'production',
|
||||
'title': 'Insufficient Production History for Yield Prediction',
|
||||
'description': f'Only {current_runs} production runs available. Need at least {required_runs} runs to build reliable yield predictions. Continue tracking production data to enable yield optimization.',
|
||||
'impact_type': 'data_quality',
|
||||
'confidence': 100,
|
||||
'actionable': True,
|
||||
'recommendation_actions': [{
|
||||
'label': 'Track Production Data',
|
||||
'action': 'continue_production_tracking',
|
||||
'params': {'recipe_id': recipe_id}
|
||||
}]
|
||||
}]
|
||||
}
|
||||
|
||||
def _calculate_baseline_statistics(
|
||||
self, production_history: pd.DataFrame
|
||||
) -> Dict[str, Any]:
|
||||
"""Calculate baseline yield statistics."""
|
||||
yields = production_history['yield_percentage'].values
|
||||
|
||||
return {
|
||||
'mean_yield': float(np.mean(yields)),
|
||||
'median_yield': float(np.median(yields)),
|
||||
'std_yield': float(np.std(yields)),
|
||||
'min_yield': float(np.min(yields)),
|
||||
'max_yield': float(np.max(yields)),
|
||||
'cv_yield': float(np.std(yields) / np.mean(yields)), # Coefficient of variation
|
||||
'percentile_25': float(np.percentile(yields, 25)),
|
||||
'percentile_75': float(np.percentile(yields, 75)),
|
||||
'runs_below_90': int(np.sum(yields < 90)),
|
||||
'runs_above_95': int(np.sum(yields > 95))
|
||||
}
|
||||
|
||||
def _engineer_features(self, production_history: pd.DataFrame) -> pd.DataFrame:
|
||||
"""Engineer features from production history."""
|
||||
df = production_history.copy()
|
||||
|
||||
# Time-based features
|
||||
df['started_at'] = pd.to_datetime(df['started_at'])
|
||||
df['hour_of_day'] = df['started_at'].dt.hour
|
||||
df['day_of_week'] = df['started_at'].dt.dayofweek
|
||||
df['is_weekend'] = df['day_of_week'].isin([5, 6]).astype(int)
|
||||
df['is_early_morning'] = (df['hour_of_day'] < 6).astype(int)
|
||||
df['is_late_night'] = (df['hour_of_day'] >= 22).astype(int)
|
||||
|
||||
# Duration features
|
||||
if 'completed_at' in df.columns:
|
||||
df['completed_at'] = pd.to_datetime(df['completed_at'])
|
||||
df['duration_hours'] = (df['completed_at'] - df['started_at']).dt.total_seconds() / 3600
|
||||
df['is_rushed'] = (df['duration_hours'] < df['duration_hours'].quantile(0.25)).astype(int)
|
||||
|
||||
# Batch size features
|
||||
df['batch_size_normalized'] = df['batch_size'] / df['batch_size'].mean()
|
||||
df['is_large_batch'] = (df['batch_size'] > df['batch_size'].quantile(0.75)).astype(int)
|
||||
df['is_small_batch'] = (df['batch_size'] < df['batch_size'].quantile(0.25)).astype(int)
|
||||
|
||||
# Worker experience features (proxy: number of previous runs)
|
||||
# Extract first worker from staff_assigned list
|
||||
df['worker_id'] = df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
|
||||
|
||||
df = df.sort_values('started_at')
|
||||
df['worker_run_count'] = df.groupby('worker_id').cumcount() + 1
|
||||
df['worker_experience_level'] = pd.cut(
|
||||
df['worker_run_count'],
|
||||
bins=[0, 5, 15, 100],
|
||||
labels=['novice', 'intermediate', 'expert']
|
||||
)
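# pd.cut bins are right-inclusive by default: runs 1-5 -> novice, 6-15 -> intermediate,
# 16-100 -> expert (counts above 100 fall outside the bins and become NaN).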
|
||||
|
||||
# Recent yield trend for worker
|
||||
df['worker_recent_avg_yield'] = df.groupby('worker_id')['yield_percentage'].transform(
|
||||
lambda x: x.rolling(window=5, min_periods=1).mean()
|
||||
)
|
||||
|
||||
return df
|
||||
|
||||
def _analyze_yield_factors(self, feature_df: pd.DataFrame) -> Dict[str, Any]:
|
||||
"""Analyze factors affecting yield using statistical tests."""
|
||||
factors = {}
|
||||
|
||||
# Worker impact
|
||||
# Extract worker_id from staff_assigned for analysis
|
||||
if 'worker_id' not in feature_df.columns:
|
||||
feature_df['worker_id'] = feature_df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
|
||||
|
||||
worker_yields = feature_df.groupby('worker_id')['yield_percentage'].agg(['mean', 'std', 'count'])
|
||||
worker_yields = worker_yields[worker_yields['count'] >= 3] # Min 3 runs per worker
|
||||
|
||||
if len(worker_yields) > 1:
|
||||
# ANOVA test: Does worker significantly affect yield?
|
||||
worker_groups = [
|
||||
feature_df[feature_df['worker_id'] == worker]['yield_percentage'].values
|
||||
for worker in worker_yields.index
|
||||
]
|
||||
f_stat, p_value = stats.f_oneway(*worker_groups)
|
||||
|
||||
factors['worker'] = {
|
||||
'significant': p_value < 0.05,
|
||||
'p_value': float(p_value),
|
||||
'f_statistic': float(f_stat),
|
||||
'best_worker': worker_yields['mean'].idxmax(),
|
||||
'best_worker_yield': float(worker_yields['mean'].max()),
|
||||
'worst_worker': worker_yields['mean'].idxmin(),
|
||||
'worst_worker_yield': float(worker_yields['mean'].min()),
|
||||
'yield_range': float(worker_yields['mean'].max() - worker_yields['mean'].min())
|
||||
}
|
||||
else:
|
||||
factors['worker'] = {'significant': False, 'reason': 'insufficient_workers'}
|
||||
|
||||
# Time of day impact
|
||||
time_groups = {
|
||||
'early_morning': feature_df[feature_df['hour_of_day'] < 6]['yield_percentage'].values,
|
||||
'morning': feature_df[(feature_df['hour_of_day'] >= 6) & (feature_df['hour_of_day'] < 12)]['yield_percentage'].values,
|
||||
'afternoon': feature_df[(feature_df['hour_of_day'] >= 12) & (feature_df['hour_of_day'] < 18)]['yield_percentage'].values,
|
||||
'evening': feature_df[feature_df['hour_of_day'] >= 18]['yield_percentage'].values
|
||||
}
|
||||
time_groups = {k: v for k, v in time_groups.items() if len(v) >= 3}
|
||||
|
||||
if len(time_groups) > 1:
|
||||
f_stat, p_value = stats.f_oneway(*time_groups.values())
|
||||
time_means = {k: np.mean(v) for k, v in time_groups.items()}
|
||||
|
||||
factors['time_of_day'] = {
|
||||
'significant': p_value < 0.05,
|
||||
'p_value': float(p_value),
|
||||
'best_time': max(time_means, key=time_means.get),
|
||||
'best_time_yield': float(max(time_means.values())),
|
||||
'worst_time': min(time_means, key=time_means.get),
|
||||
'worst_time_yield': float(min(time_means.values())),
|
||||
'yield_range': float(max(time_means.values()) - min(time_means.values()))
|
||||
}
|
||||
else:
|
||||
factors['time_of_day'] = {'significant': False, 'reason': 'insufficient_data'}
|
||||
|
||||
# Batch size impact (correlation)
|
||||
if len(feature_df) >= 10:
|
||||
correlation, p_value = stats.pearsonr(
|
||||
feature_df['batch_size'],
|
||||
feature_df['yield_percentage']
|
||||
)
|
||||
|
||||
factors['batch_size'] = {
|
||||
'significant': abs(correlation) > 0.3 and p_value < 0.05,
|
||||
'correlation': float(correlation),
|
||||
'p_value': float(p_value),
|
||||
'direction': 'positive' if correlation > 0 else 'negative',
|
||||
'interpretation': self._interpret_batch_size_effect(correlation)
|
||||
}
|
||||
else:
|
||||
factors['batch_size'] = {'significant': False, 'reason': 'insufficient_data'}
|
||||
|
||||
# Weekend vs weekday
|
||||
weekend_yields = feature_df[feature_df['is_weekend'] == 1]['yield_percentage'].values
|
||||
weekday_yields = feature_df[feature_df['is_weekend'] == 0]['yield_percentage'].values
|
||||
|
||||
if len(weekend_yields) >= 3 and len(weekday_yields) >= 3:
|
||||
t_stat, p_value = stats.ttest_ind(weekend_yields, weekday_yields)
|
||||
|
||||
factors['weekend_effect'] = {
|
||||
'significant': p_value < 0.05,
|
||||
'p_value': float(p_value),
|
||||
't_statistic': float(t_stat),
|
||||
'weekend_yield': float(np.mean(weekend_yields)),
|
||||
'weekday_yield': float(np.mean(weekday_yields)),
|
||||
'difference': float(np.mean(weekend_yields) - np.mean(weekday_yields))
|
||||
}
|
||||
else:
|
||||
factors['weekend_effect'] = {'significant': False, 'reason': 'insufficient_weekend_data'}
|
||||
|
||||
return factors
|
||||
|
||||
def _interpret_batch_size_effect(self, correlation: float) -> str:
|
||||
"""Interpret batch size correlation."""
|
||||
if abs(correlation) < 0.3:
|
||||
return "Batch size has minimal impact on yield"
|
||||
elif correlation > 0:
|
||||
return "Larger batches tend to have higher yield (economies of scale)"
|
||||
else:
|
||||
return "Larger batches tend to have lower yield (difficulty handling large volumes)"
|
||||
|
||||
def _train_yield_model(self, feature_df: pd.DataFrame) -> Dict[str, Any]:
|
||||
"""Train ML model to predict yield."""
|
||||
# Prepare features
|
||||
feature_columns = [
|
||||
'hour_of_day', 'day_of_week', 'is_weekend',
|
||||
'batch_size_normalized', 'is_large_batch', 'is_small_batch',
|
||||
'worker_run_count'
|
||||
]
|
||||
|
||||
if 'duration_hours' in feature_df.columns:
|
||||
feature_columns.append('duration_hours')
|
||||
|
||||
# Encode worker_id (extracted from staff_assigned)
|
||||
if 'worker_id' not in feature_df.columns:
|
||||
feature_df['worker_id'] = feature_df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
|
||||
|
||||
worker_encoding = {worker: idx for idx, worker in enumerate(feature_df['worker_id'].unique())}
|
||||
feature_df['worker_encoded'] = feature_df['worker_id'].map(worker_encoding)
|
||||
feature_columns.append('worker_encoded')
|
||||
|
||||
X = feature_df[feature_columns].fillna(0).values
|
||||
y = feature_df['yield_percentage'].values
|
||||
|
||||
# Split into train/test (temporal split)
|
||||
split_idx = int(len(X) * 0.8)
|
||||
X_train, X_test = X[:split_idx], X[split_idx:]
|
||||
y_train, y_test = y[:split_idx], y[split_idx:]
|
||||
|
||||
# Scale features
|
||||
scaler = StandardScaler()
|
||||
X_train_scaled = scaler.fit_transform(X_train)
|
||||
X_test_scaled = scaler.transform(X_test)
|
||||
|
||||
# Train ensemble of models
|
||||
models = {
|
||||
'random_forest': RandomForestRegressor(n_estimators=100, max_depth=5, random_state=42),
|
||||
'gradient_boosting': GradientBoostingRegressor(n_estimators=50, max_depth=3, random_state=42),
|
||||
'linear': LinearRegression()
|
||||
}
|
||||
|
||||
performances = {}
|
||||
predictions = {}
|
||||
|
||||
for name, model in models.items():
|
||||
model.fit(X_train_scaled, y_train)
|
||||
y_pred = model.predict(X_test_scaled)
|
||||
|
||||
mae = np.mean(np.abs(y_test - y_pred))
|
||||
rmse = np.sqrt(np.mean((y_test - y_pred) ** 2))
|
||||
r2 = 1 - (np.sum((y_test - y_pred) ** 2) / np.sum((y_test - np.mean(y_test)) ** 2))
|
||||
|
||||
performances[name] = {
|
||||
'mae': float(mae),
|
||||
'rmse': float(rmse),
|
||||
'r2': float(r2)
|
||||
}
|
||||
predictions[name] = y_pred
|
||||
|
||||
# Select best model based on MAE
|
||||
best_model_name = min(performances, key=lambda k: performances[k]['mae'])
|
||||
best_model = models[best_model_name]
|
||||
|
||||
# Feature importance (if available)
|
||||
feature_importance = {}
|
||||
if hasattr(best_model, 'feature_importances_'):
|
||||
importances = best_model.feature_importances_
|
||||
feature_importance = {
|
||||
feature_columns[i]: float(importances[i])
|
||||
for i in range(len(feature_columns))
|
||||
}
|
||||
feature_importance = dict(sorted(
|
||||
feature_importance.items(),
|
||||
key=lambda x: x[1],
|
||||
reverse=True
|
||||
))
|
||||
|
||||
return {
|
||||
'best_model': best_model,
|
||||
'best_model_name': best_model_name,
|
||||
'scaler': scaler,
|
||||
'feature_columns': feature_columns,
|
||||
'worker_encoding': worker_encoding,
|
||||
'performance': performances[best_model_name],
|
||||
'all_performances': performances,
|
||||
'feature_importance': feature_importance
|
||||
}
|
||||
|
||||
def _predict_upcoming_run(
|
||||
self,
|
||||
production_context: Dict[str, Any],
|
||||
model_results: Dict[str, Any],
|
||||
baseline_stats: Dict[str, Any],
|
||||
feature_df: pd.DataFrame
|
||||
) -> Dict[str, Any]:
|
||||
"""Predict yield for upcoming production run."""
|
||||
# Extract context
|
||||
staff_assigned = production_context.get('staff_assigned', [])
|
||||
worker_id = staff_assigned[0] if isinstance(staff_assigned, list) and len(staff_assigned) > 0 else 'unknown'
|
||||
planned_start = pd.to_datetime(production_context.get('planned_start_time'))
|
||||
batch_size = production_context.get('batch_size')
|
||||
|
||||
# Get worker experience
|
||||
if 'worker_id' not in feature_df.columns:
|
||||
feature_df['worker_id'] = feature_df['staff_assigned'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else 'unknown')
|
||||
|
||||
worker_runs = feature_df[feature_df['worker_id'] == worker_id]
|
||||
worker_run_count = len(worker_runs) if len(worker_runs) > 0 else 1
|
||||
|
||||
# Build feature vector
|
||||
mean_batch_size = feature_df['batch_size'].mean()
|
||||
batch_size_normalized = batch_size / mean_batch_size
|
||||
is_large_batch = 1 if batch_size > feature_df['batch_size'].quantile(0.75) else 0
|
||||
is_small_batch = 1 if batch_size < feature_df['batch_size'].quantile(0.25) else 0
|
||||
|
||||
features = {
|
||||
'hour_of_day': planned_start.hour,
|
||||
'day_of_week': planned_start.dayofweek,
|
||||
'is_weekend': 1 if planned_start.dayofweek in [5, 6] else 0,
|
||||
'batch_size_normalized': batch_size_normalized,
|
||||
'is_large_batch': is_large_batch,
|
||||
'is_small_batch': is_small_batch,
|
||||
'worker_run_count': worker_run_count,
|
||||
'duration_hours': float(feature_df['duration_hours'].mean()) if 'duration_hours' in feature_df.columns else 0,  # Not known yet; use historical mean to avoid biasing the prediction
|
||||
'worker_encoded': model_results['worker_encoding'].get(worker_id, 0)
|
||||
}
|
||||
|
||||
# Create feature vector in correct order
|
||||
X = np.array([[features.get(col, 0) for col in model_results['feature_columns']]])
|
||||
X_scaled = model_results['scaler'].transform(X)
|
||||
|
||||
# Predict
|
||||
predicted_yield = float(model_results['best_model'].predict(X_scaled)[0])
|
||||
|
||||
# Prediction range (based on model RMSE)
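# Assumes roughly normal residuals, so ±1.96 * RMSE approximates a 95% prediction interval.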
|
||||
rmse = model_results['performance']['rmse']
|
||||
prediction_range = {
|
||||
'lower': max(0, predicted_yield - 1.96 * rmse),
|
||||
'upper': min(100, predicted_yield + 1.96 * rmse)
|
||||
}
|
||||
|
||||
# Expected waste
|
||||
planned_quantity = production_context.get('planned_quantity', 100)
|
||||
expected_waste_pct = max(0, 100 - predicted_yield)
|
||||
expected_waste_units = planned_quantity * (expected_waste_pct / 100)
|
||||
|
||||
return {
|
||||
'predicted_yield': round(predicted_yield, 2),
|
||||
'prediction_range': prediction_range,
|
||||
'expected_waste_pct': round(expected_waste_pct, 2),
|
||||
'expected_waste_units': round(expected_waste_units, 2),
|
||||
'baseline_comparison': round(predicted_yield - baseline_stats['mean_yield'], 2),
|
||||
'features_used': features
|
||||
}
|
||||
|
||||
def _identify_yield_patterns(
|
||||
self, feature_df: pd.DataFrame, factor_analysis: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Identify recurring low-yield patterns."""
|
||||
patterns = []
|
||||
|
||||
# Pattern 1: Specific worker consistently low
|
||||
if factor_analysis.get('worker', {}).get('significant'):
|
||||
worst_worker = factor_analysis['worker']['worst_worker']
|
||||
worst_yield = factor_analysis['worker']['worst_worker_yield']
|
||||
best_yield = factor_analysis['worker']['best_worker_yield']
|
||||
|
||||
if worst_yield < 90 and (best_yield - worst_yield) > 5:
|
||||
patterns.append({
|
||||
'pattern': 'low_yield_worker',
|
||||
'description': f'Worker {worst_worker} consistently produces {worst_yield:.1f}% yield vs best worker {best_yield:.1f}%',
|
||||
'severity': 'high' if worst_yield < 85 else 'medium',
|
||||
'affected_runs': int(len(feature_df[feature_df['worker_id'] == worst_worker])),
|
||||
'yield_impact': round(best_yield - worst_yield, 2),
|
||||
'recommendation': 'Provide additional training or reassign to different recipes'
|
||||
})
|
||||
|
||||
# Pattern 2: Time-of-day effect
|
||||
if factor_analysis.get('time_of_day', {}).get('significant'):
|
||||
worst_time = factor_analysis['time_of_day']['worst_time']
|
||||
worst_yield = factor_analysis['time_of_day']['worst_time_yield']
|
||||
|
||||
if worst_yield < 90:
|
||||
patterns.append({
|
||||
'pattern': 'low_yield_time',
|
||||
'description': f'{worst_time} shifts produce {worst_yield:.1f}% yield',
|
||||
'severity': 'medium',
|
||||
'affected_runs': 'varies',
|
||||
'yield_impact': round(factor_analysis['time_of_day']['yield_range'], 2),
|
||||
'recommendation': f'Avoid scheduling this recipe during {worst_time}'
|
||||
})
|
||||
|
||||
# Pattern 3: Large batch issues
|
||||
if factor_analysis.get('batch_size', {}).get('significant'):
|
||||
if factor_analysis['batch_size']['direction'] == 'negative':
|
||||
patterns.append({
|
||||
'pattern': 'large_batch_yield_loss',
|
||||
'description': 'Larger batches have lower yield - equipment or process capacity issues',
|
||||
'severity': 'medium',
|
||||
'correlation': round(factor_analysis['batch_size']['correlation'], 3),
|
||||
'recommendation': 'Split large batches or upgrade equipment'
|
||||
})
|
||||
|
||||
# Pattern 4: Weekend effect
|
||||
if factor_analysis.get('weekend_effect', {}).get('significant'):
|
||||
weekend_yield = factor_analysis['weekend_effect']['weekend_yield']
|
||||
weekday_yield = factor_analysis['weekend_effect']['weekday_yield']
|
||||
|
||||
if abs(weekend_yield - weekday_yield) > 3:
|
||||
if weekend_yield < weekday_yield:
|
||||
patterns.append({
|
||||
'pattern': 'weekend_yield_drop',
|
||||
'description': f'Weekend production {weekend_yield:.1f}% vs weekday {weekday_yield:.1f}%',
|
||||
'severity': 'low',
|
||||
'yield_impact': round(weekday_yield - weekend_yield, 2),
|
||||
'recommendation': 'Review weekend staffing or processes'
|
||||
})
|
||||
|
||||
return patterns
|
||||
|
||||
def _generate_yield_insights(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipe_id: str,
|
||||
baseline_stats: Dict[str, Any],
|
||||
factor_analysis: Dict[str, Any],
|
||||
patterns: List[Dict[str, Any]],
|
||||
prediction: Dict[str, Any],
|
||||
production_context: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Generate actionable insights for yield improvement."""
|
||||
insights = []
|
||||
|
||||
# Insight 1: Low predicted yield warning
|
||||
if prediction['predicted_yield'] < 90:
|
||||
waste_value = prediction['expected_waste_units'] * production_context.get('unit_cost', 5)
|
||||
|
||||
insights.append({
|
||||
'type': 'warning',
|
||||
'priority': 'high' if prediction['predicted_yield'] < 85 else 'medium',
|
||||
'category': 'production',
|
||||
'title': f'Low Yield Predicted: {prediction["predicted_yield"]:.1f}%',
|
||||
'description': f'Upcoming production run predicted to yield {prediction["predicted_yield"]:.1f}%, below baseline {baseline_stats["mean_yield"]:.1f}%. Expected waste: {prediction["expected_waste_units"]:.1f} units (€{waste_value:.2f}).',
|
||||
'impact_type': 'waste',
|
||||
'impact_value': prediction['expected_waste_units'],
|
||||
'impact_unit': 'units',
|
||||
'confidence': 75,
|
||||
'metrics_json': {
|
||||
'recipe_id': recipe_id,
|
||||
'predicted_yield': prediction['predicted_yield'],
|
||||
'expected_waste': prediction['expected_waste_units'],
|
||||
'waste_value': round(waste_value, 2)
|
||||
},
|
||||
'actionable': True,
|
||||
'recommendation_actions': [{
|
||||
'label': 'Review Production Setup',
|
||||
'action': 'review_production_factors',
|
||||
'params': {
|
||||
'recipe_id': recipe_id,
|
||||
'worker_id': (production_context.get('staff_assigned') or ['unknown'])[0]
|
||||
}
|
||||
}]
|
||||
})
|
||||
|
||||
# Insight 2: High-severity patterns
|
||||
for pattern in patterns:
|
||||
if pattern.get('severity') == 'high':
|
||||
if pattern['pattern'] == 'low_yield_worker':
|
||||
insights.append({
|
||||
'type': 'opportunity',
|
||||
'priority': 'high',
|
||||
'category': 'production',
|
||||
'title': f'Worker Training Opportunity: {pattern["yield_impact"]:.1f}% Yield Gap',
|
||||
'description': pattern['description'] + '. Improving this worker to average performance would save significant waste.',
|
||||
'impact_type': 'yield_improvement',
|
||||
'impact_value': pattern['yield_impact'],
|
||||
'impact_unit': 'percentage_points',
|
||||
'confidence': 85,
|
||||
'metrics_json': {
|
||||
'recipe_id': recipe_id,
|
||||
'pattern': pattern['pattern'],
|
||||
'yield_impact': pattern['yield_impact']
|
||||
},
|
||||
'actionable': True,
|
||||
'recommendation_actions': [{
|
||||
'label': 'Schedule Training',
|
||||
'action': 'schedule_worker_training',
|
||||
'params': {'recipe_id': recipe_id}
|
||||
}]
|
||||
})
|
||||
|
||||
# Insight 3: Excellent yield
|
||||
if prediction['predicted_yield'] > 98:
|
||||
insights.append({
|
||||
'type': 'positive',
|
||||
'priority': 'low',
|
||||
'category': 'production',
|
||||
'title': f'Excellent Yield Expected: {prediction["predicted_yield"]:.1f}%',
|
||||
'description': f'Optimal production conditions detected. Expected yield {prediction["predicted_yield"]:.1f}% exceeds baseline {baseline_stats["mean_yield"]:.1f}%.',
|
||||
'impact_type': 'yield_improvement',
|
||||
'impact_value': prediction['baseline_comparison'],
|
||||
'impact_unit': 'percentage_points',
|
||||
'confidence': 70,
|
||||
'metrics_json': {
|
||||
'recipe_id': recipe_id,
|
||||
'predicted_yield': prediction['predicted_yield']
|
||||
},
|
||||
'actionable': False
|
||||
})
|
||||
|
||||
# Insight 4: Yield variability issue
|
||||
if baseline_stats['cv_yield'] > 0.05: # Coefficient of variation > 5%
|
||||
insights.append({
|
||||
'type': 'opportunity',
|
||||
'priority': 'medium',
|
||||
'category': 'production',
|
||||
'title': f'High Yield Variability: {baseline_stats["cv_yield"]*100:.1f}% CV',
|
||||
'description': f'Yield varies significantly across production runs (CV={baseline_stats["cv_yield"]*100:.1f}%, range {baseline_stats["min_yield"]:.1f}%-{baseline_stats["max_yield"]:.1f}%). Standardizing processes could reduce waste.',
|
||||
'impact_type': 'process_improvement',
|
||||
'confidence': 80,
|
||||
'metrics_json': {
|
||||
'recipe_id': recipe_id,
|
||||
'cv_yield': round(baseline_stats['cv_yield'], 3),
|
||||
'yield_range': round(baseline_stats['max_yield'] - baseline_stats['min_yield'], 2)
|
||||
},
|
||||
'actionable': True,
|
||||
'recommendation_actions': [{
|
||||
'label': 'Standardize Process',
|
||||
'action': 'review_production_sop',
|
||||
'params': {'recipe_id': recipe_id}
|
||||
}]
|
||||
})
|
||||
|
||||
return insights
|
||||
|
||||
def _calculate_prediction_confidence(
|
||||
self,
|
||||
production_history: pd.DataFrame,
|
||||
model_results: Dict[str, Any],
|
||||
factor_analysis: Dict[str, Any]
|
||||
) -> int:
|
||||
"""Calculate overall confidence score for predictions."""
|
||||
confidence_factors = []
|
||||
|
||||
# Factor 1: Sample size (0-30 points)
|
||||
n_runs = len(production_history)
|
||||
if n_runs >= 100:
|
||||
sample_score = 30
|
||||
elif n_runs >= 50:
|
||||
sample_score = 25
|
||||
elif n_runs >= 30:
|
||||
sample_score = 20
|
||||
else:
|
||||
sample_score = 10
|
||||
confidence_factors.append(('sample_size', sample_score))
|
||||
|
||||
# Factor 2: Model performance (0-30 points)
|
||||
r2 = model_results['performance']['r2']
|
||||
mae = model_results['performance']['mae']
|
||||
|
||||
if r2 > 0.7 and mae < 3:
|
||||
model_score = 30
|
||||
elif r2 > 0.5 and mae < 5:
|
||||
model_score = 25
|
||||
elif r2 > 0.3 and mae < 7:
|
||||
model_score = 20
|
||||
else:
|
||||
model_score = 10
|
||||
confidence_factors.append(('model_performance', model_score))
|
||||
|
||||
# Factor 3: Statistical significance of factors (0-25 points)
|
||||
significant_factors = sum(
|
||||
1 for factor in factor_analysis.values()
|
||||
if isinstance(factor, dict) and factor.get('significant')
|
||||
)
|
||||
|
||||
if significant_factors >= 3:
|
||||
stats_score = 25
|
||||
elif significant_factors >= 2:
|
||||
stats_score = 20
|
||||
elif significant_factors >= 1:
|
||||
stats_score = 15
|
||||
else:
|
||||
stats_score = 10
|
||||
confidence_factors.append(('significant_factors', stats_score))
|
||||
|
||||
# Factor 4: Data recency (0-15 points)
|
||||
most_recent = production_history['started_at'].max()
|
||||
days_old = (datetime.utcnow() - pd.to_datetime(most_recent)).days
|
||||
|
||||
if days_old <= 7:
|
||||
recency_score = 15
|
||||
elif days_old <= 30:
|
||||
recency_score = 12
|
||||
elif days_old <= 90:
|
||||
recency_score = 8
|
||||
else:
|
||||
recency_score = 5
|
||||
confidence_factors.append(('data_recency', recency_score))
|
||||
|
||||
total_confidence = sum(score for _, score in confidence_factors)
|
||||
|
||||
return min(100, max(0, total_confidence))
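# Illustrative scoring example (hypothetical inputs): 60 runs (25) + r2=0.6, mae=4.0 (25)
# + 2 significant factors (20) + data 10 days old (12) = 82 -> confidence 82/100.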
|
||||
|
||||
async def analyze_recipe_yield_history(
|
||||
self,
|
||||
tenant_id: str,
|
||||
recipe_id: str,
|
||||
production_history: pd.DataFrame,
|
||||
min_history_runs: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Analyze historical yield performance for a recipe (no prediction).
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier
|
||||
recipe_id: Recipe identifier
|
||||
production_history: Historical production runs
|
||||
min_history_runs: Minimum production runs required
|
||||
|
||||
Returns:
|
||||
Historical analysis with insights
|
||||
"""
|
||||
logger.info(
|
||||
"Analyzing recipe yield history",
|
||||
tenant_id=tenant_id,
|
||||
recipe_id=recipe_id,
|
||||
history_runs=len(production_history)
|
||||
)
|
||||
|
||||
if len(production_history) < min_history_runs:
|
||||
return self._insufficient_data_response(
|
||||
recipe_id, {}, len(production_history), min_history_runs
|
||||
)
|
||||
|
||||
# Calculate statistics
|
||||
baseline_stats = self._calculate_baseline_statistics(production_history)
|
||||
|
||||
# Feature engineering
|
||||
feature_df = self._engineer_features(production_history)
|
||||
|
||||
# Analyze factors
|
||||
factor_analysis = self._analyze_yield_factors(feature_df)
|
||||
|
||||
# Identify patterns
|
||||
patterns = self._identify_yield_patterns(feature_df, factor_analysis)
|
||||
|
||||
# Generate insights (without prediction)
|
||||
insights = []
|
||||
|
||||
# Add insights for patterns
|
||||
for pattern in patterns:
|
||||
if pattern.get('severity') in ['high', 'medium']:
|
||||
insights.append({
|
||||
'type': 'opportunity',
|
||||
'priority': pattern['severity'],
|
||||
'category': 'production',
|
||||
'title': f'Yield Pattern Detected: {pattern["pattern"]}',
|
||||
'description': pattern['description'],
|
||||
'impact_type': 'yield_improvement',
|
||||
'confidence': 80,
|
||||
'metrics_json': {
|
||||
'recipe_id': recipe_id,
|
||||
'pattern': pattern
|
||||
},
|
||||
'actionable': True,
|
||||
'recommendation': pattern['recommendation']
|
||||
})
|
||||
|
||||
return {
|
||||
'recipe_id': recipe_id,
|
||||
'analyzed_at': datetime.utcnow().isoformat(),
|
||||
'history_runs': len(production_history),
|
||||
'baseline_stats': baseline_stats,
|
||||
'factor_analysis': factor_analysis,
|
||||
'patterns': patterns,
|
||||
'insights': insights
|
||||
}
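# ----------------------------------------------------------------
# Illustrative usage sketch (not part of the service runtime).
# Builds a synthetic production history to show the expected input
# shape; the recipe/tenant IDs, worker IDs, and numeric values below
# are made up for demonstration only.
# ----------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    rng = np.random.default_rng(42)
    n_runs = 40
    start = datetime.utcnow() - timedelta(days=n_runs)
    history = pd.DataFrame({
        'production_run_id': [f'run-{i}' for i in range(n_runs)],
        'recipe_id': ['recipe-demo'] * n_runs,
        'planned_quantity': [100.0] * n_runs,
        'actual_quantity': rng.uniform(85, 100, n_runs),
        'yield_percentage': rng.uniform(85, 100, n_runs),
        'staff_assigned': [[f'worker-{i % 3}'] for i in range(n_runs)],
        'started_at': [start + timedelta(days=i, hours=6) for i in range(n_runs)],
        'completed_at': [start + timedelta(days=i, hours=9) for i in range(n_runs)],
        'batch_size': rng.uniform(50, 150, n_runs),
    })
    context = {
        'staff_assigned': ['worker-1'],
        'planned_start_time': datetime.utcnow() + timedelta(days=1),
        'batch_size': 120,
        'planned_quantity': 100,
    }
    result = asyncio.run(
        YieldPredictor().predict_yield('tenant-demo', 'recipe-demo', history, context)
    )
    print(result['predicted_yield'], result['confidence'])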
|
||||
42
services/production/app/models/__init__.py
Normal file
@@ -0,0 +1,42 @@
|
||||
# ================================================================
|
||||
# services/production/app/models/__init__.py
|
||||
# ================================================================
|
||||
"""
|
||||
Production service models
|
||||
"""
|
||||
|
||||
# Import AuditLog model for this service
|
||||
from shared.security import create_audit_log_model
|
||||
from shared.database.base import Base
|
||||
|
||||
# Create audit log model for this service
|
||||
AuditLog = create_audit_log_model(Base)
|
||||
|
||||
from .production import (
|
||||
ProductionBatch,
|
||||
ProductionSchedule,
|
||||
ProductionCapacity,
|
||||
QualityCheckTemplate,
|
||||
QualityCheck,
|
||||
Equipment,
|
||||
ProductionStatus,
|
||||
ProductionPriority,
|
||||
EquipmentStatus,
|
||||
ProcessStage,
|
||||
EquipmentType,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"ProductionBatch",
|
||||
"ProductionSchedule",
|
||||
"ProductionCapacity",
|
||||
"QualityCheckTemplate",
|
||||
"QualityCheck",
|
||||
"Equipment",
|
||||
"ProductionStatus",
|
||||
"ProductionPriority",
|
||||
"EquipmentStatus",
|
||||
"ProcessStage",
|
||||
"EquipmentType",
|
||||
"AuditLog",
|
||||
]
|
||||
879
services/production/app/models/production.py
Normal file
@@ -0,0 +1,879 @@
|
||||
# ================================================================
|
||||
# services/production/app/models/production.py
|
||||
# ================================================================
|
||||
"""
|
||||
Production models for the production service
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, String, Integer, Float, DateTime, Boolean, Text, JSON, Enum as SQLEnum
|
||||
from sqlalchemy.dialects.postgresql import UUID
|
||||
from sqlalchemy.orm import deferred
|
||||
from sqlalchemy.sql import func
|
||||
from datetime import datetime, timezone
|
||||
from typing import Dict, Any, Optional
|
||||
import uuid
|
||||
import enum
|
||||
|
||||
from shared.database.base import Base
|
||||
|
||||
|
||||
class ProductionStatus(str, enum.Enum):
|
||||
"""Production batch status enumeration"""
|
||||
PENDING = "PENDING"
|
||||
IN_PROGRESS = "IN_PROGRESS"
|
||||
COMPLETED = "COMPLETED"
|
||||
CANCELLED = "CANCELLED"
|
||||
ON_HOLD = "ON_HOLD"
|
||||
QUALITY_CHECK = "QUALITY_CHECK"
|
||||
FAILED = "FAILED"
|
||||
|
||||
|
||||
class ProductionPriority(str, enum.Enum):
|
||||
"""Production priority levels"""
|
||||
LOW = "LOW"
|
||||
MEDIUM = "MEDIUM"
|
||||
HIGH = "HIGH"
|
||||
URGENT = "URGENT"
|
||||
|
||||
|
||||
class EquipmentStatus(str, enum.Enum):
|
||||
"""Equipment status enumeration"""
|
||||
OPERATIONAL = "OPERATIONAL"
|
||||
MAINTENANCE = "MAINTENANCE"
|
||||
DOWN = "DOWN"
|
||||
WARNING = "WARNING"
|
||||
|
||||
|
||||
class ProcessStage(str, enum.Enum):
|
||||
"""Production process stages where quality checks can occur"""
|
||||
MIXING = "mixing"
|
||||
PROOFING = "proofing"
|
||||
SHAPING = "shaping"
|
||||
BAKING = "baking"
|
||||
COOLING = "cooling"
|
||||
PACKAGING = "packaging"
|
||||
FINISHING = "finishing"
|
||||
|
||||
|
||||
class EquipmentType(str, enum.Enum):
|
||||
"""Equipment type enumeration"""
|
||||
OVEN = "oven"
|
||||
MIXER = "mixer"
|
||||
PROOFER = "proofer"
|
||||
FREEZER = "freezer"
|
||||
PACKAGING = "packaging"
|
||||
OTHER = "other"
|
||||
|
||||
|
||||
class ProductionBatch(Base):
|
||||
"""Production batch model for tracking individual production runs"""
|
||||
__tablename__ = "production_batches"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
batch_number = Column(String(50), nullable=False, unique=True, index=True)
|
||||
|
||||
# Product and recipe information
|
||||
product_id = Column(UUID(as_uuid=True), nullable=False, index=True) # Reference to inventory/recipes
|
||||
product_name = Column(String(255), nullable=False)
|
||||
recipe_id = Column(UUID(as_uuid=True), nullable=True)
|
||||
|
||||
# Production planning
|
||||
planned_start_time = Column(DateTime(timezone=True), nullable=False)
|
||||
planned_end_time = Column(DateTime(timezone=True), nullable=False)
|
||||
planned_quantity = Column(Float, nullable=False)
|
||||
planned_duration_minutes = Column(Integer, nullable=False)
|
||||
|
||||
# Actual production tracking
|
||||
actual_start_time = Column(DateTime(timezone=True), nullable=True)
|
||||
actual_end_time = Column(DateTime(timezone=True), nullable=True)
|
||||
actual_quantity = Column(Float, nullable=True)
|
||||
actual_duration_minutes = Column(Integer, nullable=True)
|
||||
|
||||
# Status and priority
|
||||
status = Column(SQLEnum(ProductionStatus), nullable=False, default=ProductionStatus.PENDING, index=True)
|
||||
priority = Column(SQLEnum(ProductionPriority), nullable=False, default=ProductionPriority.MEDIUM)
|
||||
|
||||
# Process stage tracking
|
||||
current_process_stage = Column(SQLEnum(ProcessStage), nullable=True, index=True)
|
||||
process_stage_history = Column(JSON, nullable=True) # Track stage transitions with timestamps
|
||||
pending_quality_checks = Column(JSON, nullable=True) # Required quality checks for current stage
|
||||
completed_quality_checks = Column(JSON, nullable=True) # Completed quality checks by stage
|
||||
|
||||
# Cost tracking
|
||||
estimated_cost = Column(Float, nullable=True)
|
||||
actual_cost = Column(Float, nullable=True)
|
||||
labor_cost = Column(Float, nullable=True)
|
||||
material_cost = Column(Float, nullable=True)
|
||||
overhead_cost = Column(Float, nullable=True)
|
||||
|
||||
# Quality metrics
|
||||
yield_percentage = Column(Float, nullable=True) # actual/planned quantity
|
||||
quality_score = Column(Float, nullable=True)
|
||||
waste_quantity = Column(Float, nullable=True)
|
||||
defect_quantity = Column(Float, nullable=True)
|
||||
waste_defect_type = Column(String(100), nullable=True) # Type of defect causing waste (burnt, misshapen, underproofed, temperature_issues, expired)
|
||||
|
||||
# Equipment and resources
|
||||
equipment_used = Column(JSON, nullable=True) # List of equipment IDs
|
||||
staff_assigned = Column(JSON, nullable=True) # List of staff IDs
|
||||
station_id = Column(String(50), nullable=True)
|
||||
|
||||
# Business context
|
||||
order_id = Column(UUID(as_uuid=True), nullable=True) # Associated customer order
|
||||
forecast_id = Column(UUID(as_uuid=True), nullable=True) # Associated demand forecast
|
||||
is_rush_order = Column(Boolean, default=False)
|
||||
is_special_recipe = Column(Boolean, default=False)
|
||||
is_ai_assisted = Column(Boolean, default=False) # Whether batch used AI forecasting/optimization
|
||||
|
||||
# Notes and tracking
|
||||
production_notes = Column(Text, nullable=True)
|
||||
quality_notes = Column(Text, nullable=True)
|
||||
delay_reason = Column(String(255), nullable=True)
|
||||
cancellation_reason = Column(String(255), nullable=True)
|
||||
|
||||
# JTBD Dashboard: Structured reasoning data for i18n support
|
||||
# Backend stores structured data, frontend translates using i18n
|
||||
reasoning_data = Column(JSON, nullable=True) # Structured reasoning data for multilingual support
|
||||
# reasoning_data structure (see shared/schemas/reasoning_types.py):
|
||||
# {
|
||||
# "type": "forecast_demand" | "customer_order" | "stock_replenishment" | etc.,
|
||||
# "parameters": {
|
||||
# "product_name": "Croissant",
|
||||
# "predicted_demand": 500,
|
||||
# "current_stock": 120,
|
||||
# "production_needed": 380,
|
||||
# "confidence_score": 87
|
||||
# },
|
||||
# "urgency": {
|
||||
# "level": "normal",
|
||||
# "ready_by_time": "08:00",
|
||||
# "customer_commitment": false
|
||||
# },
|
||||
# "metadata": {
|
||||
# "trigger_source": "orchestrator_auto",
|
||||
# "forecast_id": "uuid-here",
|
||||
# "ai_assisted": true
|
||||
# }
|
||||
# }
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
completed_at = Column(DateTime(timezone=True), nullable=True)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"batch_number": self.batch_number,
|
||||
"product_id": str(self.product_id),
|
||||
"product_name": self.product_name,
|
||||
"recipe_id": str(self.recipe_id) if self.recipe_id else None,
|
||||
"planned_start_time": self.planned_start_time.isoformat() if self.planned_start_time else None,
|
||||
"planned_end_time": self.planned_end_time.isoformat() if self.planned_end_time else None,
|
||||
"planned_quantity": self.planned_quantity,
|
||||
"planned_duration_minutes": self.planned_duration_minutes,
|
||||
"actual_start_time": self.actual_start_time.isoformat() if self.actual_start_time else None,
|
||||
"actual_end_time": self.actual_end_time.isoformat() if self.actual_end_time else None,
|
||||
"actual_quantity": self.actual_quantity,
|
||||
"actual_duration_minutes": self.actual_duration_minutes,
|
||||
"status": self.status.value if self.status else None,
|
||||
"priority": self.priority.value if self.priority else None,
|
||||
"estimated_cost": self.estimated_cost,
|
||||
"actual_cost": self.actual_cost,
|
||||
"labor_cost": self.labor_cost,
|
||||
"material_cost": self.material_cost,
|
||||
"overhead_cost": self.overhead_cost,
|
||||
"yield_percentage": self.yield_percentage,
|
||||
"quality_score": self.quality_score,
|
||||
"waste_quantity": self.waste_quantity,
|
||||
"defect_quantity": self.defect_quantity,
|
||||
"waste_defect_type": self.waste_defect_type,
|
||||
"equipment_used": self.equipment_used,
|
||||
"staff_assigned": self.staff_assigned,
|
||||
"station_id": self.station_id,
|
||||
"order_id": str(self.order_id) if self.order_id else None,
|
||||
"forecast_id": str(self.forecast_id) if self.forecast_id else None,
|
||||
"is_rush_order": self.is_rush_order,
|
||||
"is_special_recipe": self.is_special_recipe,
|
||||
"is_ai_assisted": self.is_ai_assisted,
|
||||
"production_notes": self.production_notes,
|
||||
"quality_notes": self.quality_notes,
|
||||
"delay_reason": self.delay_reason,
|
||||
"cancellation_reason": self.cancellation_reason,
|
||||
"reasoning_data": self.reasoning_data,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
"completed_at": self.completed_at.isoformat() if self.completed_at else None,
|
||||
}
|
||||
|
||||
|
||||
class ProductionSchedule(Base):
|
||||
"""Production schedule model for planning and tracking daily production"""
|
||||
__tablename__ = "production_schedules"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Schedule information
|
||||
schedule_date = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
shift_start = Column(DateTime(timezone=True), nullable=False)
|
||||
shift_end = Column(DateTime(timezone=True), nullable=False)
|
||||
|
||||
# Capacity planning
|
||||
total_capacity_hours = Column(Float, nullable=False)
|
||||
planned_capacity_hours = Column(Float, nullable=False)
|
||||
actual_capacity_hours = Column(Float, nullable=True)
|
||||
overtime_hours = Column(Float, nullable=True, default=0.0)
|
||||
|
||||
# Staff and equipment
|
||||
staff_count = Column(Integer, nullable=False)
|
||||
equipment_capacity = Column(JSON, nullable=True) # Equipment availability
|
||||
station_assignments = Column(JSON, nullable=True) # Station schedules
|
||||
|
||||
# Production metrics
|
||||
total_batches_planned = Column(Integer, nullable=False, default=0)
|
||||
total_batches_completed = Column(Integer, nullable=True, default=0)
|
||||
total_quantity_planned = Column(Float, nullable=False, default=0.0)
|
||||
total_quantity_produced = Column(Float, nullable=True, default=0.0)
|
||||
|
||||
# Status tracking
|
||||
is_finalized = Column(Boolean, default=False)
|
||||
is_active = Column(Boolean, default=True)
|
||||
|
||||
# Performance metrics
|
||||
efficiency_percentage = Column(Float, nullable=True)
|
||||
utilization_percentage = Column(Float, nullable=True)
|
||||
on_time_completion_rate = Column(Float, nullable=True)
|
||||
|
||||
# Notes and adjustments
|
||||
schedule_notes = Column(Text, nullable=True)
|
||||
schedule_adjustments = Column(JSON, nullable=True) # Track changes made
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
finalized_at = Column(DateTime(timezone=True), nullable=True)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"schedule_date": self.schedule_date.isoformat() if self.schedule_date else None,
|
||||
"shift_start": self.shift_start.isoformat() if self.shift_start else None,
|
||||
"shift_end": self.shift_end.isoformat() if self.shift_end else None,
|
||||
"total_capacity_hours": self.total_capacity_hours,
|
||||
"planned_capacity_hours": self.planned_capacity_hours,
|
||||
"actual_capacity_hours": self.actual_capacity_hours,
|
||||
"overtime_hours": self.overtime_hours,
|
||||
"staff_count": self.staff_count,
|
||||
"equipment_capacity": self.equipment_capacity,
|
||||
"station_assignments": self.station_assignments,
|
||||
"total_batches_planned": self.total_batches_planned,
|
||||
"total_batches_completed": self.total_batches_completed,
|
||||
"total_quantity_planned": self.total_quantity_planned,
|
||||
"total_quantity_produced": self.total_quantity_produced,
|
||||
"is_finalized": self.is_finalized,
|
||||
"is_active": self.is_active,
|
||||
"efficiency_percentage": self.efficiency_percentage,
|
||||
"utilization_percentage": self.utilization_percentage,
|
||||
"on_time_completion_rate": self.on_time_completion_rate,
|
||||
"schedule_notes": self.schedule_notes,
|
||||
"schedule_adjustments": self.schedule_adjustments,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
"finalized_at": self.finalized_at.isoformat() if self.finalized_at else None,
|
||||
}
|
||||
|
||||
|
||||
class ProductionCapacity(Base):
|
||||
"""Production capacity model for tracking equipment and resource availability"""
|
||||
__tablename__ = "production_capacity"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Capacity definition
|
||||
resource_type = Column(String(50), nullable=False) # equipment, staff, station
|
||||
resource_id = Column(String(100), nullable=False)
|
||||
resource_name = Column(String(255), nullable=False)
|
||||
|
||||
# Time period
|
||||
date = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
start_time = Column(DateTime(timezone=True), nullable=False)
|
||||
end_time = Column(DateTime(timezone=True), nullable=False)
|
||||
|
||||
# Capacity metrics
|
||||
total_capacity_units = Column(Float, nullable=False) # Total available capacity
|
||||
allocated_capacity_units = Column(Float, nullable=False, default=0.0)
|
||||
remaining_capacity_units = Column(Float, nullable=False)
|
||||
|
||||
# Status
|
||||
is_available = Column(Boolean, default=True)
|
||||
is_maintenance = Column(Boolean, default=False)
|
||||
is_reserved = Column(Boolean, default=False)
|
||||
|
||||
# Equipment specific
|
||||
equipment_type = Column(String(100), nullable=True)
|
||||
max_batch_size = Column(Float, nullable=True)
|
||||
min_batch_size = Column(Float, nullable=True)
|
||||
setup_time_minutes = Column(Integer, nullable=True)
|
||||
cleanup_time_minutes = Column(Integer, nullable=True)
|
||||
|
||||
# Performance tracking
|
||||
efficiency_rating = Column(Float, nullable=True)
|
||||
maintenance_status = Column(String(50), nullable=True)
|
||||
last_maintenance_date = Column(DateTime(timezone=True), nullable=True)
|
||||
|
||||
# Notes
|
||||
notes = Column(Text, nullable=True)
|
||||
restrictions = Column(JSON, nullable=True) # Product type restrictions, etc.
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"resource_type": self.resource_type,
|
||||
"resource_id": self.resource_id,
|
||||
"resource_name": self.resource_name,
|
||||
"date": self.date.isoformat() if self.date else None,
|
||||
"start_time": self.start_time.isoformat() if self.start_time else None,
|
||||
"end_time": self.end_time.isoformat() if self.end_time else None,
|
||||
"total_capacity_units": self.total_capacity_units,
|
||||
"allocated_capacity_units": self.allocated_capacity_units,
|
||||
"remaining_capacity_units": self.remaining_capacity_units,
|
||||
"is_available": self.is_available,
|
||||
"is_maintenance": self.is_maintenance,
|
||||
"is_reserved": self.is_reserved,
|
||||
"equipment_type": self.equipment_type,
|
||||
"max_batch_size": self.max_batch_size,
|
||||
"min_batch_size": self.min_batch_size,
|
||||
"setup_time_minutes": self.setup_time_minutes,
|
||||
"cleanup_time_minutes": self.cleanup_time_minutes,
|
||||
"efficiency_rating": self.efficiency_rating,
|
||||
"maintenance_status": self.maintenance_status,
|
||||
"last_maintenance_date": self.last_maintenance_date.isoformat() if self.last_maintenance_date else None,
|
||||
"notes": self.notes,
|
||||
"restrictions": self.restrictions,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
|
||||
|
||||
|
||||
class QualityCheckTemplate(Base):
|
||||
"""Quality check templates for tenant-specific quality standards"""
|
||||
__tablename__ = "quality_check_templates"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Template identification
|
||||
name = Column(String(255), nullable=False)
|
||||
template_code = Column(String(100), nullable=True, index=True)
|
||||
check_type = Column(String(50), nullable=False) # visual, measurement, temperature, weight, boolean
|
||||
category = Column(String(100), nullable=True) # appearance, structure, texture, etc.
|
||||
|
||||
# Template configuration
|
||||
description = Column(Text, nullable=True)
|
||||
instructions = Column(Text, nullable=True)
|
||||
parameters = Column(JSON, nullable=True) # Dynamic check parameters
|
||||
thresholds = Column(JSON, nullable=True) # Pass/fail criteria
|
||||
scoring_criteria = Column(JSON, nullable=True) # Scoring methodology
|
||||
|
||||
# Configurability settings
|
||||
is_active = Column(Boolean, default=True)
|
||||
is_required = Column(Boolean, default=False)
|
||||
is_critical = Column(Boolean, default=False) # Critical failures block production
|
||||
weight = Column(Float, default=1.0) # Weight in overall quality score
|
||||
|
||||
# Measurement specifications
|
||||
min_value = Column(Float, nullable=True)
|
||||
max_value = Column(Float, nullable=True)
|
||||
target_value = Column(Float, nullable=True)
|
||||
unit = Column(String(20), nullable=True)
|
||||
tolerance_percentage = Column(Float, nullable=True)
|
||||
|
||||
# Process stage applicability
|
||||
applicable_stages = Column(JSON, nullable=True) # List of ProcessStage values
|
||||
|
||||
# Metadata
|
||||
created_by = Column(UUID(as_uuid=True), nullable=False)
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"name": self.name,
|
||||
"template_code": self.template_code,
|
||||
"check_type": self.check_type,
|
||||
"category": self.category,
|
||||
"description": self.description,
|
||||
"instructions": self.instructions,
|
||||
"parameters": self.parameters,
|
||||
"thresholds": self.thresholds,
|
||||
"scoring_criteria": self.scoring_criteria,
|
||||
"is_active": self.is_active,
|
||||
"is_required": self.is_required,
|
||||
"is_critical": self.is_critical,
|
||||
"weight": self.weight,
|
||||
"min_value": self.min_value,
|
||||
"max_value": self.max_value,
|
||||
"target_value": self.target_value,
|
||||
"unit": self.unit,
|
||||
"tolerance_percentage": self.tolerance_percentage,
|
||||
"applicable_stages": self.applicable_stages,
|
||||
"created_by": str(self.created_by),
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
|
||||
|
||||
|
||||
class QualityCheck(Base):
|
||||
"""Quality check model for tracking production quality metrics with stage support"""
|
||||
__tablename__ = "quality_checks"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
batch_id = Column(UUID(as_uuid=True), nullable=False, index=True) # FK to ProductionBatch
|
||||
template_id = Column(UUID(as_uuid=True), nullable=True, index=True) # FK to QualityCheckTemplate
|
||||
|
||||
# Check information
|
||||
check_type = Column(String(50), nullable=False) # visual, weight, temperature, etc.
|
||||
process_stage = Column(SQLEnum(ProcessStage), nullable=True, index=True) # Stage when check was performed
|
||||
check_time = Column(DateTime(timezone=True), nullable=False)
|
||||
checker_id = Column(String(100), nullable=True) # Staff member who performed check
|
||||
|
||||
# Quality metrics
|
||||
quality_score = Column(Float, nullable=False) # 1-10 scale
|
||||
pass_fail = Column(Boolean, nullable=False)
|
||||
defect_count = Column(Integer, nullable=False, default=0)
|
||||
defect_types = Column(JSON, nullable=True) # List of defect categories
|
||||
|
||||
# Measurements
|
||||
measured_weight = Column(Float, nullable=True)
|
||||
measured_temperature = Column(Float, nullable=True)
|
||||
measured_moisture = Column(Float, nullable=True)
|
||||
measured_dimensions = Column(JSON, nullable=True)
|
||||
stage_specific_data = Column(JSON, nullable=True) # Stage-specific measurements
|
||||
|
||||
# Standards comparison
|
||||
target_weight = Column(Float, nullable=True)
|
||||
target_temperature = Column(Float, nullable=True)
|
||||
target_moisture = Column(Float, nullable=True)
|
||||
tolerance_percentage = Column(Float, nullable=True)
|
||||
|
||||
# Results
|
||||
within_tolerance = Column(Boolean, nullable=True)
|
||||
corrective_action_needed = Column(Boolean, default=False)
|
||||
corrective_actions = Column(JSON, nullable=True)
|
||||
|
||||
# Template-based results
|
||||
template_results = Column(JSON, nullable=True) # Results from template-based checks
|
||||
criteria_scores = Column(JSON, nullable=True) # Individual criteria scores
|
||||
|
||||
# Notes and documentation
|
||||
check_notes = Column(Text, nullable=True)
|
||||
photos_urls = Column(JSON, nullable=True) # URLs to quality check photos
|
||||
certificate_url = Column(String(500), nullable=True)
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"batch_id": str(self.batch_id),
|
||||
"check_type": self.check_type,
|
||||
"check_time": self.check_time.isoformat() if self.check_time else None,
|
||||
"checker_id": self.checker_id,
|
||||
"quality_score": self.quality_score,
|
||||
"pass_fail": self.pass_fail,
|
||||
"defect_count": self.defect_count,
|
||||
"defect_types": self.defect_types,
|
||||
"measured_weight": self.measured_weight,
|
||||
"measured_temperature": self.measured_temperature,
|
||||
"measured_moisture": self.measured_moisture,
|
||||
"measured_dimensions": self.measured_dimensions,
|
||||
"target_weight": self.target_weight,
|
||||
"target_temperature": self.target_temperature,
|
||||
"target_moisture": self.target_moisture,
|
||||
"tolerance_percentage": self.tolerance_percentage,
|
||||
"within_tolerance": self.within_tolerance,
|
||||
"corrective_action_needed": self.corrective_action_needed,
|
||||
"corrective_actions": self.corrective_actions,
|
||||
"check_notes": self.check_notes,
|
||||
"photos_urls": self.photos_urls,
|
||||
"certificate_url": self.certificate_url,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
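# --- Illustrative sketch (not part of the original file) --------------------
# Shows how the measured/target columns and tolerance_percentage above could
# be combined to populate `within_tolerance` on a QualityCheck. The helper
# name is hypothetical; only the column relationship is taken from the model.
def is_within_tolerance(measured: float, target: float, tolerance_percentage: float) -> bool:
    """Return True when the measured value deviates from the target by no more
    than the allowed percentage."""
    if target == 0:
        # Avoid division by zero; only an exact match counts as in tolerance.
        return measured == 0
    deviation_pct = abs(measured - target) / abs(target) * 100.0
    return deviation_pct <= tolerance_percentage

# Example: a 48.5 kg measured weight against a 50 kg target with 5% tolerance
# passes, since the deviation is 3%.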


class IoTProtocol(str, enum.Enum):
    """IoT protocol enumeration"""
    REST_API = "rest_api"
    OPC_UA = "opc_ua"
    MQTT = "mqtt"
    MODBUS = "modbus"
    CUSTOM = "custom"


class IoTConnectionStatus(str, enum.Enum):
    """IoT connection status enumeration"""
    CONNECTED = "connected"
    DISCONNECTED = "disconnected"
    ERROR = "error"
    UNKNOWN = "unknown"

class Equipment(Base):
|
||||
"""Equipment model for tracking production equipment"""
|
||||
__tablename__ = "equipment"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Equipment identification
|
||||
name = Column(String(255), nullable=False)
|
||||
type = Column(SQLEnum(EquipmentType), nullable=False)
|
||||
model = Column(String(100), nullable=True)
|
||||
serial_number = Column(String(100), nullable=True)
|
||||
location = Column(String(255), nullable=True)
|
||||
manufacturer = Column(String(100), nullable=True)
|
||||
firmware_version = Column(String(50), nullable=True)
|
||||
|
||||
# Status tracking
|
||||
status = Column(SQLEnum(EquipmentStatus), nullable=False, default=EquipmentStatus.OPERATIONAL)
|
||||
|
||||
# Dates
|
||||
install_date = Column(DateTime(timezone=True), nullable=True)
|
||||
last_maintenance_date = Column(DateTime(timezone=True), nullable=True)
|
||||
next_maintenance_date = Column(DateTime(timezone=True), nullable=True)
|
||||
maintenance_interval_days = Column(Integer, nullable=True) # Maintenance interval in days
|
||||
|
||||
# Performance metrics
|
||||
efficiency_percentage = Column(Float, nullable=True) # Current efficiency
|
||||
uptime_percentage = Column(Float, nullable=True) # Uptime / availability percentage
|
||||
energy_usage_kwh = Column(Float, nullable=True) # Current energy usage
|
||||
|
||||
# Specifications
|
||||
power_kw = Column(Float, nullable=True) # Power in kilowatts
|
||||
capacity = Column(Float, nullable=True) # Capacity (units depend on equipment type)
|
||||
weight_kg = Column(Float, nullable=True) # Weight in kilograms
|
||||
|
||||
# Temperature monitoring
|
||||
current_temperature = Column(Float, nullable=True) # Current temperature reading
|
||||
target_temperature = Column(Float, nullable=True) # Target temperature
|
||||
|
||||
# IoT Connectivity
|
||||
iot_enabled = Column(Boolean, default=False, nullable=False)
|
||||
iot_protocol = Column(String(50), nullable=True) # rest_api, opc_ua, mqtt, modbus, custom
|
||||
iot_endpoint = Column(String(500), nullable=True) # URL or IP address
|
||||
iot_port = Column(Integer, nullable=True) # Connection port
|
||||
iot_credentials = Column(JSON, nullable=True) # Encrypted credentials (API keys, tokens, username/password)
|
||||
iot_connection_status = Column(String(50), nullable=True) # connected, disconnected, error, unknown
|
||||
iot_last_connected = Column(DateTime(timezone=True), nullable=True)
|
||||
iot_config = Column(JSON, nullable=True) # Additional configuration (polling interval, specific endpoints, etc.)
|
||||
|
||||
# Real-time monitoring
|
||||
supports_realtime = Column(Boolean, default=False, nullable=False)
|
||||
poll_interval_seconds = Column(Integer, nullable=True) # How often to poll for data
|
||||
|
||||
# Sensor capabilities
|
||||
temperature_zones = Column(Integer, nullable=True) # Number of temperature zones
|
||||
supports_humidity = Column(Boolean, default=False, nullable=False)
|
||||
supports_energy_monitoring = Column(Boolean, default=False, nullable=False)
|
||||
supports_remote_control = Column(Boolean, default=False, nullable=False)
|
||||
|
||||
# Status
|
||||
is_active = Column(Boolean, default=True)
|
||||
|
||||
# Notes
|
||||
notes = Column(Text, nullable=True)
|
||||
|
||||
# Support contact information
|
||||
support_contact = Column(JSON, nullable=True)
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"name": self.name,
|
||||
"type": self.type.value if self.type else None,
|
||||
"model": self.model,
|
||||
"serial_number": self.serial_number,
|
||||
"location": self.location,
|
||||
"manufacturer": self.manufacturer,
|
||||
"firmware_version": self.firmware_version,
|
||||
"status": self.status.value if self.status else None,
|
||||
"install_date": self.install_date.isoformat() if self.install_date else None,
|
||||
"last_maintenance_date": self.last_maintenance_date.isoformat() if self.last_maintenance_date else None,
|
||||
"next_maintenance_date": self.next_maintenance_date.isoformat() if self.next_maintenance_date else None,
|
||||
"maintenance_interval_days": self.maintenance_interval_days,
|
||||
"efficiency_percentage": self.efficiency_percentage,
|
||||
"uptime_percentage": self.uptime_percentage,
|
||||
"energy_usage_kwh": self.energy_usage_kwh,
|
||||
"power_kw": self.power_kw,
|
||||
"capacity": self.capacity,
|
||||
"weight_kg": self.weight_kg,
|
||||
"current_temperature": self.current_temperature,
|
||||
"target_temperature": self.target_temperature,
|
||||
"iot_enabled": self.iot_enabled,
|
||||
"iot_protocol": self.iot_protocol,
|
||||
"iot_endpoint": self.iot_endpoint,
|
||||
"iot_port": self.iot_port,
|
||||
"iot_connection_status": self.iot_connection_status,
|
||||
"iot_last_connected": self.iot_last_connected.isoformat() if self.iot_last_connected else None,
|
||||
"iot_config": self.iot_config,
|
||||
"supports_realtime": self.supports_realtime,
|
||||
"poll_interval_seconds": self.poll_interval_seconds,
|
||||
"temperature_zones": self.temperature_zones,
|
||||
"supports_humidity": self.supports_humidity,
|
||||
"supports_energy_monitoring": self.supports_energy_monitoring,
|
||||
"supports_remote_control": self.supports_remote_control,
|
||||
"is_active": self.is_active,
|
||||
"notes": self.notes,
|
||||
"support_contact": self.support_contact,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
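# --- Illustrative sketch (not part of the original file) --------------------
# Shows how `next_maintenance_date` could be rolled forward from
# `last_maintenance_date` and `maintenance_interval_days` after a service
# visit. The function name and the 90-day fallback are assumptions.
from datetime import datetime, timedelta, timezone

def roll_maintenance_schedule(last_maintenance: datetime, interval_days: int) -> datetime:
    """Return the next maintenance due date from the last service date and the
    configured interval in days."""
    return last_maintenance + timedelta(days=interval_days)

# Example usage after completing maintenance:
# equipment.last_maintenance_date = datetime.now(timezone.utc)
# equipment.next_maintenance_date = roll_maintenance_schedule(
#     equipment.last_maintenance_date, equipment.maintenance_interval_days or 90
# )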
|
||||
|
||||
|
||||
class EquipmentSensorReading(Base):
|
||||
"""Equipment sensor reading model for time-series IoT data"""
|
||||
__tablename__ = "equipment_sensor_readings"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
equipment_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
batch_id = Column(UUID(as_uuid=True), nullable=True, index=True)
|
||||
|
||||
# Timestamp
|
||||
reading_time = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
|
||||
# Temperature readings (support multiple zones)
|
||||
temperature = Column(Float, nullable=True)
|
||||
temperature_zones = Column(JSON, nullable=True) # {"zone1": 180, "zone2": 200, "zone3": 185}
|
||||
target_temperature = Column(Float, nullable=True)
|
||||
|
||||
# Humidity
|
||||
humidity = Column(Float, nullable=True)
|
||||
target_humidity = Column(Float, nullable=True)
|
||||
|
||||
# Energy monitoring
|
||||
energy_consumption_kwh = Column(Float, nullable=True)
|
||||
power_current_kw = Column(Float, nullable=True)
|
||||
|
||||
# Equipment status
|
||||
operational_status = Column(String(50), nullable=True) # running, idle, warming_up, cooling_down
|
||||
cycle_stage = Column(String(100), nullable=True) # preheating, baking, cooling
|
||||
cycle_progress_percentage = Column(Float, nullable=True)
|
||||
time_remaining_minutes = Column(Integer, nullable=True)
|
||||
|
||||
# Process parameters
|
||||
motor_speed_rpm = Column(Float, nullable=True)
|
||||
door_status = Column(String(20), nullable=True) # open, closed
|
||||
steam_level = Column(Float, nullable=True)
|
||||
|
||||
# Quality indicators
|
||||
product_weight_kg = Column(Float, nullable=True)
|
||||
moisture_content = Column(Float, nullable=True)
|
||||
|
||||
# Additional sensor data (flexible JSON for manufacturer-specific metrics)
|
||||
additional_sensors = Column(JSON, nullable=True)
|
||||
|
||||
# Data quality
|
||||
data_quality_score = Column(Float, nullable=True)
|
||||
is_anomaly = Column(Boolean, default=False, nullable=False)
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"equipment_id": str(self.equipment_id),
|
||||
"batch_id": str(self.batch_id) if self.batch_id else None,
|
||||
"reading_time": self.reading_time.isoformat() if self.reading_time else None,
|
||||
"temperature": self.temperature,
|
||||
"temperature_zones": self.temperature_zones,
|
||||
"target_temperature": self.target_temperature,
|
||||
"humidity": self.humidity,
|
||||
"target_humidity": self.target_humidity,
|
||||
"energy_consumption_kwh": self.energy_consumption_kwh,
|
||||
"power_current_kw": self.power_current_kw,
|
||||
"operational_status": self.operational_status,
|
||||
"cycle_stage": self.cycle_stage,
|
||||
"cycle_progress_percentage": self.cycle_progress_percentage,
|
||||
"time_remaining_minutes": self.time_remaining_minutes,
|
||||
"motor_speed_rpm": self.motor_speed_rpm,
|
||||
"door_status": self.door_status,
|
||||
"steam_level": self.steam_level,
|
||||
"product_weight_kg": self.product_weight_kg,
|
||||
"moisture_content": self.moisture_content,
|
||||
"additional_sensors": self.additional_sensors,
|
||||
"data_quality_score": self.data_quality_score,
|
||||
"is_anomaly": self.is_anomaly,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
}
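# --- Illustrative sketch (not part of the original file) --------------------
# One way the `is_anomaly` flag on a sensor reading could be derived from the
# temperature columns above. The 10% deviation threshold is an assumption used
# only for illustration.
from typing import Optional

def flag_temperature_anomaly(temperature: Optional[float],
                             target_temperature: Optional[float],
                             max_deviation_pct: float = 10.0) -> bool:
    """Return True when a reading deviates from its target temperature by more
    than the allowed percentage."""
    if temperature is None or target_temperature is None or target_temperature == 0:
        return False
    deviation_pct = abs(temperature - target_temperature) / abs(target_temperature) * 100.0
    return deviation_pct > max_deviation_pct

# Example: reading.is_anomaly = flag_temperature_anomaly(
#     reading.temperature, reading.target_temperature
# )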
|
||||
|
||||
|
||||
class EquipmentConnectionLog(Base):
|
||||
"""Equipment connection log for tracking IoT connectivity"""
|
||||
__tablename__ = "equipment_connection_logs"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
equipment_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
|
||||
# Connection event
|
||||
event_type = Column(String(50), nullable=False) # connected, disconnected, error, timeout
|
||||
event_time = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
|
||||
# Connection details
|
||||
connection_status = Column(String(50), nullable=False)
|
||||
protocol_used = Column(String(50), nullable=True)
|
||||
endpoint = Column(String(500), nullable=True)
|
||||
|
||||
# Error tracking
|
||||
error_message = Column(Text, nullable=True)
|
||||
error_code = Column(String(50), nullable=True)
|
||||
|
||||
# Performance metrics
|
||||
response_time_ms = Column(Integer, nullable=True)
|
||||
data_points_received = Column(Integer, nullable=True)
|
||||
|
||||
# Additional details
|
||||
additional_data = Column(JSON, nullable=True)
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"equipment_id": str(self.equipment_id),
|
||||
"event_type": self.event_type,
|
||||
"event_time": self.event_time.isoformat() if self.event_time else None,
|
||||
"connection_status": self.connection_status,
|
||||
"protocol_used": self.protocol_used,
|
||||
"endpoint": self.endpoint,
|
||||
"error_message": self.error_message,
|
||||
"error_code": self.error_code,
|
||||
"response_time_ms": self.response_time_ms,
|
||||
"data_points_received": self.data_points_received,
|
||||
"additional_data": self.additional_data,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
}
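# --- Illustrative sketch (not part of the original file) --------------------
# The kind of record a polling worker might create when an IoT poll times out.
# The helper name and the "timeout" wording are assumptions; the column names
# come from the model above. The object is built but not persisted here.
from datetime import datetime, timezone

def build_timeout_log(tenant_id, equipment_id, endpoint: str) -> "EquipmentConnectionLog":
    """Build an EquipmentConnectionLog row describing a poll timeout."""
    return EquipmentConnectionLog(
        tenant_id=tenant_id,
        equipment_id=equipment_id,
        event_type="timeout",
        event_time=datetime.now(timezone.utc),
        connection_status="error",
        endpoint=endpoint,
        error_message="No response within the configured poll interval",
        data_points_received=0,
    )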
|
||||
|
||||
|
||||
class EquipmentIoTAlert(Base):
|
||||
"""Equipment IoT alert model for real-time equipment alerts"""
|
||||
__tablename__ = "equipment_iot_alerts"
|
||||
|
||||
# Primary identification
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
equipment_id = Column(UUID(as_uuid=True), nullable=False, index=True)
|
||||
batch_id = Column(UUID(as_uuid=True), nullable=True, index=True)
|
||||
|
||||
# Alert information
|
||||
alert_type = Column(String(50), nullable=False) # temperature_deviation, connection_lost, equipment_error
|
||||
severity = Column(String(20), nullable=False) # info, warning, critical
|
||||
alert_time = Column(DateTime(timezone=True), nullable=False, index=True)
|
||||
|
||||
# Alert details
|
||||
title = Column(String(255), nullable=False)
|
||||
message = Column(Text, nullable=False)
|
||||
sensor_reading_id = Column(UUID(as_uuid=True), nullable=True)
|
||||
|
||||
# Threshold information
|
||||
threshold_value = Column(Float, nullable=True)
|
||||
actual_value = Column(Float, nullable=True)
|
||||
deviation_percentage = Column(Float, nullable=True)
|
||||
|
||||
# Status tracking
|
||||
is_active = Column(Boolean, default=True, nullable=False)
|
||||
is_acknowledged = Column(Boolean, default=False, nullable=False)
|
||||
acknowledged_by = Column(UUID(as_uuid=True), nullable=True)
|
||||
acknowledged_at = Column(DateTime(timezone=True), nullable=True)
|
||||
|
||||
is_resolved = Column(Boolean, default=False, nullable=False)
|
||||
resolved_by = Column(UUID(as_uuid=True), nullable=True)
|
||||
resolved_at = Column(DateTime(timezone=True), nullable=True)
|
||||
resolution_notes = Column(Text, nullable=True)
|
||||
|
||||
# Automated response
|
||||
auto_resolved = Column(Boolean, default=False, nullable=False)
|
||||
corrective_action_taken = Column(String(255), nullable=True)
|
||||
|
||||
# Additional data
|
||||
additional_data = Column(JSON, nullable=True)
|
||||
|
||||
# Timestamps
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary following shared pattern"""
|
||||
return {
|
||||
"id": str(self.id),
|
||||
"tenant_id": str(self.tenant_id),
|
||||
"equipment_id": str(self.equipment_id),
|
||||
"batch_id": str(self.batch_id) if self.batch_id else None,
|
||||
"alert_type": self.alert_type,
|
||||
"severity": self.severity,
|
||||
"alert_time": self.alert_time.isoformat() if self.alert_time else None,
|
||||
"title": self.title,
|
||||
"message": self.message,
|
||||
"sensor_reading_id": str(self.sensor_reading_id) if self.sensor_reading_id else None,
|
||||
"threshold_value": self.threshold_value,
|
||||
"actual_value": self.actual_value,
|
||||
"deviation_percentage": self.deviation_percentage,
|
||||
"is_active": self.is_active,
|
||||
"is_acknowledged": self.is_acknowledged,
|
||||
"acknowledged_by": str(self.acknowledged_by) if self.acknowledged_by else None,
|
||||
"acknowledged_at": self.acknowledged_at.isoformat() if self.acknowledged_at else None,
|
||||
"is_resolved": self.is_resolved,
|
||||
"resolved_by": str(self.resolved_by) if self.resolved_by else None,
|
||||
"resolved_at": self.resolved_at.isoformat() if self.resolved_at else None,
|
||||
"resolution_notes": self.resolution_notes,
|
||||
"auto_resolved": self.auto_resolved,
|
||||
"corrective_action_taken": self.corrective_action_taken,
|
||||
"additional_data": self.additional_data,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
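# --- Illustrative sketch (not part of the original file) --------------------
# Shows how `deviation_percentage` and `severity` on an IoT alert could be
# derived from `threshold_value` and `actual_value`. The 10%/25% severity
# cut-offs are assumptions used only for illustration.
from typing import Tuple

def classify_deviation(threshold_value: float, actual_value: float) -> Tuple[float, str]:
    """Return (deviation_percentage, severity) for a threshold breach."""
    if threshold_value == 0:
        return 0.0, "info"
    deviation = abs(actual_value - threshold_value) / abs(threshold_value) * 100.0
    if deviation >= 25.0:
        severity = "critical"
    elif deviation >= 10.0:
        severity = "warning"
    else:
        severity = "info"
    return round(deviation, 2), severity

# Example: classify_deviation(180.0, 205.0) -> (13.89, "warning")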
|
||||
|
||||
|
||||
20
services/production/app/repositories/__init__.py
Normal file
@@ -0,0 +1,20 @@
# ================================================================
# services/production/app/repositories/__init__.py
# ================================================================
"""
Repository layer for data access
"""

from .production_batch_repository import ProductionBatchRepository
from .production_schedule_repository import ProductionScheduleRepository
from .production_capacity_repository import ProductionCapacityRepository
from .quality_check_repository import QualityCheckRepository
from .equipment_repository import EquipmentRepository

__all__ = [
    "ProductionBatchRepository",
    "ProductionScheduleRepository",
    "ProductionCapacityRepository",
    "QualityCheckRepository",
    "EquipmentRepository",
]
217
services/production/app/repositories/base.py
Normal file
@@ -0,0 +1,217 @@
"""
Base Repository for Production Service
Service-specific repository base class with production utilities
"""

from typing import Optional, List, Dict, Any, Type
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import text, and_, or_
from datetime import datetime, date, timedelta
import structlog

from shared.database.repository import BaseRepository
from shared.database.exceptions import DatabaseError


logger = structlog.get_logger()

class ProductionBaseRepository(BaseRepository):
|
||||
"""Base repository for production service with common production operations"""
|
||||
|
||||
def __init__(self, model: Type, session: AsyncSession, cache_ttl: Optional[int] = 300):
|
||||
# Production data is more dynamic, shorter cache time (5 minutes)
|
||||
super().__init__(model, session, cache_ttl)
|
||||
|
||||
async def get_by_tenant_id(self, tenant_id: str, skip: int = 0, limit: int = 100) -> List:
|
||||
"""Get records by tenant ID"""
|
||||
if hasattr(self.model, 'tenant_id'):
|
||||
return await self.get_multi(
|
||||
skip=skip,
|
||||
limit=limit,
|
||||
filters={"tenant_id": tenant_id},
|
||||
order_by="created_at",
|
||||
order_desc=True
|
||||
)
|
||||
return await self.get_multi(skip=skip, limit=limit)
|
||||
|
||||
async def get_by_status(
|
||||
self,
|
||||
tenant_id: str,
|
||||
status: str,
|
||||
skip: int = 0,
|
||||
limit: int = 100
|
||||
) -> List:
|
||||
"""Get records by tenant and status"""
|
||||
if hasattr(self.model, 'status'):
|
||||
return await self.get_multi(
|
||||
skip=skip,
|
||||
limit=limit,
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"status": status
|
||||
},
|
||||
order_by="created_at",
|
||||
order_desc=True
|
||||
)
|
||||
return await self.get_by_tenant_id(tenant_id, skip, limit)
|
||||
|
||||
async def get_by_date_range(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date,
|
||||
date_field: str = "created_at",
|
||||
skip: int = 0,
|
||||
limit: int = 100
|
||||
) -> List:
|
||||
"""Get records by tenant and date range"""
|
||||
try:
|
||||
start_datetime = datetime.combine(start_date, datetime.min.time())
|
||||
end_datetime = datetime.combine(end_date, datetime.max.time())
|
||||
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
f"{date_field}__gte": start_datetime,
|
||||
f"{date_field}__lte": end_datetime
|
||||
}
|
||||
|
||||
return await self.get_multi(
|
||||
skip=skip,
|
||||
limit=limit,
|
||||
filters=filters,
|
||||
order_by=date_field,
|
||||
order_desc=True
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error("Error fetching records by date range",
|
||||
error=str(e), tenant_id=tenant_id)
|
||||
raise DatabaseError(f"Failed to fetch records by date range: {str(e)}")
|
||||
|
||||
async def get_active_records(
|
||||
self,
|
||||
tenant_id: str,
|
||||
active_field: str = "is_active",
|
||||
skip: int = 0,
|
||||
limit: int = 100
|
||||
) -> List:
|
||||
"""Get active records for a tenant"""
|
||||
if hasattr(self.model, active_field):
|
||||
return await self.get_multi(
|
||||
skip=skip,
|
||||
limit=limit,
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
active_field: True
|
||||
},
|
||||
order_by="created_at",
|
||||
order_desc=True
|
||||
)
|
||||
return await self.get_by_tenant_id(tenant_id, skip, limit)
|
||||
|
||||
def _validate_production_data(
|
||||
self,
|
||||
data: Dict[str, Any],
|
||||
required_fields: List[str]
|
||||
) -> Dict[str, Any]:
|
||||
"""Validate production data with required fields"""
|
||||
errors = []
|
||||
|
||||
# Check required fields
|
||||
for field in required_fields:
|
||||
if field not in data or data[field] is None:
|
||||
errors.append(f"Missing required field: {field}")
|
||||
|
||||
# Validate tenant_id format
|
||||
if "tenant_id" in data:
|
||||
try:
|
||||
import uuid
|
||||
uuid.UUID(str(data["tenant_id"]))
|
||||
except (ValueError, TypeError):
|
||||
errors.append("Invalid tenant_id format")
|
||||
|
||||
# Validate datetime fields
|
||||
datetime_fields = ["planned_start_time", "planned_end_time", "actual_start_time", "actual_end_time"]
|
||||
for field in datetime_fields:
|
||||
if field in data and data[field] is not None:
|
||||
if not isinstance(data[field], (datetime, str)):
|
||||
errors.append(f"Invalid datetime format for {field}")
|
||||
|
||||
# Validate numeric fields
|
||||
numeric_fields = ["planned_quantity", "actual_quantity", "quality_score", "yield_percentage"]
|
||||
for field in numeric_fields:
|
||||
if field in data and data[field] is not None:
|
||||
try:
|
||||
value = float(data[field])
if value < 0:
    errors.append(f"{field} cannot be negative")
|
||||
except (ValueError, TypeError):
|
||||
errors.append(f"Invalid numeric value for {field}")
|
||||
|
||||
# Validate percentage fields (0-100)
|
||||
percentage_fields = ["yield_percentage", "efficiency_percentage", "utilization_percentage"]
|
||||
for field in percentage_fields:
|
||||
if field in data and data[field] is not None:
|
||||
try:
|
||||
value = float(data[field])
|
||||
if value < 0 or value > 100:
|
||||
errors.append(f"{field} must be between 0 and 100")
|
||||
except (ValueError, TypeError):
|
||||
pass # Already caught by numeric validation
|
||||
|
||||
return {
|
||||
"is_valid": len(errors) == 0,
|
||||
"errors": errors
|
||||
}
|
||||
|
||||
async def get_production_statistics(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date
|
||||
) -> Dict[str, Any]:
|
||||
"""Get production statistics for a tenant and date range"""
|
||||
try:
|
||||
# Base query for the model
|
||||
start_datetime = datetime.combine(start_date, datetime.min.time())
|
||||
end_datetime = datetime.combine(end_date, datetime.max.time())
|
||||
|
||||
# This would need to be implemented per specific model
|
||||
# For now, return basic count
|
||||
records = await self.get_by_date_range(
|
||||
tenant_id, start_date, end_date, limit=1000
|
||||
)
|
||||
|
||||
return {
|
||||
"total_records": len(records),
|
||||
"period_start": start_date.isoformat(),
|
||||
"period_end": end_date.isoformat(),
|
||||
"tenant_id": tenant_id
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error calculating production statistics",
|
||||
error=str(e), tenant_id=tenant_id)
|
||||
raise DatabaseError(f"Failed to calculate statistics: {str(e)}")
|
||||
|
||||
async def check_duplicate(
|
||||
self,
|
||||
tenant_id: str,
|
||||
unique_fields: Dict[str, Any]
|
||||
) -> bool:
|
||||
"""Check if a record with the same unique fields exists"""
|
||||
try:
|
||||
filters = {"tenant_id": tenant_id}
|
||||
filters.update(unique_fields)
|
||||
|
||||
existing = await self.get_multi(
|
||||
filters=filters,
|
||||
limit=1
|
||||
)
|
||||
|
||||
return len(existing) > 0
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking for duplicates",
|
||||
error=str(e), tenant_id=tenant_id)
|
||||
return False
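# --- Illustrative usage sketch (not part of the original file) ---------------
# How a concrete repository built on ProductionBaseRepository might combine the
# validation and duplicate-check helpers above before creating a record. The
# "batch_number" field and the required-field list are assumptions about the
# calling service, not part of this base class.
async def create_record_checked(repo: ProductionBaseRepository, tenant_id: str, payload: Dict[str, Any]):
    """Validate, de-duplicate, then persist a record via the base helpers."""
    validation = repo._validate_production_data(
        payload, required_fields=["tenant_id", "planned_quantity"]
    )
    if not validation["is_valid"]:
        raise ValueError(f"Invalid production data: {validation['errors']}")

    if await repo.check_duplicate(tenant_id, {"batch_number": payload.get("batch_number")}):
        raise ValueError("A record with this batch_number already exists for the tenant")

    return await repo.create(payload)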
|
||||
386
services/production/app/repositories/equipment_repository.py
Normal file
@@ -0,0 +1,386 @@
"""
Equipment Repository
"""

from typing import Optional, List, Dict, Any
from sqlalchemy import select, func, and_, text
from sqlalchemy.ext.asyncio import AsyncSession
from uuid import UUID
import structlog

from app.repositories.base import ProductionBaseRepository
from app.models.production import Equipment, EquipmentStatus, EquipmentType

logger = structlog.get_logger()

class EquipmentRepository(ProductionBaseRepository):
|
||||
"""Repository for equipment operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession):
|
||||
super().__init__(Equipment, session)
|
||||
|
||||
async def get_equipment_filtered(
|
||||
self,
|
||||
filters: Dict[str, Any],
|
||||
page: int = 1,
|
||||
page_size: int = 50
|
||||
) -> List[Equipment]:
|
||||
"""Get equipment list with filters and pagination"""
|
||||
try:
|
||||
# Build base query
|
||||
query = select(Equipment).filter(Equipment.tenant_id == UUID(filters.get("tenant_id")))
|
||||
|
||||
# Apply status filter
|
||||
if "status" in filters and filters["status"]:
|
||||
query = query.filter(Equipment.status == filters["status"])
|
||||
|
||||
# Apply type filter
|
||||
if "type" in filters and filters["type"]:
|
||||
query = query.filter(Equipment.type == filters["type"])
|
||||
|
||||
# Apply active filter
|
||||
if "is_active" in filters and filters["is_active"] is not None:
|
||||
query = query.filter(Equipment.is_active == filters["is_active"])
|
||||
|
||||
# Apply pagination
|
||||
query = query.order_by(Equipment.created_at.desc())
|
||||
query = query.offset((page - 1) * page_size).limit(page_size)
|
||||
|
||||
result = await self.session.execute(query)
|
||||
return list(result.scalars().all())
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting filtered equipment", error=str(e), filters=filters)
|
||||
raise
|
||||
|
||||
async def count_equipment_filtered(self, filters: Dict[str, Any]) -> int:
|
||||
"""Count equipment matching filters"""
|
||||
try:
|
||||
# Build base query
|
||||
query = select(func.count(Equipment.id)).filter(
|
||||
Equipment.tenant_id == UUID(filters.get("tenant_id"))
|
||||
)
|
||||
|
||||
# Apply status filter
|
||||
if "status" in filters and filters["status"]:
|
||||
query = query.filter(Equipment.status == filters["status"])
|
||||
|
||||
# Apply type filter
|
||||
if "type" in filters and filters["type"]:
|
||||
query = query.filter(Equipment.type == filters["type"])
|
||||
|
||||
# Apply active filter
|
||||
if "is_active" in filters and filters["is_active"] is not None:
|
||||
query = query.filter(Equipment.is_active == filters["is_active"])
|
||||
|
||||
result = await self.session.execute(query)
|
||||
return result.scalar() or 0
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error counting filtered equipment", error=str(e), filters=filters)
|
||||
raise
|
||||
|
||||
async def get_equipment_by_id(self, tenant_id: UUID, equipment_id: UUID) -> Optional[Equipment]:
|
||||
"""Get equipment by ID and tenant"""
|
||||
try:
|
||||
query = select(Equipment).filter(
|
||||
and_(
|
||||
Equipment.id == equipment_id,
|
||||
Equipment.tenant_id == tenant_id
|
||||
)
|
||||
)
|
||||
result = await self.session.execute(query)
|
||||
return result.scalar_one_or_none()
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting equipment by ID",
|
||||
error=str(e),
|
||||
equipment_id=str(equipment_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def create_equipment(self, equipment_data: Dict[str, Any]) -> Equipment:
|
||||
"""Create new equipment"""
|
||||
try:
|
||||
equipment = Equipment(**equipment_data)
|
||||
self.session.add(equipment)
|
||||
await self.session.flush()
|
||||
await self.session.refresh(equipment)
|
||||
return equipment
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error creating equipment", error=str(e), data=equipment_data)
|
||||
raise
|
||||
|
||||
async def update_equipment(
|
||||
self,
|
||||
equipment_id: UUID,
|
||||
updates: Dict[str, Any]
|
||||
) -> Optional[Equipment]:
|
||||
"""Update equipment"""
|
||||
try:
|
||||
equipment = await self.get(equipment_id)
|
||||
if not equipment:
|
||||
return None
|
||||
|
||||
for key, value in updates.items():
|
||||
if hasattr(equipment, key) and value is not None:
|
||||
setattr(equipment, key, value)
|
||||
|
||||
await self.session.flush()
|
||||
await self.session.refresh(equipment)
|
||||
return equipment
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error updating equipment", error=str(e), equipment_id=str(equipment_id))
|
||||
raise
|
||||
|
||||
async def delete_equipment(self, equipment_id: UUID) -> bool:
|
||||
"""Soft delete equipment (set is_active to False)"""
|
||||
try:
|
||||
equipment = await self.get(equipment_id)
|
||||
if not equipment:
|
||||
return False
|
||||
|
||||
equipment.is_active = False
|
||||
await self.session.flush()
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error deleting equipment", error=str(e), equipment_id=str(equipment_id))
|
||||
raise
|
||||
|
||||
async def hard_delete_equipment(self, equipment_id: UUID) -> bool:
|
||||
"""Permanently delete equipment from database"""
|
||||
try:
|
||||
equipment = await self.get(equipment_id)
|
||||
if not equipment:
|
||||
return False
|
||||
|
||||
await self.session.delete(equipment)
|
||||
await self.session.flush()
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error hard deleting equipment", error=str(e), equipment_id=str(equipment_id))
|
||||
raise
|
||||
|
||||
async def get_equipment_deletion_summary(self, tenant_id: UUID, equipment_id: UUID) -> Dict[str, Any]:
|
||||
"""Get summary of what will be affected by deleting equipment"""
|
||||
try:
|
||||
equipment = await self.get_equipment_by_id(tenant_id, equipment_id)
|
||||
if not equipment:
|
||||
return {
|
||||
"can_delete": False,
|
||||
"warnings": ["Equipment not found"],
|
||||
"production_batches_count": 0,
|
||||
"maintenance_records_count": 0,
|
||||
"temperature_logs_count": 0
|
||||
}
|
||||
|
||||
# Check for related production batches
|
||||
from app.models.production import ProductionBatch
|
||||
batch_query = select(func.count(ProductionBatch.id)).filter(
|
||||
and_(
|
||||
ProductionBatch.tenant_id == tenant_id,
|
||||
ProductionBatch.equipment_id == equipment_id
|
||||
)
|
||||
)
|
||||
batch_result = await self.session.execute(batch_query)
|
||||
batches_count = batch_result.scalar() or 0
|
||||
|
||||
# For now, we'll use placeholder counts for maintenance and temperature logs
|
||||
# These would need to be implemented based on your actual models
|
||||
maintenance_count = 0
|
||||
temperature_logs_count = 0
|
||||
|
||||
warnings = []
|
||||
if batches_count > 0:
|
||||
warnings.append(f"{batches_count} production batch(es) are using this equipment")
|
||||
|
||||
# Equipment can be deleted even with dependencies, but warn the user
|
||||
can_delete = True
|
||||
|
||||
return {
|
||||
"can_delete": can_delete,
|
||||
"warnings": warnings,
|
||||
"production_batches_count": batches_count,
|
||||
"maintenance_records_count": maintenance_count,
|
||||
"temperature_logs_count": temperature_logs_count,
|
||||
"equipment_name": equipment.name,
|
||||
"equipment_type": equipment.type.value,
|
||||
"equipment_location": equipment.location
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error getting equipment deletion summary",
|
||||
error=str(e),
|
||||
equipment_id=str(equipment_id),
|
||||
tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
# ================================================================
|
||||
# ALERT-RELATED METHODS (migrated from production_alert_repository)
|
||||
# ================================================================
|
||||
|
||||
async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get equipment maintenance and alert status.
Returns all active equipment with maintenance dates and active alert counts,
ordered by next maintenance date.
|
||||
"""
|
||||
try:
|
||||
|
||||
query = text("""
|
||||
SELECT
|
||||
e.id, e.tenant_id, e.name, e.type, e.status,
|
||||
e.efficiency_percentage, e.uptime_percentage,
|
||||
e.last_maintenance_date, e.next_maintenance_date,
|
||||
e.maintenance_interval_days,
|
||||
EXTRACT(DAY FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
|
||||
COUNT(ea.id) as active_alerts
|
||||
FROM equipment e
|
||||
LEFT JOIN alerts ea ON ea.equipment_id = e.id
|
||||
AND ea.is_active = true
|
||||
AND ea.is_resolved = false
|
||||
WHERE e.is_active = true
|
||||
AND e.tenant_id = :tenant_id
|
||||
GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
|
||||
e.efficiency_percentage, e.uptime_percentage,
|
||||
e.last_maintenance_date, e.next_maintenance_date,
|
||||
e.maintenance_interval_days
|
||||
ORDER BY e.next_maintenance_date ASC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_equipment_needing_maintenance(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get equipment that needs maintenance.
|
||||
Returns equipment where next_maintenance_date has passed.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
|
||||
query_str = """
|
||||
SELECT
|
||||
e.id, e.name, e.type, e.tenant_id,
|
||||
e.last_maintenance_date,
|
||||
e.next_maintenance_date,
|
||||
EXTRACT(DAY FROM (NOW() - e.next_maintenance_date)) as days_overdue
|
||||
FROM equipment e
|
||||
WHERE e.next_maintenance_date IS NOT NULL
|
||||
AND e.next_maintenance_date < NOW()
|
||||
AND e.status = 'OPERATIONAL'
|
||||
AND e.is_active = true
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND e.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += " ORDER BY e.next_maintenance_date ASC LIMIT 50"
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
rows = result.fetchall()
|
||||
|
||||
return [
|
||||
{
|
||||
'id': str(row.id),
|
||||
'name': row.name,
|
||||
'type': row.type,
|
||||
'tenant_id': str(row.tenant_id),
|
||||
'last_maintenance_date': row.last_maintenance_date.isoformat() if row.last_maintenance_date else None,
|
||||
'next_maintenance_date': row.next_maintenance_date.isoformat() if row.next_maintenance_date else None,
|
||||
'days_overdue': int(row.days_overdue) if row.days_overdue else 0
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get equipment needing maintenance", error=str(e))
|
||||
raise
|
||||
|
||||
async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get production efficiency improvement recommendations.
|
||||
Analyzes production patterns to identify optimization opportunities.
|
||||
"""
|
||||
try:
|
||||
|
||||
query = text("""
|
||||
WITH efficiency_analysis AS (
|
||||
SELECT
|
||||
pb.tenant_id, pb.product_name,
|
||||
AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
|
||||
AVG(pb.planned_duration_minutes) as avg_planned_duration,
|
||||
COUNT(*) as batch_count,
|
||||
AVG(pb.yield_percentage) as avg_yield,
|
||||
EXTRACT(hour FROM pb.actual_start_time) as start_hour
|
||||
FROM production_batches pb
|
||||
WHERE pb.status = 'COMPLETED'
|
||||
AND pb.actual_end_time > CURRENT_DATE - INTERVAL '30 days'
|
||||
AND pb.tenant_id = :tenant_id
|
||||
GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
|
||||
HAVING COUNT(*) >= 3
|
||||
),
|
||||
recommendations AS (
|
||||
SELECT *,
|
||||
CASE
|
||||
WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
|
||||
WHEN avg_yield < 85 THEN 'improve_yield'
|
||||
WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
|
||||
ELSE null
|
||||
END as recommendation_type,
|
||||
(avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
|
||||
FROM efficiency_analysis
|
||||
)
|
||||
SELECT * FROM recommendations
|
||||
WHERE recommendation_type IS NOT NULL
|
||||
AND efficiency_loss_percent > 10
|
||||
ORDER BY efficiency_loss_percent DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
|
||||
|
||||
async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get energy consumption patterns for optimization analysis.
|
||||
Returns consumption by equipment and hour of day.
|
||||
"""
|
||||
try:
|
||||
|
||||
query = text("""
|
||||
SELECT
|
||||
e.tenant_id, e.name as equipment_name, e.type,
|
||||
AVG(ec.energy_consumption_kwh) as avg_energy,
|
||||
EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
|
||||
COUNT(*) as readings_count
|
||||
FROM equipment e
|
||||
JOIN energy_consumption ec ON ec.equipment_id = e.id
|
||||
WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
|
||||
AND e.tenant_id = :tenant_id
|
||||
GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
|
||||
HAVING COUNT(*) >= 10
|
||||
ORDER BY avg_energy DESC
|
||||
""")
|
||||
|
||||
result = await self.session.execute(query, {"tenant_id": tenant_id})
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
|
||||
raise
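# --- Illustrative usage sketch (not part of the original file) ---------------
# How a service layer might combine get_equipment_filtered() and
# count_equipment_filtered() into one paginated response. The response shape
# is an assumption for illustration only.
async def list_equipment_page(repo: "EquipmentRepository", tenant_id: str,
                              page: int = 1, page_size: int = 50) -> Dict[str, Any]:
    """Return one page of equipment plus pagination metadata."""
    filters = {"tenant_id": tenant_id, "is_active": True}
    items = await repo.get_equipment_filtered(filters, page=page, page_size=page_size)
    total = await repo.count_equipment_filtered(filters)
    return {
        "items": [e.to_dict() for e in items],
        "total": total,
        "page": page,
        "page_size": page_size,
        "total_pages": (total + page_size - 1) // page_size,  # ceiling division
    }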
|
||||
1011
services/production/app/repositories/production_batch_repository.py
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,402 @@
"""
Production Capacity Repository
Repository for production capacity operations
"""

from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, and_, text, desc, func
from datetime import datetime, timedelta, date
from uuid import UUID
import structlog

from .base import ProductionBaseRepository
from app.models.production import ProductionCapacity
from shared.database.exceptions import DatabaseError, ValidationError


logger = structlog.get_logger()

class ProductionCapacityRepository(ProductionBaseRepository):
|
||||
"""Repository for production capacity operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 600):
|
||||
# Capacity data changes moderately, medium cache time (10 minutes)
|
||||
super().__init__(ProductionCapacity, session, cache_ttl)
|
||||
|
||||
async def create_capacity(self, capacity_data: Dict[str, Any]) -> ProductionCapacity:
|
||||
"""Create a new production capacity entry with validation"""
|
||||
try:
|
||||
# Validate capacity data
|
||||
validation_result = self._validate_production_data(
|
||||
capacity_data,
|
||||
["tenant_id", "resource_type", "resource_id", "resource_name",
|
||||
"date", "start_time", "end_time", "total_capacity_units"]
|
||||
)
|
||||
|
||||
if not validation_result["is_valid"]:
|
||||
raise ValidationError(f"Invalid capacity data: {validation_result['errors']}")
|
||||
|
||||
# Set default values
|
||||
if "allocated_capacity_units" not in capacity_data:
|
||||
capacity_data["allocated_capacity_units"] = 0.0
|
||||
if "remaining_capacity_units" not in capacity_data:
|
||||
capacity_data["remaining_capacity_units"] = capacity_data["total_capacity_units"]
|
||||
if "is_available" not in capacity_data:
|
||||
capacity_data["is_available"] = True
|
||||
if "is_maintenance" not in capacity_data:
|
||||
capacity_data["is_maintenance"] = False
|
||||
if "is_reserved" not in capacity_data:
|
||||
capacity_data["is_reserved"] = False
|
||||
|
||||
# Create capacity entry
|
||||
capacity = await self.create(capacity_data)
|
||||
|
||||
logger.info("Production capacity created successfully",
|
||||
capacity_id=str(capacity.id),
|
||||
resource_type=capacity.resource_type,
|
||||
resource_id=capacity.resource_id,
|
||||
tenant_id=str(capacity.tenant_id))
|
||||
|
||||
return capacity
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error creating production capacity", error=str(e))
|
||||
raise DatabaseError(f"Failed to create production capacity: {str(e)}")
|
||||
|
||||
async def get_capacity_by_resource(
|
||||
self,
|
||||
tenant_id: str,
|
||||
resource_id: str,
|
||||
date_filter: Optional[date] = None
|
||||
) -> List[ProductionCapacity]:
|
||||
"""Get capacity entries for a specific resource"""
|
||||
try:
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
"resource_id": resource_id
|
||||
}
|
||||
|
||||
if date_filter:
|
||||
filters["date"] = date_filter
|
||||
|
||||
capacities = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="start_time"
|
||||
)
|
||||
|
||||
logger.info("Retrieved capacity by resource",
|
||||
count=len(capacities),
|
||||
resource_id=resource_id,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return capacities
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching capacity by resource", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch capacity by resource: {str(e)}")
|
||||
|
||||
async def get_available_capacity(
|
||||
self,
|
||||
tenant_id: str,
|
||||
resource_type: str,
|
||||
target_date: date,
|
||||
required_capacity: float
|
||||
) -> List[ProductionCapacity]:
|
||||
"""Get available capacity for a specific date and capacity requirement"""
|
||||
try:
|
||||
capacities = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"resource_type": resource_type,
|
||||
"date": target_date,
|
||||
"is_available": True,
|
||||
"is_maintenance": False,
|
||||
"remaining_capacity_units__gte": required_capacity
|
||||
},
|
||||
order_by="remaining_capacity_units",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved available capacity",
|
||||
count=len(capacities),
|
||||
resource_type=resource_type,
|
||||
required_capacity=required_capacity,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return capacities
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching available capacity", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch available capacity: {str(e)}")
|
||||
|
||||
async def allocate_capacity(
|
||||
self,
|
||||
capacity_id: UUID,
|
||||
allocation_amount: float,
|
||||
allocation_notes: Optional[str] = None
|
||||
) -> ProductionCapacity:
|
||||
"""Allocate capacity units from a capacity entry"""
|
||||
try:
|
||||
capacity = await self.get(capacity_id)
|
||||
if not capacity:
|
||||
raise ValidationError(f"Capacity {capacity_id} not found")
|
||||
|
||||
if allocation_amount > capacity.remaining_capacity_units:
|
||||
raise ValidationError(
|
||||
f"Insufficient capacity: requested {allocation_amount}, "
|
||||
f"available {capacity.remaining_capacity_units}"
|
||||
)
|
||||
|
||||
new_allocated = capacity.allocated_capacity_units + allocation_amount
|
||||
new_remaining = capacity.remaining_capacity_units - allocation_amount
|
||||
|
||||
update_data = {
|
||||
"allocated_capacity_units": new_allocated,
|
||||
"remaining_capacity_units": new_remaining,
|
||||
"updated_at": datetime.utcnow()
|
||||
}
|
||||
|
||||
if allocation_notes:
|
||||
current_notes = capacity.notes or ""
|
||||
update_data["notes"] = f"{current_notes}\n{allocation_notes}".strip()
|
||||
|
||||
capacity = await self.update(capacity_id, update_data)
|
||||
|
||||
logger.info("Allocated capacity",
|
||||
capacity_id=str(capacity_id),
|
||||
allocation_amount=allocation_amount,
|
||||
remaining_capacity=new_remaining)
|
||||
|
||||
return capacity
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error allocating capacity", error=str(e))
|
||||
raise DatabaseError(f"Failed to allocate capacity: {str(e)}")
|
||||
|
||||
async def release_capacity(
|
||||
self,
|
||||
capacity_id: UUID,
|
||||
release_amount: float,
|
||||
release_notes: Optional[str] = None
|
||||
) -> ProductionCapacity:
|
||||
"""Release allocated capacity units back to a capacity entry"""
|
||||
try:
|
||||
capacity = await self.get(capacity_id)
|
||||
if not capacity:
|
||||
raise ValidationError(f"Capacity {capacity_id} not found")
|
||||
|
||||
if release_amount > capacity.allocated_capacity_units:
|
||||
raise ValidationError(
|
||||
f"Cannot release more than allocated: requested {release_amount}, "
|
||||
f"allocated {capacity.allocated_capacity_units}"
|
||||
)
|
||||
|
||||
new_allocated = capacity.allocated_capacity_units - release_amount
|
||||
new_remaining = capacity.remaining_capacity_units + release_amount
|
||||
|
||||
update_data = {
|
||||
"allocated_capacity_units": new_allocated,
|
||||
"remaining_capacity_units": new_remaining,
|
||||
"updated_at": datetime.utcnow()
|
||||
}
|
||||
|
||||
if release_notes:
|
||||
current_notes = capacity.notes or ""
|
||||
update_data["notes"] = f"{current_notes}\n{release_notes}".strip()
|
||||
|
||||
capacity = await self.update(capacity_id, update_data)
|
||||
|
||||
logger.info("Released capacity",
|
||||
capacity_id=str(capacity_id),
|
||||
release_amount=release_amount,
|
||||
remaining_capacity=new_remaining)
|
||||
|
||||
return capacity
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error releasing capacity", error=str(e))
|
||||
raise DatabaseError(f"Failed to release capacity: {str(e)}")
|
||||
|
||||
async def get_capacity_utilization_summary(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date,
|
||||
resource_type: Optional[str] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""Get capacity utilization summary for a date range"""
|
||||
try:
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
"date__gte": start_date,
|
||||
"date__lte": end_date
|
||||
}
|
||||
|
||||
if resource_type:
|
||||
filters["resource_type"] = resource_type
|
||||
|
||||
capacities = await self.get_multi(filters=filters)
|
||||
|
||||
total_capacity = sum(c.total_capacity_units for c in capacities)
|
||||
total_allocated = sum(c.allocated_capacity_units for c in capacities)
|
||||
total_available = sum(c.remaining_capacity_units for c in capacities)
|
||||
|
||||
# Group by resource type
|
||||
by_resource_type = {}
|
||||
for capacity in capacities:
|
||||
rt = capacity.resource_type
|
||||
if rt not in by_resource_type:
|
||||
by_resource_type[rt] = {
|
||||
"total_capacity": 0,
|
||||
"allocated_capacity": 0,
|
||||
"available_capacity": 0,
|
||||
"resource_count": 0
|
||||
}
|
||||
|
||||
by_resource_type[rt]["total_capacity"] += capacity.total_capacity_units
|
||||
by_resource_type[rt]["allocated_capacity"] += capacity.allocated_capacity_units
|
||||
by_resource_type[rt]["available_capacity"] += capacity.remaining_capacity_units
|
||||
by_resource_type[rt]["resource_count"] += 1
|
||||
|
||||
# Calculate utilization percentages
|
||||
for rt_data in by_resource_type.values():
|
||||
if rt_data["total_capacity"] > 0:
|
||||
rt_data["utilization_percentage"] = round(
|
||||
(rt_data["allocated_capacity"] / rt_data["total_capacity"]) * 100, 2
|
||||
)
|
||||
else:
|
||||
rt_data["utilization_percentage"] = 0
|
||||
|
||||
return {
|
||||
"period_start": start_date.isoformat(),
|
||||
"period_end": end_date.isoformat(),
|
||||
"total_capacity_units": total_capacity,
|
||||
"total_allocated_units": total_allocated,
|
||||
"total_available_units": total_available,
|
||||
"overall_utilization_percentage": round(
|
||||
(total_allocated / total_capacity * 100) if total_capacity > 0 else 0, 2
|
||||
),
|
||||
"by_resource_type": by_resource_type,
|
||||
"total_resources": len(capacities),
|
||||
"tenant_id": tenant_id
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error calculating capacity utilization summary", error=str(e))
|
||||
raise DatabaseError(f"Failed to calculate capacity utilization summary: {str(e)}")
|
||||
|
||||
async def set_maintenance_mode(
|
||||
self,
|
||||
capacity_id: UUID,
|
||||
is_maintenance: bool,
|
||||
maintenance_notes: Optional[str] = None
|
||||
) -> ProductionCapacity:
|
||||
"""Set maintenance mode for a capacity entry"""
|
||||
try:
|
||||
capacity = await self.get(capacity_id)
|
||||
if not capacity:
|
||||
raise ValidationError(f"Capacity {capacity_id} not found")
|
||||
|
||||
update_data = {
|
||||
"is_maintenance": is_maintenance,
|
||||
"is_available": not is_maintenance, # Not available when in maintenance
|
||||
"updated_at": datetime.utcnow()
|
||||
}
|
||||
|
||||
if is_maintenance:
|
||||
update_data["maintenance_status"] = "in_maintenance"
|
||||
if maintenance_notes:
|
||||
update_data["notes"] = maintenance_notes
|
||||
else:
|
||||
update_data["maintenance_status"] = "operational"
|
||||
update_data["last_maintenance_date"] = datetime.utcnow()
|
||||
|
||||
capacity = await self.update(capacity_id, update_data)
|
||||
|
||||
logger.info("Set maintenance mode",
|
||||
capacity_id=str(capacity_id),
|
||||
is_maintenance=is_maintenance)
|
||||
|
||||
return capacity
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error setting maintenance mode", error=str(e))
|
||||
raise DatabaseError(f"Failed to set maintenance mode: {str(e)}")
|
||||
|
||||
async def get_capacity_with_filters(
|
||||
self,
|
||||
tenant_id: str,
|
||||
resource_type: Optional[str] = None,
|
||||
date_filter: Optional[date] = None,
|
||||
availability: Optional[bool] = None,
|
||||
page: int = 1,
|
||||
page_size: int = 50
|
||||
) -> tuple[List[ProductionCapacity], int]:
|
||||
"""Get production capacity with filters and pagination"""
|
||||
try:
|
||||
filters = {"tenant_id": tenant_id}
|
||||
|
||||
if resource_type:
|
||||
filters["resource_type"] = resource_type
|
||||
if date_filter:
|
||||
filters["date"] = date_filter
|
||||
if availability is not None:
|
||||
filters["is_available"] = availability
|
||||
|
||||
# Get total count
|
||||
total_count = await self.count(filters)
|
||||
|
||||
# Get paginated results
|
||||
offset = (page - 1) * page_size
|
||||
capacities = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="date",
|
||||
order_desc=True,
|
||||
limit=page_size,
|
||||
offset=offset
|
||||
)
|
||||
|
||||
logger.info("Retrieved capacity with filters",
|
||||
count=len(capacities),
|
||||
total_count=total_count,
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return capacities, total_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching capacity with filters", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch capacity with filters: {str(e)}")
|
||||
|
||||
async def get_capacity_by_date(self, tenant_id: str, target_date: date) -> List[ProductionCapacity]:
|
||||
"""Get all capacity entries for a specific date"""
|
||||
try:
|
||||
capacities = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"date": target_date
|
||||
},
|
||||
order_by="start_time"
|
||||
)
|
||||
|
||||
logger.info("Retrieved capacity by date",
|
||||
count=len(capacities),
|
||||
date=target_date.isoformat(),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return capacities
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching capacity by date", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch capacity by date: {str(e)}")
|
||||
@@ -0,0 +1,425 @@
"""
Production Schedule Repository
Repository for production schedule operations
"""

from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, and_, text, desc, func
from datetime import datetime, timedelta, date
from uuid import UUID
import structlog

from .base import ProductionBaseRepository
from app.models.production import ProductionSchedule
from shared.database.exceptions import DatabaseError, ValidationError


logger = structlog.get_logger()

class ProductionScheduleRepository(ProductionBaseRepository):
|
||||
"""Repository for production schedule operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 600):
|
||||
# Schedules are more stable, medium cache time (10 minutes)
|
||||
super().__init__(ProductionSchedule, session, cache_ttl)
|
||||
|
||||
async def create_schedule(self, schedule_data: Dict[str, Any]) -> ProductionSchedule:
|
||||
"""Create a new production schedule with validation"""
|
||||
try:
|
||||
# Validate schedule data
|
||||
validation_result = self._validate_production_data(
|
||||
schedule_data,
|
||||
["tenant_id", "schedule_date", "shift_start", "shift_end",
|
||||
"total_capacity_hours", "planned_capacity_hours", "staff_count"]
|
||||
)
|
||||
|
||||
if not validation_result["is_valid"]:
|
||||
raise ValidationError(f"Invalid schedule data: {validation_result['errors']}")
|
||||
|
||||
# Set default values
|
||||
if "is_finalized" not in schedule_data:
|
||||
schedule_data["is_finalized"] = False
|
||||
if "is_active" not in schedule_data:
|
||||
schedule_data["is_active"] = True
|
||||
if "overtime_hours" not in schedule_data:
|
||||
schedule_data["overtime_hours"] = 0.0
|
||||
|
||||
# Validate date uniqueness
|
||||
existing_schedule = await self.get_schedule_by_date(
|
||||
schedule_data["tenant_id"],
|
||||
schedule_data["schedule_date"]
|
||||
)
|
||||
if existing_schedule:
|
||||
raise ValidationError(f"Schedule for date {schedule_data['schedule_date']} already exists")
|
||||
|
||||
# Create schedule
|
||||
schedule = await self.create(schedule_data)
|
||||
|
||||
logger.info("Production schedule created successfully",
|
||||
schedule_id=str(schedule.id),
|
||||
schedule_date=schedule.schedule_date.isoformat(),
|
||||
tenant_id=str(schedule.tenant_id))
|
||||
|
||||
return schedule
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error creating production schedule", error=str(e))
|
||||
raise DatabaseError(f"Failed to create production schedule: {str(e)}")
|
||||
|
||||
async def get_schedule_by_date(
|
||||
self,
|
||||
tenant_id: str,
|
||||
schedule_date: date
|
||||
) -> Optional[ProductionSchedule]:
|
||||
"""Get production schedule for a specific date"""
|
||||
try:
|
||||
schedules = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"schedule_date": schedule_date
|
||||
},
|
||||
limit=1
|
||||
)
|
||||
|
||||
schedule = schedules[0] if schedules else None
|
||||
|
||||
if schedule:
|
||||
logger.info("Retrieved production schedule by date",
|
||||
schedule_id=str(schedule.id),
|
||||
schedule_date=schedule_date.isoformat(),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return schedule
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching schedule by date", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch schedule by date: {str(e)}")
|
||||
|
||||
async def get_schedules_by_date_range(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date
|
||||
) -> List[ProductionSchedule]:
|
||||
"""Get production schedules within a date range"""
|
||||
try:
|
||||
schedules = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"schedule_date__gte": start_date,
|
||||
"schedule_date__lte": end_date
|
||||
},
|
||||
order_by="schedule_date"
|
||||
)
|
||||
|
||||
logger.info("Retrieved schedules by date range",
|
||||
count=len(schedules),
|
||||
start_date=start_date.isoformat(),
|
||||
end_date=end_date.isoformat(),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return schedules
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching schedules by date range", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch schedules by date range: {str(e)}")
|
||||
|
||||
async def get_active_schedules(self, tenant_id: str) -> List[ProductionSchedule]:
|
||||
"""Get active production schedules for a tenant"""
|
||||
try:
|
||||
schedules = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"is_active": True
|
||||
},
|
||||
order_by="schedule_date"
|
||||
)
|
||||
|
||||
logger.info("Retrieved active production schedules",
|
||||
count=len(schedules),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return schedules
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching active schedules", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch active schedules: {str(e)}")
|
||||
|
||||
async def finalize_schedule(
|
||||
self,
|
||||
schedule_id: UUID,
|
||||
finalized_by: str
|
||||
) -> ProductionSchedule:
|
||||
"""Finalize a production schedule"""
|
||||
try:
|
||||
schedule = await self.get(schedule_id)
|
||||
if not schedule:
|
||||
raise ValidationError(f"Schedule {schedule_id} not found")
|
||||
|
||||
if schedule.is_finalized:
|
||||
raise ValidationError("Schedule is already finalized")
|
||||
|
||||
update_data = {
|
||||
"is_finalized": True,
|
||||
"finalized_at": datetime.utcnow(),
|
||||
"updated_at": datetime.utcnow()
|
||||
}
|
||||
|
||||
schedule = await self.update(schedule_id, update_data)
|
||||
|
||||
logger.info("Production schedule finalized",
|
||||
schedule_id=str(schedule_id),
|
||||
finalized_by=finalized_by)
|
||||
|
||||
return schedule
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error finalizing schedule", error=str(e))
|
||||
raise DatabaseError(f"Failed to finalize schedule: {str(e)}")
|
||||
|
||||
async def update_schedule_metrics(
|
||||
self,
|
||||
schedule_id: UUID,
|
||||
metrics: Dict[str, Any]
|
||||
) -> ProductionSchedule:
|
||||
"""Update production schedule metrics"""
|
||||
try:
|
||||
schedule = await self.get(schedule_id)
|
||||
if not schedule:
|
||||
raise ValidationError(f"Schedule {schedule_id} not found")
|
||||
|
||||
# Validate metrics
|
||||
valid_metrics = [
|
||||
"actual_capacity_hours", "total_batches_completed",
|
||||
"total_quantity_produced", "efficiency_percentage",
|
||||
"utilization_percentage", "on_time_completion_rate"
|
||||
]
|
||||
|
||||
update_data = {"updated_at": datetime.utcnow()}
|
||||
|
||||
for metric, value in metrics.items():
|
||||
if metric in valid_metrics:
|
||||
update_data[metric] = value
|
||||
|
||||
schedule = await self.update(schedule_id, update_data)
|
||||
|
||||
logger.info("Updated schedule metrics",
|
||||
schedule_id=str(schedule_id),
|
||||
metrics=list(metrics.keys()))
|
||||
|
||||
return schedule
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error updating schedule metrics", error=str(e))
|
||||
raise DatabaseError(f"Failed to update schedule metrics: {str(e)}")
|
||||
|
||||
async def get_schedule_performance_summary(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date
|
||||
) -> Dict[str, Any]:
|
||||
"""Get schedule performance summary for a date range"""
|
||||
try:
|
||||
schedules = await self.get_schedules_by_date_range(tenant_id, start_date, end_date)
|
||||
|
||||
total_schedules = len(schedules)
|
||||
finalized_schedules = len([s for s in schedules if s.is_finalized])
|
||||
|
||||
# Calculate averages
|
||||
total_planned_hours = sum(s.planned_capacity_hours for s in schedules)
|
||||
total_actual_hours = sum(s.actual_capacity_hours or 0 for s in schedules)
|
||||
total_overtime = sum(s.overtime_hours or 0 for s in schedules)
|
||||
|
||||
# Calculate efficiency metrics
|
||||
schedules_with_efficiency = [s for s in schedules if s.efficiency_percentage is not None]
|
||||
avg_efficiency = (
|
||||
sum(s.efficiency_percentage for s in schedules_with_efficiency) / len(schedules_with_efficiency)
|
||||
if schedules_with_efficiency else 0
|
||||
)
|
||||
|
||||
schedules_with_utilization = [s for s in schedules if s.utilization_percentage is not None]
|
||||
avg_utilization = (
|
||||
sum(s.utilization_percentage for s in schedules_with_utilization) / len(schedules_with_utilization)
|
||||
if schedules_with_utilization else 0
|
||||
)
|
||||
|
||||
return {
|
||||
"period_start": start_date.isoformat(),
|
||||
"period_end": end_date.isoformat(),
|
||||
"total_schedules": total_schedules,
|
||||
"finalized_schedules": finalized_schedules,
|
||||
"finalization_rate": (finalized_schedules / total_schedules * 100) if total_schedules > 0 else 0,
|
||||
"total_planned_hours": total_planned_hours,
|
||||
"total_actual_hours": total_actual_hours,
|
||||
"total_overtime_hours": total_overtime,
|
||||
"capacity_utilization": (total_actual_hours / total_planned_hours * 100) if total_planned_hours > 0 else 0,
|
||||
"average_efficiency_percentage": round(avg_efficiency, 2),
|
||||
"average_utilization_percentage": round(avg_utilization, 2),
|
||||
"tenant_id": tenant_id
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error calculating schedule performance summary", error=str(e))
|
||||
raise DatabaseError(f"Failed to calculate schedule performance summary: {str(e)}")
|
||||
|
||||
async def get_schedules_with_filters(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: Optional[date] = None,
|
||||
end_date: Optional[date] = None,
|
||||
is_finalized: Optional[bool] = None,
|
||||
page: int = 1,
|
||||
page_size: int = 50
|
||||
) -> tuple[List[ProductionSchedule], int]:
|
||||
"""Get production schedules with filters and pagination"""
|
||||
try:
|
||||
filters = {"tenant_id": tenant_id}
|
||||
|
||||
if start_date:
|
||||
filters["schedule_date__gte"] = start_date
|
||||
if end_date:
|
||||
filters["schedule_date__lte"] = end_date
|
||||
if is_finalized is not None:
|
||||
filters["is_finalized"] = is_finalized
|
||||
|
||||
# Get total count
|
||||
total_count = await self.count(filters)
|
||||
|
||||
# Get paginated results
|
||||
offset = (page - 1) * page_size
|
||||
schedules = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="schedule_date",
|
||||
order_desc=True,
|
||||
limit=page_size,
|
||||
offset=offset
|
||||
)
|
||||
|
||||
logger.info("Retrieved schedules with filters",
|
||||
count=len(schedules),
|
||||
total_count=total_count,
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return schedules, total_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching schedules with filters", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch schedules with filters: {str(e)}")
|
||||
|
||||
async def update_schedule(self, schedule_id: UUID, update_data: Dict[str, Any]) -> ProductionSchedule:
|
||||
"""Update a production schedule"""
|
||||
try:
|
||||
schedule = await self.get(schedule_id)
|
||||
if not schedule:
|
||||
raise ValidationError(f"Schedule {schedule_id} not found")
|
||||
|
||||
# Add updated timestamp
|
||||
update_data["updated_at"] = datetime.utcnow()
|
||||
|
||||
# Update the schedule
|
||||
schedule = await self.update(schedule_id, update_data)
|
||||
|
||||
logger.info("Updated production schedule",
|
||||
schedule_id=str(schedule_id),
|
||||
update_fields=list(update_data.keys()))
|
||||
|
||||
return schedule
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error updating production schedule", error=str(e))
|
||||
raise DatabaseError(f"Failed to update production schedule: {str(e)}")
|
||||
|
||||
async def delete_schedule(self, schedule_id: UUID) -> bool:
|
||||
"""Delete a production schedule"""
|
||||
try:
|
||||
schedule = await self.get(schedule_id)
|
||||
if not schedule:
|
||||
raise ValidationError(f"Schedule {schedule_id} not found")
|
||||
|
||||
# Check if schedule can be deleted (not finalized)
|
||||
if schedule.is_finalized:
|
||||
raise ValidationError("Cannot delete finalized schedule")
|
||||
|
||||
success = await self.delete(schedule_id)
|
||||
|
||||
logger.info("Deleted production schedule",
|
||||
schedule_id=str(schedule_id),
|
||||
success=success)
|
||||
|
||||
return success
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error deleting production schedule", error=str(e))
|
||||
raise DatabaseError(f"Failed to delete production schedule: {str(e)}")
|
||||
|
||||
async def get_todays_schedule(self, tenant_id: str) -> Optional[ProductionSchedule]:
|
||||
"""Get today's production schedule for a tenant"""
|
||||
try:
|
||||
today = datetime.utcnow().date()
|
||||
return await self.get_schedule_by_date(tenant_id, today)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching today's schedule", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch today's schedule: {str(e)}")
|
||||
|
||||
async def get_all_schedules_for_tenant(self, tenant_id: UUID) -> List[ProductionSchedule]:
|
||||
"""Get all production schedules for a specific tenant"""
|
||||
try:
|
||||
from sqlalchemy import select
|
||||
from app.models.production import ProductionSchedule
|
||||
|
||||
result = await self.session.execute(
|
||||
select(ProductionSchedule).where(
|
||||
ProductionSchedule.tenant_id == tenant_id
|
||||
)
|
||||
)
|
||||
schedules = result.scalars().all()
|
||||
|
||||
logger.info("Retrieved all schedules for tenant",
|
||||
tenant_id=str(tenant_id),
|
||||
count=len(schedules))
|
||||
|
||||
return list(schedules)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching all tenant schedules", error=str(e), tenant_id=str(tenant_id))
|
||||
raise DatabaseError(f"Failed to fetch all tenant schedules: {str(e)}")
|
||||
|
||||
async def archive_schedule(self, schedule: ProductionSchedule) -> None:
|
||||
"""Archive a production schedule"""
|
||||
try:
|
||||
schedule.archived = True
|
||||
await self.session.commit()
|
||||
logger.info("Archived schedule", schedule_id=str(schedule.id))
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error archiving schedule", error=str(e), schedule_id=str(schedule.id))
|
||||
raise DatabaseError(f"Failed to archive schedule: {str(e)}")
|
||||
|
||||
    async def cancel_schedule(self, schedule: ProductionSchedule, reason: Optional[str] = None) -> None:
|
||||
"""Cancel a production schedule"""
|
||||
try:
|
||||
schedule.status = "cancelled"
|
||||
if reason:
|
||||
schedule.notes = (schedule.notes or "") + f"\n{reason}"
|
||||
await self.session.commit()
|
||||
logger.info("Cancelled schedule", schedule_id=str(schedule.id))
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error cancelling schedule", error=str(e), schedule_id=str(schedule.id))
|
||||
raise DatabaseError(f"Failed to cancel schedule: {str(e)}")
|
||||
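As a rough illustration of how the schedule repository above is meant to be driven, a caller could create tomorrow's shift plan and lock it once confirmed. This is a sketch, not part of the commit: the `async_session_factory` name, the tenant value, and the capacity figures are assumptions.

from datetime import date, datetime, timedelta

async def plan_tomorrow(async_session_factory, tenant_id: str):
    # Hypothetical helper: async_session_factory is assumed to yield an AsyncSession
    async with async_session_factory() as session:
        repo = ProductionScheduleRepository(session)
        shift_start = datetime.combine(
            date.today() + timedelta(days=1),
            datetime.min.time().replace(hour=6),
        )
        schedule = await repo.create_schedule({
            "tenant_id": tenant_id,
            "schedule_date": shift_start.date(),
            "shift_start": shift_start,
            "shift_end": shift_start + timedelta(hours=8),
            "total_capacity_hours": 64.0,      # 8 staff x 8 hours (illustrative)
            "planned_capacity_hours": 56.0,
            "staff_count": 8,
        })
        # Finalizing locks the plan: delete_schedule refuses finalized schedules
        return await repo.finalize_schedule(schedule.id, finalized_by="planner@example.com")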
441
services/production/app/repositories/quality_check_repository.py
Normal file
441
services/production/app/repositories/quality_check_repository.py
Normal file
@@ -0,0 +1,441 @@
|
||||
"""
|
||||
Quality Check Repository
|
||||
Repository for quality check operations
|
||||
"""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, and_, text, desc, func
|
||||
from datetime import datetime, timedelta, date
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from .base import ProductionBaseRepository
|
||||
from app.models.production import QualityCheck
|
||||
from shared.database.exceptions import DatabaseError, ValidationError
|
||||
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class QualityCheckRepository(ProductionBaseRepository):
|
||||
"""Repository for quality check operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 300):
|
||||
# Quality checks are dynamic, short cache time (5 minutes)
|
||||
super().__init__(QualityCheck, session, cache_ttl)
|
||||
|
||||
async def create_quality_check(self, check_data: Dict[str, Any]) -> QualityCheck:
|
||||
"""Create a new quality check with validation"""
|
||||
try:
|
||||
# Validate check data
|
||||
validation_result = self._validate_production_data(
|
||||
check_data,
|
||||
["tenant_id", "batch_id", "check_type", "check_time",
|
||||
"quality_score", "pass_fail"]
|
||||
)
|
||||
|
||||
if not validation_result["is_valid"]:
|
||||
raise ValidationError(f"Invalid quality check data: {validation_result['errors']}")
|
||||
|
||||
# Validate quality score range (1-10)
|
||||
            if check_data.get("quality_score") is not None:
|
||||
score = float(check_data["quality_score"])
|
||||
if score < 1 or score > 10:
|
||||
raise ValidationError("Quality score must be between 1 and 10")
|
||||
|
||||
# Set default values
|
||||
if "defect_count" not in check_data:
|
||||
check_data["defect_count"] = 0
|
||||
if "corrective_action_needed" not in check_data:
|
||||
check_data["corrective_action_needed"] = False
|
||||
|
||||
# Create quality check
|
||||
quality_check = await self.create(check_data)
|
||||
|
||||
logger.info("Quality check created successfully",
|
||||
check_id=str(quality_check.id),
|
||||
batch_id=str(quality_check.batch_id),
|
||||
check_type=quality_check.check_type,
|
||||
quality_score=quality_check.quality_score,
|
||||
tenant_id=str(quality_check.tenant_id))
|
||||
|
||||
return quality_check
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error creating quality check", error=str(e))
|
||||
raise DatabaseError(f"Failed to create quality check: {str(e)}")
|
||||
|
||||
async def get_checks_by_batch(
|
||||
self,
|
||||
tenant_id: str,
|
||||
batch_id: str
|
||||
) -> List[QualityCheck]:
|
||||
"""Get all quality checks for a specific batch"""
|
||||
try:
|
||||
checks = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"batch_id": batch_id
|
||||
},
|
||||
order_by="check_time"
|
||||
)
|
||||
|
||||
logger.info("Retrieved quality checks by batch",
|
||||
count=len(checks),
|
||||
batch_id=batch_id,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return checks
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching quality checks by batch", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch quality checks by batch: {str(e)}")
|
||||
|
||||
async def get_checks_by_date_range(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date,
|
||||
check_type: Optional[str] = None
|
||||
) -> List[QualityCheck]:
|
||||
"""Get quality checks within a date range"""
|
||||
try:
|
||||
start_datetime = datetime.combine(start_date, datetime.min.time())
|
||||
end_datetime = datetime.combine(end_date, datetime.max.time())
|
||||
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
"check_time__gte": start_datetime,
|
||||
"check_time__lte": end_datetime
|
||||
}
|
||||
|
||||
if check_type:
|
||||
filters["check_type"] = check_type
|
||||
|
||||
checks = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="check_time",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved quality checks by date range",
|
||||
count=len(checks),
|
||||
start_date=start_date.isoformat(),
|
||||
end_date=end_date.isoformat(),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return checks
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching quality checks by date range", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch quality checks by date range: {str(e)}")
|
||||
|
||||
async def get_failed_checks(
|
||||
self,
|
||||
tenant_id: str,
|
||||
days_back: int = 7
|
||||
) -> List[QualityCheck]:
|
||||
"""Get failed quality checks from the last N days"""
|
||||
try:
|
||||
cutoff_date = datetime.utcnow() - timedelta(days=days_back)
|
||||
|
||||
checks = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"pass_fail": False,
|
||||
"check_time__gte": cutoff_date
|
||||
},
|
||||
order_by="check_time",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved failed quality checks",
|
||||
count=len(checks),
|
||||
days_back=days_back,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return checks
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching failed quality checks", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch failed quality checks: {str(e)}")
|
||||
|
||||
async def get_quality_metrics(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date
|
||||
) -> Dict[str, Any]:
|
||||
"""Get quality metrics for a tenant and date range"""
|
||||
try:
|
||||
checks = await self.get_checks_by_date_range(tenant_id, start_date, end_date)
|
||||
|
||||
total_checks = len(checks)
|
||||
passed_checks = len([c for c in checks if c.pass_fail])
|
||||
failed_checks = total_checks - passed_checks
|
||||
|
||||
# Calculate average quality score
|
||||
quality_scores = [c.quality_score for c in checks if c.quality_score is not None]
|
||||
avg_quality_score = sum(quality_scores) / len(quality_scores) if quality_scores else 0
|
||||
|
||||
# Calculate defect rate
|
||||
total_defects = sum(c.defect_count for c in checks)
|
||||
avg_defects_per_check = total_defects / total_checks if total_checks > 0 else 0
|
||||
|
||||
# Group by check type
|
||||
by_check_type = {}
|
||||
for check in checks:
|
||||
check_type = check.check_type
|
||||
if check_type not in by_check_type:
|
||||
by_check_type[check_type] = {
|
||||
"total_checks": 0,
|
||||
"passed_checks": 0,
|
||||
"failed_checks": 0,
|
||||
"avg_quality_score": 0,
|
||||
"total_defects": 0
|
||||
}
|
||||
|
||||
by_check_type[check_type]["total_checks"] += 1
|
||||
if check.pass_fail:
|
||||
by_check_type[check_type]["passed_checks"] += 1
|
||||
else:
|
||||
by_check_type[check_type]["failed_checks"] += 1
|
||||
by_check_type[check_type]["total_defects"] += check.defect_count
|
||||
|
||||
            # Calculate pass rates and per-type average scores
            for type_name, type_data in by_check_type.items():
                if type_data["total_checks"] > 0:
                    type_data["pass_rate"] = round(
                        (type_data["passed_checks"] / type_data["total_checks"]) * 100, 2
                    )
                else:
                    type_data["pass_rate"] = 0

                # Use the dictionary key rather than the loop variable left over from
                # the grouping pass above, so each type averages only its own scores
                type_scores = [c.quality_score for c in checks
                               if c.check_type == type_name and c.quality_score is not None]
                type_data["avg_quality_score"] = round(
                    sum(type_scores) / len(type_scores) if type_scores else 0, 2
                )
|
||||
|
||||
# Identify trends
|
||||
checks_needing_action = len([c for c in checks if c.corrective_action_needed])
|
||||
|
||||
return {
|
||||
"period_start": start_date.isoformat(),
|
||||
"period_end": end_date.isoformat(),
|
||||
"total_checks": total_checks,
|
||||
"passed_checks": passed_checks,
|
||||
"failed_checks": failed_checks,
|
||||
"pass_rate_percentage": round((passed_checks / total_checks * 100) if total_checks > 0 else 0, 2),
|
||||
"average_quality_score": round(avg_quality_score, 2),
|
||||
"total_defects": total_defects,
|
||||
"average_defects_per_check": round(avg_defects_per_check, 2),
|
||||
"checks_needing_corrective_action": checks_needing_action,
|
||||
"by_check_type": by_check_type,
|
||||
"tenant_id": tenant_id
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error calculating quality metrics", error=str(e))
|
||||
raise DatabaseError(f"Failed to calculate quality metrics: {str(e)}")
|
||||
|
||||
async def get_quality_trends(
|
||||
self,
|
||||
tenant_id: str,
|
||||
check_type: str,
|
||||
days_back: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
"""Get quality trends for a specific check type"""
|
||||
try:
|
||||
end_date = datetime.utcnow().date()
|
||||
start_date = end_date - timedelta(days=days_back)
|
||||
|
||||
checks = await self.get_checks_by_date_range(
|
||||
tenant_id, start_date, end_date, check_type
|
||||
)
|
||||
|
||||
# Group by date
|
||||
daily_metrics = {}
|
||||
for check in checks:
|
||||
check_date = check.check_time.date()
|
||||
if check_date not in daily_metrics:
|
||||
daily_metrics[check_date] = {
|
||||
"total_checks": 0,
|
||||
"passed_checks": 0,
|
||||
"quality_scores": [],
|
||||
"defect_count": 0
|
||||
}
|
||||
|
||||
daily_metrics[check_date]["total_checks"] += 1
|
||||
if check.pass_fail:
|
||||
daily_metrics[check_date]["passed_checks"] += 1
|
||||
if check.quality_score is not None:
|
||||
daily_metrics[check_date]["quality_scores"].append(check.quality_score)
|
||||
daily_metrics[check_date]["defect_count"] += check.defect_count
|
||||
|
||||
# Calculate daily pass rates and averages
|
||||
trend_data = []
|
||||
for date_key, metrics in sorted(daily_metrics.items()):
|
||||
pass_rate = (metrics["passed_checks"] / metrics["total_checks"] * 100) if metrics["total_checks"] > 0 else 0
|
||||
avg_score = sum(metrics["quality_scores"]) / len(metrics["quality_scores"]) if metrics["quality_scores"] else 0
|
||||
|
||||
trend_data.append({
|
||||
"date": date_key.isoformat(),
|
||||
"total_checks": metrics["total_checks"],
|
||||
"pass_rate": round(pass_rate, 2),
|
||||
"average_quality_score": round(avg_score, 2),
|
||||
"total_defects": metrics["defect_count"]
|
||||
})
|
||||
|
||||
# Calculate overall trend direction
|
||||
            if len(trend_data) > 7:
                # Compare the last seven days against the earlier part of the window;
                # with seven or fewer points there is no earlier baseline to compare against
                recent_avg = sum(d["pass_rate"] for d in trend_data[-7:]) / 7
                earlier_avg = sum(d["pass_rate"] for d in trend_data[:-7]) / (len(trend_data) - 7)
                if recent_avg > earlier_avg:
                    trend_direction = "improving"
                elif recent_avg < earlier_avg:
                    trend_direction = "declining"
                else:
                    trend_direction = "stable"
            else:
                trend_direction = "insufficient_data"
|
||||
|
||||
return {
|
||||
"check_type": check_type,
|
||||
"period_start": start_date.isoformat(),
|
||||
"period_end": end_date.isoformat(),
|
||||
"trend_direction": trend_direction,
|
||||
"daily_data": trend_data,
|
||||
"total_checks": len(checks),
|
||||
"tenant_id": tenant_id
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error calculating quality trends", error=str(e))
|
||||
raise DatabaseError(f"Failed to calculate quality trends: {str(e)}")
|
||||
|
||||
async def get_quality_checks_with_filters(
|
||||
self,
|
||||
tenant_id: str,
|
||||
batch_id: Optional[UUID] = None,
|
||||
product_id: Optional[UUID] = None,
|
||||
start_date: Optional[date] = None,
|
||||
end_date: Optional[date] = None,
|
||||
pass_fail: Optional[bool] = None,
|
||||
page: int = 1,
|
||||
page_size: int = 50
|
||||
) -> tuple[List[QualityCheck], int]:
|
||||
"""Get quality checks with filters and pagination"""
|
||||
try:
|
||||
filters = {"tenant_id": tenant_id}
|
||||
|
||||
if batch_id:
|
||||
filters["batch_id"] = batch_id
|
||||
if product_id:
|
||||
# Note: This would require a join with production batches to filter by product_id
|
||||
# For now, we'll skip this filter or implement it differently
|
||||
pass
|
||||
if start_date:
|
||||
start_datetime = datetime.combine(start_date, datetime.min.time())
|
||||
filters["check_time__gte"] = start_datetime
|
||||
if end_date:
|
||||
end_datetime = datetime.combine(end_date, datetime.max.time())
|
||||
filters["check_time__lte"] = end_datetime
|
||||
if pass_fail is not None:
|
||||
filters["pass_fail"] = pass_fail
|
||||
|
||||
# Get total count
|
||||
total_count = await self.count(filters)
|
||||
|
||||
# Get paginated results
|
||||
offset = (page - 1) * page_size
|
||||
checks = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="check_time",
|
||||
order_desc=True,
|
||||
limit=page_size,
|
||||
offset=offset
|
||||
)
|
||||
|
||||
logger.info("Retrieved quality checks with filters",
|
||||
count=len(checks),
|
||||
total_count=total_count,
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return checks, total_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching quality checks with filters", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch quality checks with filters: {str(e)}")
|
||||
|
||||
# ================================================================
|
||||
# ALERT-RELATED METHODS (migrated from production_alert_repository)
|
||||
# ================================================================
|
||||
|
||||
async def get_quality_issues(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get quality control failures.
|
||||
Returns quality checks that failed within recent hours.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to filter by
|
||||
"""
|
||||
try:
|
||||
from app.models.production import ProductionBatch
|
||||
|
||||
query_str = """
|
||||
SELECT
|
||||
qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
|
||||
qc.quality_score, qc.within_tolerance,
|
||||
qc.pass_fail, qc.defect_count,
|
||||
qc.check_notes as qc_severity,
|
||||
1 as total_failures,
|
||||
pb.product_name, pb.batch_number,
|
||||
qc.created_at,
|
||||
qc.process_stage
|
||||
FROM quality_checks qc
|
||||
JOIN production_batches pb ON pb.id = qc.batch_id
|
||||
WHERE qc.pass_fail = false
|
||||
AND qc.created_at > NOW() - INTERVAL '4 hours'
|
||||
AND qc.corrective_action_needed = true
|
||||
"""
|
||||
|
||||
params = {}
|
||||
if tenant_id:
|
||||
query_str += " AND qc.tenant_id = :tenant_id"
|
||||
params["tenant_id"] = tenant_id
|
||||
|
||||
query_str += """
|
||||
ORDER BY
|
||||
CASE
|
||||
WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
|
||||
WHEN qc.pass_fail = false THEN 2
|
||||
ELSE 3
|
||||
END,
|
||||
qc.created_at DESC
|
||||
"""
|
||||
|
||||
result = await self.session.execute(text(query_str), params)
|
||||
return [dict(row._mapping) for row in result.fetchall()]
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get quality issues", error=str(e))
|
||||
raise DatabaseError(f"Failed to get quality issues: {str(e)}")
|
||||
|
||||
async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
|
||||
"""
|
||||
Mark a quality check as acknowledged to avoid duplicate alerts.
|
||||
"""
|
||||
try:
|
||||
query = text("""
|
||||
UPDATE quality_checks
|
||||
SET acknowledged = true
|
||||
WHERE id = :id
|
||||
""")
|
||||
|
||||
await self.session.execute(query, {"id": quality_check_id})
|
||||
await self.session.commit()
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
|
||||
raise DatabaseError(f"Failed to mark quality check acknowledged: {str(e)}")
|
||||
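A short sketch of how the quality-check helpers above might be composed into a weekly report; the session factory name and the report shape are assumptions, not part of the commit.

from datetime import date, timedelta

async def weekly_quality_summary(async_session_factory, tenant_id: str) -> dict:
    # Hypothetical reporting helper: aggregate metrics plus the raw failures behind them
    async with async_session_factory() as session:
        repo = QualityCheckRepository(session)
        end = date.today()
        start = end - timedelta(days=7)
        metrics = await repo.get_quality_metrics(tenant_id, start, end)
        failed = await repo.get_failed_checks(tenant_id, days_back=7)
        metrics["failed_check_ids"] = [str(c.id) for c in failed]
        return metrics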
@@ -0,0 +1,162 @@
|
||||
"""
|
||||
Quality Template Repository for Production Service
|
||||
"""
|
||||
|
||||
from typing import List, Optional, Tuple
|
||||
from sqlalchemy import and_, or_, func, select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from .base import ProductionBaseRepository
|
||||
from ..models.production import QualityCheckTemplate, ProcessStage
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class QualityTemplateRepository(ProductionBaseRepository):
|
||||
"""Repository for quality check template operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession):
|
||||
super().__init__(QualityCheckTemplate, session)
|
||||
|
||||
async def get_templates_by_tenant(
|
||||
self,
|
||||
tenant_id: str,
|
||||
stage: Optional[ProcessStage] = None,
|
||||
check_type: Optional[str] = None,
|
||||
is_active: Optional[bool] = True,
|
||||
skip: int = 0,
|
||||
limit: int = 100
|
||||
) -> Tuple[List[QualityCheckTemplate], int]:
|
||||
"""Get quality check templates with filtering and pagination"""
|
||||
|
||||
filters = [QualityCheckTemplate.tenant_id == tenant_id]
|
||||
|
||||
if is_active is not None:
|
||||
filters.append(QualityCheckTemplate.is_active == is_active)
|
||||
|
||||
if check_type:
|
||||
filters.append(QualityCheckTemplate.check_type == check_type)
|
||||
|
||||
if stage:
|
||||
filters.append(
|
||||
or_(
|
||||
func.json_contains(
|
||||
QualityCheckTemplate.applicable_stages,
|
||||
f'"{stage.value}"'
|
||||
),
|
||||
QualityCheckTemplate.applicable_stages.is_(None)
|
||||
)
|
||||
)
|
||||
|
||||
# Get total count with SQLAlchemy conditions
|
||||
count_query = select(func.count(QualityCheckTemplate.id)).where(and_(*filters))
|
||||
count_result = await self.session.execute(count_query)
|
||||
total = count_result.scalar()
|
||||
|
||||
# Get templates with ordering
|
||||
query = select(QualityCheckTemplate).where(and_(*filters)).order_by(
|
||||
QualityCheckTemplate.is_critical.desc(),
|
||||
QualityCheckTemplate.is_required.desc(),
|
||||
QualityCheckTemplate.name
|
||||
).offset(skip).limit(limit)
|
||||
|
||||
result = await self.session.execute(query)
|
||||
templates = result.scalars().all()
|
||||
|
||||
return templates, total
|
||||
|
||||
async def get_by_tenant_and_id(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_id: UUID
|
||||
) -> Optional[QualityCheckTemplate]:
|
||||
"""Get a specific quality check template by tenant and ID"""
|
||||
|
||||
return await self.get_by_filters(
|
||||
and_(
|
||||
QualityCheckTemplate.tenant_id == tenant_id,
|
||||
QualityCheckTemplate.id == template_id
|
||||
)
|
||||
)
|
||||
|
||||
async def get_templates_for_stage(
|
||||
self,
|
||||
tenant_id: str,
|
||||
stage: ProcessStage,
|
||||
is_active: Optional[bool] = True
|
||||
) -> List[QualityCheckTemplate]:
|
||||
"""Get all quality check templates applicable to a specific process stage"""
|
||||
|
||||
filters = [
|
||||
QualityCheckTemplate.tenant_id == tenant_id,
|
||||
or_(
|
||||
func.json_contains(
|
||||
QualityCheckTemplate.applicable_stages,
|
||||
f'"{stage.value}"'
|
||||
),
|
||||
QualityCheckTemplate.applicable_stages.is_(None)
|
||||
)
|
||||
]
|
||||
|
||||
if is_active is not None:
|
||||
filters.append(QualityCheckTemplate.is_active == is_active)
|
||||
|
||||
return await self.get_multi(
|
||||
filters=and_(*filters),
|
||||
order_by=[
|
||||
QualityCheckTemplate.is_critical.desc(),
|
||||
QualityCheckTemplate.is_required.desc(),
|
||||
QualityCheckTemplate.weight.desc(),
|
||||
QualityCheckTemplate.name
|
||||
]
|
||||
)
|
||||
|
||||
async def check_template_code_exists(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_code: str,
|
||||
exclude_id: Optional[UUID] = None
|
||||
) -> bool:
|
||||
"""Check if a template code already exists for the tenant"""
|
||||
|
||||
filters = [
|
||||
QualityCheckTemplate.tenant_id == tenant_id,
|
||||
QualityCheckTemplate.template_code == template_code
|
||||
]
|
||||
|
||||
if exclude_id:
|
||||
filters.append(QualityCheckTemplate.id != exclude_id)
|
||||
|
||||
existing = await self.get_by_filters(and_(*filters))
|
||||
return existing is not None
|
||||
|
||||
async def get_by_filters(self, *filters):
|
||||
"""Get a single record by filters"""
|
||||
try:
|
||||
query = select(self.model).where(and_(*filters))
|
||||
result = await self.session.execute(query)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e:
|
||||
logger.error("Error getting record by filters", error=str(e), filters=str(filters))
|
||||
raise
|
||||
|
||||
async def get_templates_by_ids(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_ids: List[UUID]
|
||||
) -> List[QualityCheckTemplate]:
|
||||
"""Get quality check templates by list of IDs"""
|
||||
|
||||
return await self.get_multi(
|
||||
filters=and_(
|
||||
QualityCheckTemplate.tenant_id == tenant_id,
|
||||
QualityCheckTemplate.id.in_(template_ids)
|
||||
),
|
||||
order_by=[
|
||||
QualityCheckTemplate.is_critical.desc(),
|
||||
QualityCheckTemplate.is_required.desc(),
|
||||
QualityCheckTemplate.weight.desc()
|
||||
]
|
||||
)
|
||||
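The template repository above treats a NULL applicable_stages list as "applies to every stage" and orders critical and required templates first. A minimal lookup sketch, assuming the ProcessStage enum defines a MIXING member (not confirmed by this commit):

async def checks_for_mixing(session, tenant_id: str) -> list[str]:
    # Hypothetical: collect the template codes that should run at the mixing stage
    repo = QualityTemplateRepository(session)
    templates = await repo.get_templates_for_stage(tenant_id, ProcessStage.MIXING)
    return [t.template_code for t in templates]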
6
services/production/app/schemas/__init__.py
Normal file
6
services/production/app/schemas/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
# ================================================================
|
||||
# services/production/app/schemas/__init__.py
|
||||
# ================================================================
|
||||
"""
|
||||
Pydantic schemas for request/response models
|
||||
"""
|
||||
488
services/production/app/schemas/equipment.py
Normal file
488
services/production/app/schemas/equipment.py
Normal file
@@ -0,0 +1,488 @@
|
||||
# services/production/app/schemas/equipment.py
|
||||
"""
|
||||
Equipment schemas for Production Service
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field, ConfigDict
|
||||
from typing import Optional, List
|
||||
from datetime import datetime
|
||||
from uuid import UUID
|
||||
|
||||
from app.models.production import EquipmentType, EquipmentStatus, IoTProtocol, IoTConnectionStatus
|
||||
|
||||
|
||||
class IoTConnectionConfig(BaseModel):
|
||||
"""Schema for IoT connection configuration"""
|
||||
protocol: str = Field(..., description="IoT protocol (rest_api, opc_ua, mqtt, modbus, custom)")
|
||||
endpoint: str = Field(..., description="Connection endpoint (URL or IP address)")
|
||||
port: Optional[int] = Field(None, description="Connection port")
|
||||
username: Optional[str] = Field(None, description="Username for authentication")
|
||||
password: Optional[str] = Field(None, description="Password for authentication")
|
||||
api_key: Optional[str] = Field(None, description="API key for authentication")
|
||||
token: Optional[str] = Field(None, description="Authentication token")
|
||||
additional_config: Optional[dict] = Field(None, description="Additional protocol-specific configuration")
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"protocol": "rest_api",
|
||||
"endpoint": "https://connectedcooking.com/api/v1",
|
||||
"port": 443,
|
||||
"api_key": "your-api-key-here",
|
||||
"additional_config": {"poll_interval": 30}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class EquipmentCreate(BaseModel):
|
||||
"""Schema for creating new equipment"""
|
||||
name: str = Field(..., min_length=1, max_length=255, description="Equipment name")
|
||||
type: EquipmentType = Field(..., description="Equipment type")
|
||||
model: Optional[str] = Field(None, max_length=100, description="Equipment model")
|
||||
serial_number: Optional[str] = Field(None, max_length=100, description="Serial number")
|
||||
location: Optional[str] = Field(None, max_length=255, description="Physical location")
|
||||
manufacturer: Optional[str] = Field(None, max_length=100, description="Manufacturer")
|
||||
firmware_version: Optional[str] = Field(None, max_length=50, description="Firmware version")
|
||||
status: EquipmentStatus = Field(default=EquipmentStatus.OPERATIONAL, description="Equipment status")
|
||||
|
||||
# Installation and maintenance
|
||||
install_date: Optional[datetime] = Field(None, description="Installation date")
|
||||
last_maintenance_date: Optional[datetime] = Field(None, description="Last maintenance date")
|
||||
next_maintenance_date: Optional[datetime] = Field(None, description="Next scheduled maintenance date")
|
||||
maintenance_interval_days: Optional[int] = Field(None, ge=1, description="Maintenance interval in days")
|
||||
|
||||
# Performance metrics
|
||||
efficiency_percentage: Optional[float] = Field(None, ge=0, le=100, description="Current efficiency percentage")
|
||||
uptime_percentage: Optional[float] = Field(None, ge=0, le=100, description="Overall uptime percentage")
|
||||
energy_usage_kwh: Optional[float] = Field(None, ge=0, description="Current energy usage in kWh")
|
||||
|
||||
# Specifications
|
||||
power_kw: Optional[float] = Field(None, ge=0, description="Power consumption in kilowatts")
|
||||
capacity: Optional[float] = Field(None, ge=0, description="Equipment capacity")
|
||||
weight_kg: Optional[float] = Field(None, ge=0, description="Weight in kilograms")
|
||||
|
||||
# Temperature monitoring
|
||||
current_temperature: Optional[float] = Field(None, description="Current temperature")
|
||||
target_temperature: Optional[float] = Field(None, description="Target temperature")
|
||||
|
||||
# IoT Connectivity
|
||||
iot_enabled: bool = Field(default=False, description="Enable IoT connectivity")
|
||||
iot_protocol: Optional[str] = Field(None, description="IoT protocol")
|
||||
iot_endpoint: Optional[str] = Field(None, description="IoT endpoint URL or IP")
|
||||
iot_port: Optional[int] = Field(None, description="IoT connection port")
|
||||
iot_config: Optional[dict] = Field(None, description="IoT configuration")
|
||||
|
||||
# Real-time monitoring
|
||||
supports_realtime: bool = Field(default=False, description="Supports real-time monitoring")
|
||||
poll_interval_seconds: Optional[int] = Field(None, ge=1, description="Polling interval in seconds")
|
||||
|
||||
# Sensor capabilities
|
||||
temperature_zones: Optional[int] = Field(None, ge=1, description="Number of temperature zones")
|
||||
supports_humidity: bool = Field(default=False, description="Supports humidity monitoring")
|
||||
supports_energy_monitoring: bool = Field(default=False, description="Supports energy monitoring")
|
||||
supports_remote_control: bool = Field(default=False, description="Supports remote control")
|
||||
|
||||
# Notes
|
||||
notes: Optional[str] = Field(None, description="Additional notes")
|
||||
|
||||
# Support contact information
|
||||
support_contact: Optional[dict] = Field(
|
||||
None,
|
||||
description="Support contact information for equipment maintenance",
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"email": "support@ovenfactory.com",
|
||||
"phone": "+1-800-555-1234",
|
||||
"company": "OvenTech Support",
|
||||
"contract_number": "SUP-2023-001",
|
||||
"response_time_sla": 24
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"name": "Horno Principal #1",
|
||||
"type": "oven",
|
||||
"model": "Miwe Condo CO 4.1212",
|
||||
"serial_number": "MCO-2021-001",
|
||||
"location": "Área de Horneado - Zona A",
|
||||
"status": "operational",
|
||||
"install_date": "2021-03-15T00:00:00Z",
|
||||
"maintenance_interval_days": 90,
|
||||
"efficiency_percentage": 92.0,
|
||||
"uptime_percentage": 98.5,
|
||||
"power_kw": 45.0,
|
||||
"capacity": 24.0
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class EquipmentUpdate(BaseModel):
|
||||
"""Schema for updating equipment"""
|
||||
name: Optional[str] = Field(None, min_length=1, max_length=255)
|
||||
type: Optional[EquipmentType] = None
|
||||
model: Optional[str] = Field(None, max_length=100)
|
||||
serial_number: Optional[str] = Field(None, max_length=100)
|
||||
location: Optional[str] = Field(None, max_length=255)
|
||||
manufacturer: Optional[str] = Field(None, max_length=100)
|
||||
firmware_version: Optional[str] = Field(None, max_length=50)
|
||||
status: Optional[EquipmentStatus] = None
|
||||
|
||||
# Installation and maintenance
|
||||
install_date: Optional[datetime] = None
|
||||
last_maintenance_date: Optional[datetime] = None
|
||||
next_maintenance_date: Optional[datetime] = None
|
||||
maintenance_interval_days: Optional[int] = Field(None, ge=1)
|
||||
|
||||
# Performance metrics
|
||||
efficiency_percentage: Optional[float] = Field(None, ge=0, le=100)
|
||||
uptime_percentage: Optional[float] = Field(None, ge=0, le=100)
|
||||
energy_usage_kwh: Optional[float] = Field(None, ge=0)
|
||||
|
||||
# Specifications
|
||||
power_kw: Optional[float] = Field(None, ge=0)
|
||||
capacity: Optional[float] = Field(None, ge=0)
|
||||
weight_kg: Optional[float] = Field(None, ge=0)
|
||||
|
||||
# Temperature monitoring
|
||||
current_temperature: Optional[float] = None
|
||||
target_temperature: Optional[float] = None
|
||||
|
||||
# IoT Connectivity
|
||||
iot_enabled: Optional[bool] = None
|
||||
iot_protocol: Optional[str] = None
|
||||
iot_endpoint: Optional[str] = None
|
||||
iot_port: Optional[int] = None
|
||||
iot_config: Optional[dict] = None
|
||||
|
||||
# Real-time monitoring
|
||||
supports_realtime: Optional[bool] = None
|
||||
poll_interval_seconds: Optional[int] = Field(None, ge=1)
|
||||
|
||||
# Sensor capabilities
|
||||
temperature_zones: Optional[int] = Field(None, ge=1)
|
||||
supports_humidity: Optional[bool] = None
|
||||
supports_energy_monitoring: Optional[bool] = None
|
||||
supports_remote_control: Optional[bool] = None
|
||||
|
||||
# Notes
|
||||
notes: Optional[str] = None
|
||||
|
||||
# Support contact information
|
||||
support_contact: Optional[dict] = None
|
||||
|
||||
# Status flag
|
||||
is_active: Optional[bool] = None
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"status": "maintenance",
|
||||
"last_maintenance_date": "2024-01-15T00:00:00Z",
|
||||
"next_maintenance_date": "2024-04-15T00:00:00Z",
|
||||
"efficiency_percentage": 88.0
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class EquipmentResponse(BaseModel):
|
||||
"""Schema for equipment response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
name: str
|
||||
type: EquipmentType
|
||||
model: Optional[str] = None
|
||||
serial_number: Optional[str] = None
|
||||
location: Optional[str] = None
|
||||
manufacturer: Optional[str] = None
|
||||
firmware_version: Optional[str] = None
|
||||
status: EquipmentStatus
|
||||
|
||||
# Installation and maintenance
|
||||
install_date: Optional[datetime] = None
|
||||
last_maintenance_date: Optional[datetime] = None
|
||||
next_maintenance_date: Optional[datetime] = None
|
||||
maintenance_interval_days: Optional[int] = None
|
||||
|
||||
# Performance metrics
|
||||
efficiency_percentage: Optional[float] = None
|
||||
uptime_percentage: Optional[float] = None
|
||||
energy_usage_kwh: Optional[float] = None
|
||||
|
||||
# Specifications
|
||||
power_kw: Optional[float] = None
|
||||
capacity: Optional[float] = None
|
||||
weight_kg: Optional[float] = None
|
||||
|
||||
# Temperature monitoring
|
||||
current_temperature: Optional[float] = None
|
||||
target_temperature: Optional[float] = None
|
||||
|
||||
# IoT Connectivity
|
||||
iot_enabled: bool = False
|
||||
iot_protocol: Optional[str] = None
|
||||
iot_endpoint: Optional[str] = None
|
||||
iot_port: Optional[int] = None
|
||||
iot_connection_status: Optional[str] = None
|
||||
iot_last_connected: Optional[datetime] = None
|
||||
iot_config: Optional[dict] = None
|
||||
|
||||
# Real-time monitoring
|
||||
supports_realtime: bool = False
|
||||
poll_interval_seconds: Optional[int] = None
|
||||
|
||||
# Sensor capabilities
|
||||
temperature_zones: Optional[int] = None
|
||||
supports_humidity: bool = False
|
||||
supports_energy_monitoring: bool = False
|
||||
supports_remote_control: bool = False
|
||||
|
||||
# Status
|
||||
is_active: bool
|
||||
notes: Optional[str] = None
|
||||
|
||||
# Support contact information
|
||||
support_contact: Optional[dict] = None
|
||||
|
||||
# Timestamps
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
class EquipmentListResponse(BaseModel):
|
||||
"""Schema for paginated equipment list response"""
|
||||
equipment: List[EquipmentResponse]
|
||||
total_count: int
|
||||
page: int
|
||||
page_size: int
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"equipment": [],
|
||||
"total_count": 10,
|
||||
"page": 1,
|
||||
"page_size": 50
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class EquipmentDeletionSummary(BaseModel):
|
||||
"""Schema for equipment deletion summary"""
|
||||
can_delete: bool = Field(..., description="Whether the equipment can be deleted")
|
||||
warnings: List[str] = Field(default_factory=list, description="List of warnings about deletion")
|
||||
production_batches_count: int = Field(default=0, description="Number of production batches using this equipment")
|
||||
maintenance_records_count: int = Field(default=0, description="Number of maintenance records")
|
||||
temperature_logs_count: int = Field(default=0, description="Number of temperature logs")
|
||||
equipment_name: Optional[str] = Field(None, description="Equipment name")
|
||||
equipment_type: Optional[str] = Field(None, description="Equipment type")
|
||||
equipment_location: Optional[str] = Field(None, description="Equipment location")
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"can_delete": True,
|
||||
"warnings": ["3 production batch(es) are using this equipment"],
|
||||
"production_batches_count": 3,
|
||||
"maintenance_records_count": 5,
|
||||
"temperature_logs_count": 120,
|
||||
"equipment_name": "Horno Principal #1",
|
||||
"equipment_type": "oven",
|
||||
"equipment_location": "Área de Horneado"
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
# ================================================================
|
||||
# IoT-SPECIFIC SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class EquipmentSensorReadingResponse(BaseModel):
|
||||
"""Schema for equipment sensor reading response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
equipment_id: UUID
|
||||
batch_id: Optional[UUID] = None
|
||||
reading_time: datetime
|
||||
|
||||
# Temperature readings
|
||||
temperature: Optional[float] = None
|
||||
temperature_zones: Optional[dict] = None
|
||||
target_temperature: Optional[float] = None
|
||||
|
||||
# Humidity
|
||||
humidity: Optional[float] = None
|
||||
target_humidity: Optional[float] = None
|
||||
|
||||
# Energy monitoring
|
||||
energy_consumption_kwh: Optional[float] = None
|
||||
power_current_kw: Optional[float] = None
|
||||
|
||||
# Equipment status
|
||||
operational_status: Optional[str] = None
|
||||
cycle_stage: Optional[str] = None
|
||||
cycle_progress_percentage: Optional[float] = None
|
||||
time_remaining_minutes: Optional[int] = None
|
||||
|
||||
# Process parameters
|
||||
motor_speed_rpm: Optional[float] = None
|
||||
door_status: Optional[str] = None
|
||||
steam_level: Optional[float] = None
|
||||
|
||||
# Quality indicators
|
||||
product_weight_kg: Optional[float] = None
|
||||
moisture_content: Optional[float] = None
|
||||
|
||||
# Additional sensor data
|
||||
additional_sensors: Optional[dict] = None
|
||||
|
||||
# Data quality
|
||||
data_quality_score: Optional[float] = None
|
||||
is_anomaly: bool = False
|
||||
|
||||
created_at: datetime
|
||||
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
class EquipmentConnectionTestResponse(BaseModel):
|
||||
"""Schema for IoT connection test response"""
|
||||
success: bool = Field(..., description="Whether connection test succeeded")
|
||||
status: str = Field(..., description="Connection status")
|
||||
message: str = Field(..., description="Detailed message")
|
||||
response_time_ms: Optional[int] = Field(None, description="Response time in milliseconds")
|
||||
protocol_tested: str = Field(..., description="Protocol that was tested")
|
||||
endpoint_tested: str = Field(..., description="Endpoint that was tested")
|
||||
error_details: Optional[str] = Field(None, description="Error details if connection failed")
|
||||
supported_features: Optional[List[str]] = Field(None, description="List of supported IoT features")
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"success": True,
|
||||
"status": "connected",
|
||||
"message": "Successfully connected to equipment",
|
||||
"response_time_ms": 145,
|
||||
"protocol_tested": "rest_api",
|
||||
"endpoint_tested": "https://connectedcooking.com/api/v1",
|
||||
"supported_features": ["temperature", "humidity", "energy_monitoring"]
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class RealTimeDataResponse(BaseModel):
|
||||
"""Schema for real-time equipment data response"""
|
||||
equipment_id: UUID
|
||||
equipment_name: str
|
||||
timestamp: datetime
|
||||
connection_status: str
|
||||
|
||||
# Current readings
|
||||
temperature: Optional[float] = None
|
||||
temperature_zones: Optional[dict] = None
|
||||
humidity: Optional[float] = None
|
||||
energy_consumption_kwh: Optional[float] = None
|
||||
power_current_kw: Optional[float] = None
|
||||
|
||||
# Status
|
||||
operational_status: Optional[str] = None
|
||||
cycle_stage: Optional[str] = None
|
||||
cycle_progress_percentage: Optional[float] = None
|
||||
time_remaining_minutes: Optional[int] = None
|
||||
|
||||
# Active batch
|
||||
active_batch_id: Optional[UUID] = None
|
||||
active_batch_name: Optional[str] = None
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"equipment_id": "123e4567-e89b-12d3-a456-426614174000",
|
||||
"equipment_name": "Horno Principal #1",
|
||||
"timestamp": "2025-01-12T10:30:00Z",
|
||||
"connection_status": "connected",
|
||||
"temperature": 185.5,
|
||||
"temperature_zones": {"zone1": 180, "zone2": 190, "zone3": 185},
|
||||
"humidity": 65.0,
|
||||
"operational_status": "running",
|
||||
"cycle_stage": "baking",
|
||||
"cycle_progress_percentage": 45.0,
|
||||
"time_remaining_minutes": 12
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class EquipmentIoTAlertResponse(BaseModel):
|
||||
"""Schema for IoT alert response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
equipment_id: UUID
|
||||
batch_id: Optional[UUID] = None
|
||||
|
||||
# Alert information
|
||||
alert_type: str
|
||||
severity: str
|
||||
alert_time: datetime
|
||||
|
||||
# Alert details
|
||||
title: str
|
||||
message: str
|
||||
|
||||
# Threshold information
|
||||
threshold_value: Optional[float] = None
|
||||
actual_value: Optional[float] = None
|
||||
deviation_percentage: Optional[float] = None
|
||||
|
||||
# Status
|
||||
is_active: bool
|
||||
is_acknowledged: bool
|
||||
acknowledged_by: Optional[UUID] = None
|
||||
acknowledged_at: Optional[datetime] = None
|
||||
|
||||
is_resolved: bool
|
||||
resolved_by: Optional[UUID] = None
|
||||
resolved_at: Optional[datetime] = None
|
||||
resolution_notes: Optional[str] = None
|
||||
|
||||
# Automated response
|
||||
auto_resolved: bool
|
||||
corrective_action_taken: Optional[str] = None
|
||||
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
class EquipmentSensorHistoryResponse(BaseModel):
|
||||
"""Schema for sensor reading history response"""
|
||||
equipment_id: UUID
|
||||
equipment_name: str
|
||||
start_time: datetime
|
||||
end_time: datetime
|
||||
total_readings: int
|
||||
readings: List[EquipmentSensorReadingResponse]
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra={
|
||||
"example": {
|
||||
"equipment_id": "123e4567-e89b-12d3-a456-426614174000",
|
||||
"equipment_name": "Horno Principal #1",
|
||||
"start_time": "2025-01-12T08:00:00Z",
|
||||
"end_time": "2025-01-12T12:00:00Z",
|
||||
"total_readings": 48,
|
||||
"readings": []
|
||||
}
|
||||
}
|
||||
)
|
||||
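Because the equipment schemas above validate ranges at the API boundary (percentages 0-100, non-negative power and capacity), a client payload can be checked before it ever reaches a route handler. A sketch of an IoT-enabled oven payload; EquipmentType.OVEN is assumed from the "oven" example value and the figures are illustrative only:

payload = EquipmentCreate(
    name="Horno Principal #2",
    type=EquipmentType.OVEN,              # assumed enum member, matching the "oven" example
    status=EquipmentStatus.OPERATIONAL,
    maintenance_interval_days=90,
    efficiency_percentage=92.0,
    power_kw=45.0,
    iot_enabled=True,
    iot_protocol="rest_api",
    iot_endpoint="https://connectedcooking.com/api/v1",
    iot_port=443,
    supports_realtime=True,
    poll_interval_seconds=30,
)
# Pydantic v2: produce the JSON-ready dict the API route would receive
body = payload.model_dump(mode="json", exclude_none=True)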
352
services/production/app/schemas/production.py
Normal file
352
services/production/app/schemas/production.py
Normal file
@@ -0,0 +1,352 @@
|
||||
# ================================================================
|
||||
# services/production/app/schemas/production.py
|
||||
# ================================================================
|
||||
"""
|
||||
Pydantic schemas for production service
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from typing import Optional, List, Dict, Any, Union
|
||||
from datetime import datetime, date
|
||||
from uuid import UUID
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class ProductionStatusEnum(str, Enum):
|
||||
"""Production batch status enumeration for API"""
|
||||
PENDING = "PENDING"
|
||||
IN_PROGRESS = "IN_PROGRESS"
|
||||
COMPLETED = "COMPLETED"
|
||||
CANCELLED = "CANCELLED"
|
||||
ON_HOLD = "ON_HOLD"
|
||||
QUALITY_CHECK = "QUALITY_CHECK"
|
||||
FAILED = "FAILED"
|
||||
|
||||
|
||||
class ProductionPriorityEnum(str, Enum):
|
||||
"""Production priority levels for API"""
|
||||
LOW = "LOW"
|
||||
MEDIUM = "MEDIUM"
|
||||
HIGH = "HIGH"
|
||||
URGENT = "URGENT"
|
||||
|
||||
|
||||
|
||||
|
||||
# ================================================================
|
||||
# PRODUCTION BATCH SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class ProductionBatchBase(BaseModel):
|
||||
"""Base schema for production batch"""
|
||||
product_id: UUID
|
||||
product_name: str = Field(..., min_length=1, max_length=255)
|
||||
recipe_id: Optional[UUID] = None
|
||||
planned_start_time: datetime
|
||||
planned_end_time: datetime
|
||||
planned_quantity: float = Field(..., gt=0)
|
||||
planned_duration_minutes: int = Field(..., gt=0)
|
||||
priority: ProductionPriorityEnum = ProductionPriorityEnum.MEDIUM
|
||||
is_rush_order: bool = False
|
||||
is_special_recipe: bool = False
|
||||
production_notes: Optional[str] = None
|
||||
|
||||
@validator('planned_end_time')
|
||||
def validate_end_time_after_start(cls, v, values):
|
||||
if 'planned_start_time' in values and v <= values['planned_start_time']:
|
||||
raise ValueError('planned_end_time must be after planned_start_time')
|
||||
return v
|
||||
|
||||
|
||||
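Since the validator above runs against the already-parsed fields, a payload whose window ends before it starts is rejected at construction time. A throwaway check, purely illustrative:

from datetime import datetime, timedelta
import pydantic

start = datetime(2025, 1, 12, 6, 0)
try:
    ProductionBatchBase(
        product_id="123e4567-e89b-12d3-a456-426614174000",
        product_name="Baguette",
        planned_start_time=start,
        planned_end_time=start - timedelta(hours=1),  # ends before it starts
        planned_quantity=120,
        planned_duration_minutes=90,
    )
except pydantic.ValidationError as exc:
    print(exc)  # reports: planned_end_time must be after planned_start_time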
class ProductionBatchCreate(ProductionBatchBase):
|
||||
"""Schema for creating a production batch"""
|
||||
batch_number: Optional[str] = Field(None, max_length=50)
|
||||
order_id: Optional[UUID] = None
|
||||
forecast_id: Optional[UUID] = None
|
||||
equipment_used: Optional[List[str]] = None
|
||||
staff_assigned: Optional[List[str]] = None
|
||||
station_id: Optional[str] = Field(None, max_length=50)
|
||||
|
||||
|
||||
class ProductionBatchUpdate(BaseModel):
|
||||
"""Schema for updating a production batch"""
|
||||
product_name: Optional[str] = Field(None, min_length=1, max_length=255)
|
||||
planned_start_time: Optional[datetime] = None
|
||||
planned_end_time: Optional[datetime] = None
|
||||
planned_quantity: Optional[float] = Field(None, gt=0)
|
||||
planned_duration_minutes: Optional[int] = Field(None, gt=0)
|
||||
actual_quantity: Optional[float] = Field(None, ge=0)
|
||||
priority: Optional[ProductionPriorityEnum] = None
|
||||
equipment_used: Optional[List[str]] = None
|
||||
staff_assigned: Optional[List[str]] = None
|
||||
station_id: Optional[str] = Field(None, max_length=50)
|
||||
production_notes: Optional[str] = None
|
||||
|
||||
|
||||
class ProductionBatchStatusUpdate(BaseModel):
|
||||
"""Schema for updating production batch status"""
|
||||
status: ProductionStatusEnum
|
||||
actual_quantity: Optional[float] = Field(None, ge=0)
|
||||
notes: Optional[str] = None
|
||||
|
||||
|
||||
class ProductionBatchResponse(BaseModel):
|
||||
"""Schema for production batch response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
batch_number: str
|
||||
product_id: UUID
|
||||
product_name: str
|
||||
recipe_id: Optional[UUID]
|
||||
planned_start_time: datetime
|
||||
planned_end_time: datetime
|
||||
planned_quantity: float
|
||||
planned_duration_minutes: int
|
||||
actual_start_time: Optional[datetime]
|
||||
actual_end_time: Optional[datetime]
|
||||
actual_quantity: Optional[float]
|
||||
actual_duration_minutes: Optional[int]
|
||||
status: ProductionStatusEnum
|
||||
priority: ProductionPriorityEnum
|
||||
|
||||
# Process stage tracking (added to replace frontend mock data)
|
||||
current_process_stage: Optional[str] = None
|
||||
process_stage_history: Optional[List[Dict[str, Any]]] = None
|
||||
pending_quality_checks: Optional[List[Dict[str, Any]]] = None
|
||||
completed_quality_checks: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
estimated_cost: Optional[float]
|
||||
actual_cost: Optional[float]
|
||||
yield_percentage: Optional[float]
|
||||
quality_score: Optional[float]
|
||||
equipment_used: Optional[List[str]]
|
||||
staff_assigned: Optional[List[str]]
|
||||
station_id: Optional[str]
|
||||
order_id: Optional[UUID]
|
||||
forecast_id: Optional[UUID]
|
||||
is_rush_order: bool
|
||||
is_special_recipe: bool
|
||||
production_notes: Optional[str]
|
||||
quality_notes: Optional[str]
|
||||
delay_reason: Optional[str]
|
||||
cancellation_reason: Optional[str]
|
||||
reasoning_data: Optional[Dict[str, Any]] = None
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
completed_at: Optional[datetime]
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
# ================================================================
|
||||
# PRODUCTION SCHEDULE SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class ProductionScheduleBase(BaseModel):
|
||||
"""Base schema for production schedule"""
|
||||
schedule_date: date
|
||||
shift_start: datetime
|
||||
shift_end: datetime
|
||||
total_capacity_hours: float = Field(..., gt=0)
|
||||
planned_capacity_hours: float = Field(..., gt=0)
|
||||
staff_count: int = Field(..., gt=0)
|
||||
equipment_capacity: Optional[Dict[str, Any]] = None
|
||||
station_assignments: Optional[Dict[str, Any]] = None
|
||||
schedule_notes: Optional[str] = None
|
||||
|
||||
@validator('shift_end')
|
||||
def validate_shift_end_after_start(cls, v, values):
|
||||
if 'shift_start' in values and v <= values['shift_start']:
|
||||
raise ValueError('shift_end must be after shift_start')
|
||||
return v
|
||||
|
||||
@validator('planned_capacity_hours')
|
||||
def validate_planned_capacity(cls, v, values):
|
||||
if 'total_capacity_hours' in values and v > values['total_capacity_hours']:
|
||||
raise ValueError('planned_capacity_hours cannot exceed total_capacity_hours')
|
||||
return v
|
||||
|
||||
|
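# Illustrative sketch (not part of the original module): the two validators
# above enforce shift ordering and keep planned capacity within the total.
# The dates and hour figures are hypothetical sample values.
def _example_schedule_validation() -> None:
    try:
        ProductionScheduleBase(
            schedule_date=date(2025, 1, 12),
            shift_start=datetime(2025, 1, 12, 4, 0),
            shift_end=datetime(2025, 1, 12, 12, 0),
            total_capacity_hours=8.0,
            planned_capacity_hours=10.0,  # exceeds total_capacity_hours -> rejected
            staff_count=4,
        )
    except ValueError:
        pass  # 'planned_capacity_hours cannot exceed total_capacity_hours'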
||||
class ProductionScheduleCreate(ProductionScheduleBase):
|
||||
"""Schema for creating a production schedule"""
|
||||
pass
|
||||
|
||||
|
||||
class ProductionScheduleUpdate(BaseModel):
|
||||
"""Schema for updating a production schedule"""
|
||||
shift_start: Optional[datetime] = None
|
||||
shift_end: Optional[datetime] = None
|
||||
total_capacity_hours: Optional[float] = Field(None, gt=0)
|
||||
planned_capacity_hours: Optional[float] = Field(None, gt=0)
|
||||
staff_count: Optional[int] = Field(None, gt=0)
|
||||
overtime_hours: Optional[float] = Field(None, ge=0)
|
||||
equipment_capacity: Optional[Dict[str, Any]] = None
|
||||
station_assignments: Optional[Dict[str, Any]] = None
|
||||
schedule_notes: Optional[str] = None
|
||||
|
||||
|
||||
class ProductionScheduleResponse(BaseModel):
|
||||
"""Schema for production schedule response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
schedule_date: date
|
||||
shift_start: datetime
|
||||
shift_end: datetime
|
||||
total_capacity_hours: float
|
||||
planned_capacity_hours: float
|
||||
actual_capacity_hours: Optional[float]
|
||||
overtime_hours: Optional[float]
|
||||
staff_count: int
|
||||
equipment_capacity: Optional[Dict[str, Any]]
|
||||
station_assignments: Optional[Dict[str, Any]]
|
||||
total_batches_planned: int
|
||||
total_batches_completed: Optional[int]
|
||||
total_quantity_planned: float
|
||||
total_quantity_produced: Optional[float]
|
||||
is_finalized: bool
|
||||
is_active: bool
|
||||
efficiency_percentage: Optional[float]
|
||||
utilization_percentage: Optional[float]
|
||||
on_time_completion_rate: Optional[float]
|
||||
schedule_notes: Optional[str]
|
||||
schedule_adjustments: Optional[Dict[str, Any]]
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
finalized_at: Optional[datetime]
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
# ================================================================
|
||||
# QUALITY CHECK SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class QualityCheckBase(BaseModel):
|
||||
"""Base schema for quality check"""
|
||||
batch_id: UUID
|
||||
check_type: str = Field(..., min_length=1, max_length=50)
|
||||
check_time: datetime
|
||||
quality_score: float = Field(..., ge=1, le=10)
|
||||
pass_fail: bool
|
||||
defect_count: int = Field(0, ge=0)
|
||||
defect_types: Optional[List[str]] = None
|
||||
check_notes: Optional[str] = None
|
||||
|
||||
|
||||
class QualityCheckCreate(QualityCheckBase):
|
||||
"""Schema for creating a quality check"""
|
||||
checker_id: Optional[str] = Field(None, max_length=100)
|
||||
measured_weight: Optional[float] = Field(None, gt=0)
|
||||
measured_temperature: Optional[float] = None
|
||||
measured_moisture: Optional[float] = Field(None, ge=0, le=100)
|
||||
measured_dimensions: Optional[Dict[str, float]] = None
|
||||
target_weight: Optional[float] = Field(None, gt=0)
|
||||
target_temperature: Optional[float] = None
|
||||
target_moisture: Optional[float] = Field(None, ge=0, le=100)
|
||||
tolerance_percentage: Optional[float] = Field(None, ge=0, le=100)
|
||||
corrective_actions: Optional[List[str]] = None
|
||||
|
||||
|
||||
class QualityCheckResponse(BaseModel):
|
||||
"""Schema for quality check response"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
batch_id: UUID
|
||||
check_type: str
|
||||
check_time: datetime
|
||||
checker_id: Optional[str]
|
||||
quality_score: float
|
||||
pass_fail: bool
|
||||
defect_count: int
|
||||
defect_types: Optional[List[str]]
|
||||
measured_weight: Optional[float]
|
||||
measured_temperature: Optional[float]
|
||||
measured_moisture: Optional[float]
|
||||
measured_dimensions: Optional[Dict[str, float]]
|
||||
target_weight: Optional[float]
|
||||
target_temperature: Optional[float]
|
||||
target_moisture: Optional[float]
|
||||
tolerance_percentage: Optional[float]
|
||||
within_tolerance: Optional[bool]
|
||||
corrective_action_needed: bool
|
||||
corrective_actions: Optional[List[str]]
|
||||
check_notes: Optional[str]
|
||||
photos_urls: Optional[List[str]]
|
||||
certificate_url: Optional[str]
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
|
||||
|
||||
# ================================================================
|
||||
# DASHBOARD AND ANALYTICS SCHEMAS
|
||||
# ================================================================
|
||||
|
||||
class ProductionDashboardSummary(BaseModel):
|
||||
"""Schema for production dashboard summary"""
|
||||
active_batches: int
|
||||
todays_production_plan: List[Dict[str, Any]]
|
||||
capacity_utilization: float
|
||||
on_time_completion_rate: float
|
||||
average_quality_score: float
|
||||
total_output_today: float
|
||||
efficiency_percentage: float
|
||||
|
||||
|
||||
class DailyProductionRequirements(BaseModel):
|
||||
"""Schema for daily production requirements"""
|
||||
date: date
|
||||
production_plan: List[Dict[str, Any]]
|
||||
total_capacity_needed: float
|
||||
available_capacity: float
|
||||
capacity_gap: float
|
||||
urgent_items: int
|
||||
recommended_schedule: Optional[Dict[str, Any]]
|
||||
|
||||
|
||||
class ProductionMetrics(BaseModel):
|
||||
"""Schema for production metrics"""
|
||||
period_start: date
|
||||
period_end: date
|
||||
total_batches: int
|
||||
completed_batches: int
|
||||
completion_rate: float
|
||||
average_yield_percentage: float
|
||||
on_time_completion_rate: float
|
||||
total_production_cost: float
|
||||
average_quality_score: float
|
||||
efficiency_trends: List[Dict[str, Any]]
|
||||
|
||||
|
||||
# ================================================================
|
||||
# REQUEST/RESPONSE WRAPPERS
|
||||
# ================================================================
|
||||
|
||||
class ProductionBatchListResponse(BaseModel):
|
||||
"""Schema for production batch list response"""
|
||||
batches: List[ProductionBatchResponse]
|
||||
total_count: int
|
||||
page: int
|
||||
page_size: int
|
||||
|
||||
|
||||
class ProductionScheduleListResponse(BaseModel):
|
||||
"""Schema for production schedule list response"""
|
||||
schedules: List[ProductionScheduleResponse]
|
||||
total_count: int
|
||||
page: int
|
||||
page_size: int
|
||||
|
||||
|
||||
class QualityCheckListResponse(BaseModel):
|
||||
"""Schema for quality check list response"""
|
||||
quality_checks: List[QualityCheckResponse]
|
||||
total_count: int
|
||||
page: int
|
||||
page_size: int
|
||||
180
services/production/app/schemas/quality_templates.py
Normal file
@@ -0,0 +1,180 @@
|
||||
# services/production/app/schemas/quality_templates.py
|
||||
"""
|
||||
Quality Check Template Pydantic schemas for validation and serialization
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from typing import Optional, List, Dict, Any, Union
|
||||
from uuid import UUID
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
|
||||
from ..models.production import ProcessStage
|
||||
|
||||
|
||||
class QualityCheckType(str, Enum):
|
||||
"""Quality check types"""
|
||||
VISUAL = "visual"
|
||||
MEASUREMENT = "measurement"
|
||||
TEMPERATURE = "temperature"
|
||||
WEIGHT = "weight"
|
||||
BOOLEAN = "boolean"
|
||||
TIMING = "timing"
|
||||
CHECKLIST = "checklist"
|
||||
|
||||
|
||||
class QualityCheckTemplateBase(BaseModel):
|
||||
"""Base schema for quality check templates"""
|
||||
name: str = Field(..., min_length=1, max_length=255, description="Template name")
|
||||
template_code: Optional[str] = Field(None, max_length=100, description="Template code for reference")
|
||||
check_type: QualityCheckType = Field(..., description="Type of quality check")
|
||||
category: Optional[str] = Field(None, max_length=100, description="Check category (e.g., appearance, structure)")
|
||||
description: Optional[str] = Field(None, description="Template description")
|
||||
instructions: Optional[str] = Field(None, description="Check instructions for staff")
|
||||
|
||||
# Configuration
|
||||
parameters: Optional[Dict[str, Any]] = Field(None, description="Dynamic check parameters")
|
||||
thresholds: Optional[Dict[str, Any]] = Field(None, description="Pass/fail criteria")
|
||||
scoring_criteria: Optional[Dict[str, Any]] = Field(None, description="Scoring methodology")
|
||||
|
||||
# Settings
|
||||
is_active: bool = Field(True, description="Whether template is active")
|
||||
is_required: bool = Field(False, description="Whether check is required")
|
||||
is_critical: bool = Field(False, description="Whether failure blocks production")
|
||||
weight: float = Field(1.0, ge=0.0, le=10.0, description="Weight in overall quality score")
|
||||
|
||||
# Measurement specifications
|
||||
min_value: Optional[float] = Field(None, description="Minimum acceptable value")
|
||||
max_value: Optional[float] = Field(None, description="Maximum acceptable value")
|
||||
target_value: Optional[float] = Field(None, description="Target value")
|
||||
unit: Optional[str] = Field(None, max_length=20, description="Unit of measurement")
|
||||
tolerance_percentage: Optional[float] = Field(None, ge=0.0, le=100.0, description="Tolerance percentage")
|
||||
|
||||
# Process stage applicability
|
||||
applicable_stages: Optional[List[ProcessStage]] = Field(None, description="Applicable process stages")
|
||||
|
||||
@validator('applicable_stages')
|
||||
def validate_stages(cls, v):
|
||||
if v is not None:
|
||||
# Ensure all values are valid ProcessStage enums
|
||||
for stage in v:
|
||||
if stage not in ProcessStage:
|
||||
raise ValueError(f"Invalid process stage: {stage}")
|
||||
return v
|
||||
|
||||
@validator('min_value', 'max_value', 'target_value')
|
||||
def validate_measurement_values(cls, v, values):
|
||||
if v is not None and values.get('check_type') not in [QualityCheckType.MEASUREMENT, QualityCheckType.TEMPERATURE, QualityCheckType.WEIGHT]:
|
||||
return None # Clear values for non-measurement types
|
||||
return v
|
||||
|
||||
|
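# Illustrative sketch (not part of the original module): for a VISUAL check the
# validator above drops numeric bounds, while measurement-style checks keep them.
# Template names and bounds are hypothetical sample values.
def _example_measurement_value_handling() -> None:
    visual = QualityCheckTemplateBase(
        name="Crust colour",
        check_type=QualityCheckType.VISUAL,
        min_value=1.0,  # cleared because VISUAL is not a measurement type
    )
    assert visual.min_value is None

    weight = QualityCheckTemplateBase(
        name="Loaf weight",
        check_type=QualityCheckType.WEIGHT,
        min_value=480.0,
        max_value=520.0,
        unit="g",
    )
    assert weight.min_value == 480.0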
||||
class QualityCheckTemplateCreate(QualityCheckTemplateBase):
|
||||
"""Schema for creating quality check templates"""
|
||||
created_by: UUID = Field(..., description="User ID who created the template")
|
||||
|
||||
|
||||
class QualityCheckTemplateUpdate(BaseModel):
|
||||
"""Schema for updating quality check templates"""
|
||||
name: Optional[str] = Field(None, min_length=1, max_length=255)
|
||||
template_code: Optional[str] = Field(None, max_length=100)
|
||||
check_type: Optional[QualityCheckType] = None
|
||||
category: Optional[str] = Field(None, max_length=100)
|
||||
description: Optional[str] = None
|
||||
instructions: Optional[str] = None
|
||||
parameters: Optional[Dict[str, Any]] = None
|
||||
thresholds: Optional[Dict[str, Any]] = None
|
||||
scoring_criteria: Optional[Dict[str, Any]] = None
|
||||
is_active: Optional[bool] = None
|
||||
is_required: Optional[bool] = None
|
||||
is_critical: Optional[bool] = None
|
||||
weight: Optional[float] = Field(None, ge=0.0, le=10.0)
|
||||
min_value: Optional[float] = None
|
||||
max_value: Optional[float] = None
|
||||
target_value: Optional[float] = None
|
||||
unit: Optional[str] = Field(None, max_length=20)
|
||||
tolerance_percentage: Optional[float] = Field(None, ge=0.0, le=100.0)
|
||||
applicable_stages: Optional[List[ProcessStage]] = None
|
||||
|
||||
|
||||
class QualityCheckTemplateResponse(QualityCheckTemplateBase):
|
||||
"""Schema for quality check template responses"""
|
||||
id: UUID
|
||||
tenant_id: UUID
|
||||
created_by: UUID
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
class QualityCheckTemplateList(BaseModel):
|
||||
"""Schema for paginated quality check template lists"""
|
||||
templates: List[QualityCheckTemplateResponse]
|
||||
total: int
|
||||
skip: int
|
||||
limit: int
|
||||
|
||||
|
||||
class QualityCheckCriterion(BaseModel):
|
||||
"""Individual quality check criterion within a template"""
|
||||
id: str = Field(..., description="Unique criterion identifier")
|
||||
name: str = Field(..., description="Criterion name")
|
||||
description: str = Field(..., description="Criterion description")
|
||||
check_type: QualityCheckType = Field(..., description="Type of check")
|
||||
required: bool = Field(True, description="Whether criterion is required")
|
||||
weight: float = Field(1.0, ge=0.0, le=10.0, description="Weight in template score")
|
||||
acceptable_criteria: str = Field(..., description="Description of acceptable criteria")
|
||||
min_value: Optional[float] = None
|
||||
max_value: Optional[float] = None
|
||||
unit: Optional[str] = None
|
||||
is_critical: bool = Field(False, description="Whether failure is critical")
|
||||
|
||||
|
||||
class QualityCheckResult(BaseModel):
|
||||
"""Result of a quality check criterion"""
|
||||
criterion_id: str = Field(..., description="Criterion identifier")
|
||||
value: Union[float, str, bool] = Field(..., description="Check result value")
|
||||
score: float = Field(..., ge=0.0, le=10.0, description="Score for this criterion")
|
||||
notes: Optional[str] = Field(None, description="Additional notes")
|
||||
photos: Optional[List[str]] = Field(None, description="Photo URLs")
|
||||
pass_check: bool = Field(..., description="Whether criterion passed")
|
||||
timestamp: datetime = Field(..., description="When check was performed")
|
||||
|
||||
|
||||
class QualityCheckExecutionRequest(BaseModel):
|
||||
"""Schema for executing a quality check using a template"""
|
||||
template_id: UUID = Field(..., description="Quality check template ID")
|
||||
batch_id: UUID = Field(..., description="Production batch ID")
|
||||
process_stage: ProcessStage = Field(..., description="Current process stage")
|
||||
checker_id: Optional[str] = Field(None, description="Staff member performing check")
|
||||
results: List[QualityCheckResult] = Field(..., description="Check results")
|
||||
final_notes: Optional[str] = Field(None, description="Final notes")
|
||||
photos: Optional[List[str]] = Field(None, description="Additional photo URLs")
|
||||
|
||||
|
||||
class QualityCheckExecutionResponse(BaseModel):
|
||||
"""Schema for quality check execution results"""
|
||||
check_id: UUID = Field(..., description="Created quality check ID")
|
||||
overall_score: float = Field(..., ge=0.0, le=10.0, description="Overall quality score")
|
||||
overall_pass: bool = Field(..., description="Whether check passed overall")
|
||||
critical_failures: List[str] = Field(..., description="List of critical failures")
|
||||
corrective_actions: List[str] = Field(..., description="Recommended corrective actions")
|
||||
timestamp: datetime = Field(..., description="When check was completed")
|
||||
|
||||
|
||||
class ProcessStageQualityConfig(BaseModel):
|
||||
"""Configuration for quality checks at a specific process stage"""
|
||||
stage: ProcessStage = Field(..., description="Process stage")
|
||||
template_ids: List[UUID] = Field(..., description="Required template IDs")
|
||||
custom_parameters: Optional[Dict[str, Any]] = Field(None, description="Stage-specific parameters")
|
||||
is_required: bool = Field(True, description="Whether stage requires quality checks")
|
||||
blocking: bool = Field(True, description="Whether stage blocks on failed checks")
|
||||
|
||||
|
||||
class RecipeQualityConfiguration(BaseModel):
|
||||
"""Quality check configuration for a recipe"""
|
||||
stages: Dict[str, ProcessStageQualityConfig] = Field(..., description="Stage configurations")
|
||||
global_parameters: Optional[Dict[str, Any]] = Field(None, description="Global quality parameters")
|
||||
default_templates: Optional[List[UUID]] = Field(None, description="Default template IDs")
|
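# Illustrative sketch (not part of the original module): building criterion
# results and a simple equal-weighted score, the kind of aggregation the
# execution endpoint is expected to perform. Criterion ids, values and scores
# are hypothetical; in practice QualityCheckCriterion.weight would drive the weighting.
def _example_weighted_score() -> float:
    results = [
        QualityCheckResult(
            criterion_id="crumb-structure",
            value="even",
            score=8.5,
            pass_check=True,
            timestamp=datetime(2025, 1, 12, 10, 30),
        ),
        QualityCheckResult(
            criterion_id="core-temperature",
            value=94.0,
            score=9.0,
            pass_check=True,
            timestamp=datetime(2025, 1, 12, 10, 31),
        ),
    ]
    return sum(r.score for r in results) / len(results)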
||||
12
services/production/app/services/__init__.py
Normal file
@@ -0,0 +1,12 @@
# ================================================================
# services/production/app/services/__init__.py
# ================================================================
"""
Business logic services
"""

from .production_service import ProductionService

__all__ = [
    "ProductionService"
]
19
services/production/app/services/iot/__init__.py
Normal file
@@ -0,0 +1,19 @@
"""
IoT integration services for equipment connectivity
"""

from .base_connector import (
    BaseIoTConnector,
    SensorReading,
    ConnectionStatus,
    EquipmentCapabilities,
    ConnectorFactory
)

__all__ = [
    'BaseIoTConnector',
    'SensorReading',
    'ConnectionStatus',
    'EquipmentCapabilities',
    'ConnectorFactory',
]
242
services/production/app/services/iot/base_connector.py
Normal file
@@ -0,0 +1,242 @@
|
||||
"""
|
||||
Base IoT connector interface for equipment integration
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
|
||||
class SensorReading:
|
||||
"""Standardized sensor reading data structure"""
|
||||
timestamp: datetime
|
||||
temperature: Optional[float] = None
|
||||
temperature_zones: Optional[Dict[str, float]] = None
|
||||
target_temperature: Optional[float] = None
|
||||
humidity: Optional[float] = None
|
||||
target_humidity: Optional[float] = None
|
||||
energy_consumption_kwh: Optional[float] = None
|
||||
power_current_kw: Optional[float] = None
|
||||
operational_status: Optional[str] = None
|
||||
cycle_stage: Optional[str] = None
|
||||
cycle_progress_percentage: Optional[float] = None
|
||||
time_remaining_minutes: Optional[int] = None
|
||||
motor_speed_rpm: Optional[float] = None
|
||||
door_status: Optional[str] = None
|
||||
steam_level: Optional[float] = None
|
||||
product_weight_kg: Optional[float] = None
|
||||
moisture_content: Optional[float] = None
|
||||
additional_sensors: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConnectionStatus:
|
||||
"""Connection status information"""
|
||||
is_connected: bool
|
||||
status: str # connected, disconnected, error, unknown
|
||||
message: str
|
||||
response_time_ms: Optional[int] = None
|
||||
error_details: Optional[str] = None
|
||||
last_successful_connection: Optional[datetime] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class EquipmentCapabilities:
|
||||
"""Equipment IoT capabilities"""
|
||||
supports_temperature: bool = False
|
||||
supports_humidity: bool = False
|
||||
supports_energy_monitoring: bool = False
|
||||
supports_remote_control: bool = False
|
||||
supports_realtime: bool = False
|
||||
temperature_zones: int = 1
|
||||
supported_protocols: Optional[List[str]] = None
|
||||
manufacturer_specific_features: Optional[Dict[str, Any]] = None
|
||||
|
||||
def __post_init__(self):
|
||||
if self.supported_protocols is None:
|
||||
self.supported_protocols = []
|
||||
|
||||
|
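# Illustrative sketch (not part of the original module): __post_init__ above
# replaces the None default so every instance gets its own protocol list.
def _example_capabilities_default() -> None:
    caps = EquipmentCapabilities(supports_temperature=True)
    assert caps.supported_protocols == []
    caps.supported_protocols.append("rest_api")  # mutation stays local to this instance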
||||
class BaseIoTConnector(ABC):
|
||||
"""
|
||||
Base abstract class for IoT equipment connectors
|
||||
|
||||
All manufacturer-specific connectors must implement this interface
|
||||
"""
|
||||
|
||||
def __init__(self, equipment_id: str, config: Dict[str, Any]):
|
||||
"""
|
||||
Initialize the IoT connector
|
||||
|
||||
Args:
|
||||
equipment_id: Unique equipment identifier
|
||||
config: Connection configuration including endpoint, credentials, etc.
|
||||
"""
|
||||
self.equipment_id = equipment_id
|
||||
self.config = config
|
||||
self.endpoint = config.get('endpoint')
|
||||
self.port = config.get('port')
|
||||
self.credentials = config.get('credentials', {})
|
||||
self._is_connected = False
|
||||
self._last_error: Optional[str] = None
|
||||
|
||||
@abstractmethod
|
||||
async def connect(self) -> ConnectionStatus:
|
||||
"""
|
||||
Establish connection to the equipment
|
||||
|
||||
Returns:
|
||||
ConnectionStatus with connection details
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def disconnect(self) -> bool:
|
||||
"""
|
||||
Close connection to the equipment
|
||||
|
||||
Returns:
|
||||
True if disconnected successfully
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def test_connection(self) -> ConnectionStatus:
|
||||
"""
|
||||
Test connection without establishing persistent connection
|
||||
|
||||
Returns:
|
||||
ConnectionStatus with test results
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_current_reading(self) -> Optional[SensorReading]:
|
||||
"""
|
||||
Get current sensor readings from the equipment
|
||||
|
||||
Returns:
|
||||
SensorReading with current data or None if unavailable
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_capabilities(self) -> EquipmentCapabilities:
|
||||
"""
|
||||
Discover equipment capabilities
|
||||
|
||||
Returns:
|
||||
EquipmentCapabilities describing what the equipment supports
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_status(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Get equipment status information
|
||||
|
||||
Returns:
|
||||
Dictionary with status details
|
||||
"""
|
||||
pass
|
||||
|
||||
async def set_target_temperature(self, temperature: float) -> bool:
|
||||
"""
|
||||
Set target temperature (if supported)
|
||||
|
||||
Args:
|
||||
temperature: Target temperature in Celsius
|
||||
|
||||
Returns:
|
||||
True if command sent successfully
|
||||
"""
|
||||
raise NotImplementedError("Remote control not supported by this equipment")
|
||||
|
||||
async def start_cycle(self, params: Dict[str, Any]) -> bool:
|
||||
"""
|
||||
Start production cycle (if supported)
|
||||
|
||||
Args:
|
||||
params: Cycle parameters
|
||||
|
||||
Returns:
|
||||
True if cycle started successfully
|
||||
"""
|
||||
raise NotImplementedError("Remote control not supported by this equipment")
|
||||
|
||||
async def stop_cycle(self) -> bool:
|
||||
"""
|
||||
Stop current production cycle (if supported)
|
||||
|
||||
Returns:
|
||||
True if cycle stopped successfully
|
||||
"""
|
||||
raise NotImplementedError("Remote control not supported by this equipment")
|
||||
|
||||
def get_protocol_name(self) -> str:
|
||||
"""Get the protocol name used by this connector"""
|
||||
return self.__class__.__name__.replace('Connector', '').lower()
|
||||
|
||||
def is_connected(self) -> bool:
|
||||
"""Check if currently connected"""
|
||||
return self._is_connected
|
||||
|
||||
def get_last_error(self) -> Optional[str]:
|
||||
"""Get last error message"""
|
||||
return self._last_error
|
||||
|
||||
def _set_error(self, error: str):
|
||||
"""Set error message"""
|
||||
self._last_error = error
|
||||
|
||||
def _clear_error(self):
|
||||
"""Clear error message"""
|
||||
self._last_error = None
|
||||
|
||||
|
||||
class ConnectorFactory:
|
||||
"""
|
||||
Factory for creating appropriate IoT connectors based on protocol
|
||||
"""
|
||||
|
||||
_connectors: Dict[str, type] = {}
|
||||
|
||||
@classmethod
|
||||
def register_connector(cls, protocol: str, connector_class: type):
|
||||
"""
|
||||
Register a connector implementation
|
||||
|
||||
Args:
|
||||
protocol: Protocol name (e.g., 'rest_api', 'opc_ua')
|
||||
connector_class: Connector class implementing BaseIoTConnector
|
||||
"""
|
||||
cls._connectors[protocol.lower()] = connector_class
|
||||
|
||||
@classmethod
|
||||
def create_connector(cls, protocol: str, equipment_id: str, config: Dict[str, Any]) -> BaseIoTConnector:
|
||||
"""
|
||||
Create connector instance for specified protocol
|
||||
|
||||
Args:
|
||||
protocol: Protocol name
|
||||
equipment_id: Equipment identifier
|
||||
config: Connection configuration
|
||||
|
||||
Returns:
|
||||
Connector instance
|
||||
|
||||
Raises:
|
||||
ValueError: If protocol not supported
|
||||
"""
|
||||
connector_class = cls._connectors.get(protocol.lower())
|
||||
if not connector_class:
|
||||
raise ValueError(f"Unsupported IoT protocol: {protocol}")
|
||||
|
||||
return connector_class(equipment_id, config)
|
||||
|
||||
@classmethod
|
||||
def get_supported_protocols(cls) -> List[str]:
|
||||
"""Get list of supported protocols"""
|
||||
return list(cls._connectors.keys())
|
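# Illustrative sketch (not part of the original module): a concrete connector
# registers itself on import, after which the factory can build it from a
# protocol string. The endpoint and API key below are placeholders.
async def _example_factory_usage() -> None:
    # Importing the module executes its ConnectorFactory.register_connector(...) call
    from . import rest_api_connector  # noqa: F401

    connector = ConnectorFactory.create_connector(
        protocol="rest_api",
        equipment_id="oven-01",
        config={
            "endpoint": "https://oven.example.local",
            "port": 443,
            "credentials": {"api_key": "demo-key"},
        },
    )
    status = await connector.test_connection()
    print(status.is_connected, ConnectorFactory.get_supported_protocols())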
||||
156
services/production/app/services/iot/rational_connector.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""
|
||||
Rational ConnectedCooking API connector
|
||||
For Rational iCombi ovens with ConnectedCooking cloud platform
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
from .rest_api_connector import GenericRESTAPIConnector
|
||||
from .base_connector import SensorReading, EquipmentCapabilities
|
||||
|
||||
|
||||
class RationalConnectedCookingConnector(GenericRESTAPIConnector):
|
||||
"""
|
||||
Connector for Rational iCombi ovens via ConnectedCooking platform
|
||||
|
||||
Expected configuration:
|
||||
{
|
||||
"endpoint": "https://www.connectedcooking.com/api/v1",
|
||||
"port": 443,
|
||||
"credentials": {
|
||||
"username": "your-email@example.com",
|
||||
"password": "your-password",
|
||||
# Or use API token if available
|
||||
"token": "your-bearer-token"
|
||||
},
|
||||
"additional_config": {
|
||||
"unit_id": "12345", # Rational unit ID from ConnectedCooking
|
||||
"data_endpoint": "/units/{unit_id}/status",
|
||||
"status_endpoint": "/units/{unit_id}",
|
||||
"timeout": 15
|
||||
}
|
||||
}
|
||||
|
||||
API Documentation: Contact Rational at cc-support@rational-online.com
|
||||
"""
|
||||
|
||||
def __init__(self, equipment_id: str, config: Dict[str, Any]):
|
||||
# Resolve the Rational unit_id (falls back to the generic equipment_id if not configured)
|
||||
self.unit_id = config.get('additional_config', {}).get('unit_id', equipment_id)
|
||||
|
||||
# Update endpoints to use unit_id
|
||||
if 'additional_config' not in config:
|
||||
config['additional_config'] = {}
|
||||
|
||||
config['additional_config'].setdefault(
|
||||
'data_endpoint', f'/units/{self.unit_id}/status'
|
||||
)
|
||||
config['additional_config'].setdefault(
|
||||
'status_endpoint', f'/units/{self.unit_id}'
|
||||
)
|
||||
|
||||
super().__init__(equipment_id, config)
|
||||
|
||||
def _parse_sensor_data(self, data: Dict[str, Any]) -> SensorReading:
|
||||
"""
|
||||
Parse Rational-specific API response
|
||||
|
||||
Expected Rational ConnectedCooking response format (example):
|
||||
{
|
||||
"timestamp": "2025-01-12T10:30:00Z",
|
||||
"unit_status": "cooking",
|
||||
"cooking_mode": "combi_steam",
|
||||
"cabinet_temperature": 185.0,
|
||||
"core_temperature": 72.0,
|
||||
"humidity": 65,
|
||||
"door_open": false,
|
||||
"time_remaining_seconds": 720,
|
||||
"energy_consumption": 12.5,
|
||||
...
|
||||
}
|
||||
"""
|
||||
from datetime import datetime, timezone
|
||||
|
||||
# Map Rational fields to standard SensorReading
|
||||
cabinet_temp = data.get('cabinet_temperature')
|
||||
core_temp = data.get('core_temperature')
|
||||
|
||||
# Multi-zone temperature support
|
||||
temperature_zones = {}
|
||||
if cabinet_temp is not None:
|
||||
temperature_zones['cabinet'] = cabinet_temp
|
||||
if core_temp is not None:
|
||||
temperature_zones['core'] = core_temp
|
||||
|
||||
# Map Rational-specific statuses
|
||||
unit_status = data.get('unit_status', '').lower()
|
||||
operational_status = self._map_rational_status(unit_status)
|
||||
|
||||
# Convert time remaining from seconds to minutes
|
||||
time_remaining_seconds = data.get('time_remaining_seconds')
|
||||
time_remaining_minutes = int(time_remaining_seconds / 60) if time_remaining_seconds is not None else None
|
||||
|
||||
return SensorReading(
|
||||
timestamp=self._parse_timestamp(data.get('timestamp')),
|
||||
temperature=cabinet_temp, # Primary temperature is cabinet
|
||||
temperature_zones=temperature_zones if temperature_zones else None,
|
||||
target_temperature=data.get('target_temperature') or data.get('cabinet_target_temperature'),
|
||||
humidity=data.get('humidity'),
|
||||
target_humidity=data.get('target_humidity'),
|
||||
energy_consumption_kwh=data.get('energy_consumption'),
|
||||
power_current_kw=data.get('current_power_kw'),
|
||||
operational_status=operational_status,
|
||||
cycle_stage=data.get('cooking_mode') or data.get('program_name'),
|
||||
cycle_progress_percentage=data.get('progress_percentage'),
|
||||
time_remaining_minutes=time_remaining_minutes,
|
||||
door_status=('open' if data.get('door_open') else 'closed') if 'door_open' in data else None,
|
||||
steam_level=data.get('steam_level'),
|
||||
additional_sensors={
|
||||
'cooking_mode': data.get('cooking_mode'),
|
||||
'program_name': data.get('program_name'),
|
||||
'fan_speed': data.get('fan_speed'),
|
||||
'core_temperature': core_temp,
|
||||
}
|
||||
)
|
||||
|
||||
def _map_rational_status(self, rational_status: str) -> str:
|
||||
"""Map Rational-specific status to standard operational status"""
|
||||
status_map = {
|
||||
'idle': 'idle',
|
||||
'preheating': 'warming_up',
|
||||
'cooking': 'running',
|
||||
'cooling': 'cooling_down',
|
||||
'cleaning': 'maintenance',
|
||||
'error': 'error',
|
||||
'off': 'idle'
|
||||
}
|
||||
return status_map.get(rational_status, 'unknown')
|
||||
|
||||
async def get_capabilities(self) -> EquipmentCapabilities:
|
||||
"""Get Rational iCombi capabilities"""
|
||||
return EquipmentCapabilities(
|
||||
supports_temperature=True,
|
||||
supports_humidity=True,
|
||||
supports_energy_monitoring=True,
|
||||
supports_remote_control=True, # ConnectedCooking supports remote operation
|
||||
supports_realtime=True,
|
||||
temperature_zones=2, # Cabinet + Core
|
||||
supported_protocols=['rest_api'],
|
||||
manufacturer_specific_features={
|
||||
'manufacturer': 'Rational',
|
||||
'product_line': 'iCombi',
|
||||
'platform': 'ConnectedCooking',
|
||||
'features': [
|
||||
'HACCP_documentation',
|
||||
'recipe_management',
|
||||
'remote_start',
|
||||
'cooking_programs',
|
||||
'automatic_cleaning'
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
# Register connector
|
||||
from .base_connector import ConnectorFactory
|
||||
ConnectorFactory.register_connector('rational_connected_cooking', RationalConnectedCookingConnector)
|
||||
ConnectorFactory.register_connector('rational', RationalConnectedCookingConnector) # Alias
|
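# Illustrative sketch (not part of the original module): feeding the example
# ConnectedCooking payload from the class docstring through the parser. The
# endpoint, token and unit id are placeholders; _parse_sensor_data is called
# directly only for demonstration.
def _example_rational_parsing() -> SensorReading:
    connector = RationalConnectedCookingConnector(
        equipment_id="icombi-01",
        config={
            "endpoint": "https://www.connectedcooking.com/api/v1",
            "credentials": {"token": "demo-token"},
            "additional_config": {"unit_id": "12345"},
        },
    )
    reading = connector._parse_sensor_data({
        "timestamp": "2025-01-12T10:30:00Z",
        "unit_status": "cooking",
        "cooking_mode": "combi_steam",
        "cabinet_temperature": 185.0,
        "core_temperature": 72.0,
        "humidity": 65,
        "door_open": False,
        "time_remaining_seconds": 720,
        "energy_consumption": 12.5,
    })
    assert reading.operational_status == "running"
    assert reading.time_remaining_minutes == 12
    return reading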
||||
328
services/production/app/services/iot/rest_api_connector.py
Normal file
@@ -0,0 +1,328 @@
|
||||
"""
|
||||
Generic REST API connector for IoT equipment
|
||||
Supports standard REST endpoints with JSON responses
|
||||
"""
|
||||
|
||||
import httpx
|
||||
import time
|
||||
from typing import Dict, Any, Optional
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from .base_connector import (
|
||||
BaseIoTConnector,
|
||||
SensorReading,
|
||||
ConnectionStatus,
|
||||
EquipmentCapabilities
|
||||
)
|
||||
|
||||
|
||||
class GenericRESTAPIConnector(BaseIoTConnector):
|
||||
"""
|
||||
Generic REST API connector for equipment with standard REST interfaces
|
||||
|
||||
Expected configuration:
|
||||
{
|
||||
"endpoint": "https://api.example.com",
|
||||
"port": 443,
|
||||
"credentials": {
|
||||
"api_key": "your-api-key",
|
||||
"token": "bearer-token", # Optional
|
||||
"username": "user", # Optional
|
||||
"password": "pass" # Optional
|
||||
},
|
||||
"additional_config": {
|
||||
"data_endpoint": "/api/v1/equipment/{equipment_id}/data",
|
||||
"status_endpoint": "/api/v1/equipment/{equipment_id}/status",
|
||||
"capabilities_endpoint": "/api/v1/equipment/{equipment_id}/capabilities",
|
||||
"timeout": 10,
|
||||
"verify_ssl": true
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, equipment_id: str, config: Dict[str, Any]):
|
||||
super().__init__(equipment_id, config)
|
||||
|
||||
self.timeout = config.get('additional_config', {}).get('timeout', 10)
|
||||
self.verify_ssl = config.get('additional_config', {}).get('verify_ssl', True)
|
||||
|
||||
# API endpoints (support templating with {equipment_id})
|
||||
self.data_endpoint = config.get('additional_config', {}).get(
|
||||
'data_endpoint', '/data'
|
||||
).replace('{equipment_id}', equipment_id)
|
||||
|
||||
self.status_endpoint = config.get('additional_config', {}).get(
|
||||
'status_endpoint', '/status'
|
||||
).replace('{equipment_id}', equipment_id)
|
||||
|
||||
self.capabilities_endpoint = config.get('additional_config', {}).get(
|
||||
'capabilities_endpoint', '/capabilities'
|
||||
).replace('{equipment_id}', equipment_id)
|
||||
|
||||
# Build full base URL
|
||||
port_str = f":{self.port}" if self.port and self.port not in [80, 443] else ""
|
||||
self.base_url = f"{self.endpoint}{port_str}"
|
||||
|
||||
# Authentication headers
|
||||
self._headers = self._build_auth_headers()
|
||||
|
||||
# HTTP client (will be created on demand)
|
||||
self._client: Optional[httpx.AsyncClient] = None
|
||||
|
||||
def _build_auth_headers(self) -> Dict[str, str]:
|
||||
"""Build authentication headers from credentials"""
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json"
|
||||
}
|
||||
|
||||
# API Key authentication
|
||||
if 'api_key' in self.credentials:
|
||||
headers['X-API-Key'] = self.credentials['api_key']
|
||||
|
||||
# Bearer token authentication
|
||||
if 'token' in self.credentials:
|
||||
headers['Authorization'] = f"Bearer {self.credentials['token']}"
|
||||
|
||||
# Basic auth (will be handled by httpx.BasicAuth if needed)
|
||||
|
||||
return headers
|
||||
|
||||
async def _get_client(self) -> httpx.AsyncClient:
|
||||
"""Get or create HTTP client"""
|
||||
if self._client is None:
|
||||
auth = None
|
||||
if 'username' in self.credentials and 'password' in self.credentials:
|
||||
auth = httpx.BasicAuth(
|
||||
username=self.credentials['username'],
|
||||
password=self.credentials['password']
|
||||
)
|
||||
|
||||
self._client = httpx.AsyncClient(
|
||||
base_url=self.base_url,
|
||||
headers=self._headers,
|
||||
auth=auth,
|
||||
timeout=self.timeout,
|
||||
verify=self.verify_ssl
|
||||
)
|
||||
|
||||
return self._client
|
||||
|
||||
async def connect(self) -> ConnectionStatus:
|
||||
"""Establish connection (test connectivity)"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
start_time = time.time()
|
||||
|
||||
# Try to fetch status to verify connection
|
||||
response = await client.get(self.status_endpoint)
|
||||
|
||||
response_time = int((time.time() - start_time) * 1000)
|
||||
|
||||
if response.status_code == 200:
|
||||
self._is_connected = True
|
||||
self._clear_error()
|
||||
return ConnectionStatus(
|
||||
is_connected=True,
|
||||
status="connected",
|
||||
message="Successfully connected to equipment API",
|
||||
response_time_ms=response_time,
|
||||
last_successful_connection=datetime.now(timezone.utc)
|
||||
)
|
||||
else:
|
||||
self._is_connected = False
|
||||
error_msg = f"HTTP {response.status_code}: {response.text}"
|
||||
self._set_error(error_msg)
|
||||
return ConnectionStatus(
|
||||
is_connected=False,
|
||||
status="error",
|
||||
message="Failed to connect to equipment API",
|
||||
response_time_ms=response_time,
|
||||
error_details=error_msg
|
||||
)
|
||||
|
||||
except httpx.TimeoutException as e:
|
||||
self._is_connected = False
|
||||
error_msg = f"Connection timeout: {str(e)}"
|
||||
self._set_error(error_msg)
|
||||
return ConnectionStatus(
|
||||
is_connected=False,
|
||||
status="error",
|
||||
message="Connection timeout",
|
||||
error_details=error_msg
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self._is_connected = False
|
||||
error_msg = f"Connection error: {str(e)}"
|
||||
self._set_error(error_msg)
|
||||
return ConnectionStatus(
|
||||
is_connected=False,
|
||||
status="error",
|
||||
message="Failed to connect",
|
||||
error_details=error_msg
|
||||
)
|
||||
|
||||
async def disconnect(self) -> bool:
|
||||
"""Close connection"""
|
||||
if self._client:
|
||||
await self._client.aclose()
|
||||
self._client = None
|
||||
|
||||
self._is_connected = False
|
||||
return True
|
||||
|
||||
async def test_connection(self) -> ConnectionStatus:
|
||||
"""Test connection without persisting client"""
|
||||
result = await self.connect()
|
||||
await self.disconnect()
|
||||
return result
|
||||
|
||||
async def get_current_reading(self) -> Optional[SensorReading]:
|
||||
"""Get current sensor readings from equipment"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
response = await client.get(self.data_endpoint)
|
||||
|
||||
if response.status_code != 200:
|
||||
self._set_error(f"Failed to fetch data: HTTP {response.status_code}")
|
||||
return None
|
||||
|
||||
data = response.json()
|
||||
|
||||
# Parse response into SensorReading
|
||||
# This mapping can be customized per manufacturer
|
||||
return self._parse_sensor_data(data)
|
||||
|
||||
except Exception as e:
|
||||
self._set_error(f"Error fetching sensor data: {str(e)}")
|
||||
return None
|
||||
|
||||
def _parse_sensor_data(self, data: Dict[str, Any]) -> SensorReading:
|
||||
"""
|
||||
Parse API response into standardized SensorReading
|
||||
Override this method for manufacturer-specific parsing
|
||||
"""
|
||||
# Default parsing - assumes standard field names
|
||||
return SensorReading(
|
||||
timestamp=self._parse_timestamp(data.get('timestamp')),
|
||||
temperature=data.get('temperature'),
|
||||
temperature_zones=data.get('temperature_zones'),
|
||||
target_temperature=data.get('target_temperature'),
|
||||
humidity=data.get('humidity'),
|
||||
target_humidity=data.get('target_humidity'),
|
||||
energy_consumption_kwh=data.get('energy_consumption_kwh'),
|
||||
power_current_kw=data.get('power_current_kw') or data.get('power_kw'),
|
||||
operational_status=data.get('operational_status') or data.get('status'),
|
||||
cycle_stage=data.get('cycle_stage') or data.get('stage'),
|
||||
cycle_progress_percentage=data.get('cycle_progress_percentage') or data.get('progress'),
|
||||
time_remaining_minutes=data.get('time_remaining_minutes') or data.get('time_remaining'),
|
||||
motor_speed_rpm=data.get('motor_speed_rpm'),
|
||||
door_status=data.get('door_status'),
|
||||
steam_level=data.get('steam_level'),
|
||||
product_weight_kg=data.get('product_weight_kg'),
|
||||
moisture_content=data.get('moisture_content'),
|
||||
additional_sensors=data.get('additional_sensors') or {}
|
||||
)
|
||||
|
||||
def _parse_timestamp(self, timestamp_value: Any) -> datetime:
|
||||
"""Parse timestamp from various formats"""
|
||||
if timestamp_value is None:
|
||||
return datetime.now(timezone.utc)
|
||||
|
||||
if isinstance(timestamp_value, datetime):
|
||||
return timestamp_value
|
||||
|
||||
if isinstance(timestamp_value, str):
|
||||
# Try ISO format
|
||||
try:
|
||||
return datetime.fromisoformat(timestamp_value.replace('Z', '+00:00'))
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
if isinstance(timestamp_value, (int, float)):
|
||||
# Unix timestamp
|
||||
return datetime.fromtimestamp(timestamp_value, tz=timezone.utc)
|
||||
|
||||
return datetime.now(timezone.utc)
|
||||
|
||||
async def get_capabilities(self) -> EquipmentCapabilities:
|
||||
"""Discover equipment capabilities"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
response = await client.get(self.capabilities_endpoint)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
return EquipmentCapabilities(
|
||||
supports_temperature=data.get('supports_temperature', True),
|
||||
supports_humidity=data.get('supports_humidity', False),
|
||||
supports_energy_monitoring=data.get('supports_energy_monitoring', False),
|
||||
supports_remote_control=data.get('supports_remote_control', False),
|
||||
supports_realtime=data.get('supports_realtime', True),
|
||||
temperature_zones=data.get('temperature_zones', 1),
|
||||
supported_protocols=['rest_api'],
|
||||
manufacturer_specific_features=data.get('additional_features')
|
||||
)
|
||||
else:
|
||||
# Return default capabilities if endpoint not available
|
||||
return EquipmentCapabilities(
|
||||
supports_temperature=True,
|
||||
supports_realtime=True,
|
||||
supported_protocols=['rest_api']
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
# Return minimal capabilities on error
|
||||
self._set_error(f"Error fetching capabilities: {str(e)}")
|
||||
return EquipmentCapabilities(
|
||||
supports_temperature=True,
|
||||
supports_realtime=True,
|
||||
supported_protocols=['rest_api']
|
||||
)
|
||||
|
||||
async def get_status(self) -> Dict[str, Any]:
|
||||
"""Get equipment status"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
response = await client.get(self.status_endpoint)
|
||||
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
return {
|
||||
"error": f"HTTP {response.status_code}",
|
||||
"connected": False
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
return {
|
||||
"error": str(e),
|
||||
"connected": False
|
||||
}
|
||||
|
||||
async def set_target_temperature(self, temperature: float) -> bool:
|
||||
"""Set target temperature (if supported)"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
|
||||
# POST to control endpoint
|
||||
control_endpoint = self.config.get('additional_config', {}).get(
|
||||
'control_endpoint', '/control'
|
||||
).replace('{equipment_id}', self.equipment_id)
|
||||
|
||||
response = await client.post(
|
||||
control_endpoint,
|
||||
json={"target_temperature": temperature}
|
||||
)
|
||||
|
||||
return response.status_code in [200, 201, 202]
|
||||
|
||||
except Exception as e:
|
||||
self._set_error(f"Error setting temperature: {str(e)}")
|
||||
return False
|
||||
|
||||
|
||||
# Register this connector with the factory
|
||||
from .base_connector import ConnectorFactory
|
||||
ConnectorFactory.register_connector('rest_api', GenericRESTAPIConnector)
|
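# Illustrative sketch (not part of the original module): endpoint templating and
# timestamp normalisation as implemented above. The hostname and API key are placeholders.
def _example_generic_connector() -> None:
    connector = GenericRESTAPIConnector(
        equipment_id="mixer-7",
        config={
            "endpoint": "https://iot.example.local",
            "credentials": {"api_key": "demo-key"},
            "additional_config": {
                "data_endpoint": "/api/v1/equipment/{equipment_id}/data",
            },
        },
    )
    # {equipment_id} is substituted when the connector is constructed
    assert connector.data_endpoint == "/api/v1/equipment/mixer-7/data"

    # ISO strings, unix timestamps and missing values all normalise to aware datetimes
    iso = connector._parse_timestamp("2025-01-12T10:30:00Z")
    unix = connector._parse_timestamp(1736677800)
    assert iso.tzinfo is not None and unix.tzinfo is not None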
||||
149
services/production/app/services/iot/wachtel_connector.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""
|
||||
Wachtel REMOTE connector
|
||||
For Wachtel bakery ovens with REMOTE monitoring system
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
from .rest_api_connector import GenericRESTAPIConnector
|
||||
from .base_connector import SensorReading, EquipmentCapabilities
|
||||
|
||||
|
||||
class WachtelREMOTEConnector(GenericRESTAPIConnector):
|
||||
"""
|
||||
Connector for Wachtel ovens via REMOTE monitoring system
|
||||
|
||||
Expected configuration:
|
||||
{
|
||||
"endpoint": "https://remote.wachtel.de/api", # Example endpoint
|
||||
"port": 443,
|
||||
"credentials": {
|
||||
"username": "bakery-username",
|
||||
"password": "bakery-password"
|
||||
},
|
||||
"additional_config": {
|
||||
"oven_id": "oven-serial-number",
|
||||
"data_endpoint": "/ovens/{oven_id}/readings",
|
||||
"status_endpoint": "/ovens/{oven_id}/status",
|
||||
"timeout": 10
|
||||
}
|
||||
}
|
||||
|
||||
Note: Actual API endpoints need to be obtained from Wachtel
|
||||
Contact: support@wachtel.de or visit https://www.wachtel.de
|
||||
"""
|
||||
|
||||
def __init__(self, equipment_id: str, config: Dict[str, Any]):
|
||||
self.oven_id = config.get('additional_config', {}).get('oven_id', equipment_id)
|
||||
|
||||
if 'additional_config' not in config:
|
||||
config['additional_config'] = {}
|
||||
|
||||
config['additional_config'].setdefault(
|
||||
'data_endpoint', f'/ovens/{self.oven_id}/readings'
|
||||
)
|
||||
config['additional_config'].setdefault(
|
||||
'status_endpoint', f'/ovens/{self.oven_id}/status'
|
||||
)
|
||||
|
||||
super().__init__(equipment_id, config)
|
||||
|
||||
def _parse_sensor_data(self, data: Dict[str, Any]) -> SensorReading:
|
||||
"""
|
||||
Parse Wachtel REMOTE API response
|
||||
|
||||
Expected format (to be confirmed with actual API):
|
||||
{
|
||||
"timestamp": "2025-01-12T10:30:00Z",
|
||||
"oven_status": "baking",
|
||||
"deck_temperatures": [180, 185, 190], # Multiple deck support
|
||||
"target_temperatures": [180, 185, 190],
|
||||
"energy_consumption_kwh": 15.2,
|
||||
"current_power_kw": 18.5,
|
||||
"operation_hours": 1245,
|
||||
...
|
||||
}
|
||||
"""
|
||||
# Parse deck temperatures (Wachtel ovens typically have multiple decks)
|
||||
deck_temps = data.get('deck_temperatures', [])
|
||||
temperature_zones = {}
|
||||
|
||||
if deck_temps:
|
||||
for i, temp in enumerate(deck_temps, 1):
|
||||
temperature_zones[f'deck_{i}'] = temp
|
||||
|
||||
# Primary temperature is average or first deck
|
||||
primary_temp = deck_temps[0] if deck_temps else data.get('temperature')
|
||||
|
||||
# Map Wachtel status to standard status
|
||||
oven_status = data.get('oven_status', '').lower()
|
||||
operational_status = self._map_wachtel_status(oven_status)
|
||||
|
||||
return SensorReading(
|
||||
timestamp=self._parse_timestamp(data.get('timestamp')),
|
||||
temperature=primary_temp,
|
||||
temperature_zones=temperature_zones if temperature_zones else None,
|
||||
target_temperature=data.get('target_temperature'),
|
||||
humidity=None, # Wachtel deck ovens typically don't have humidity sensors
|
||||
target_humidity=None,
|
||||
energy_consumption_kwh=data.get('energy_consumption_kwh'),
|
||||
power_current_kw=data.get('current_power_kw'),
|
||||
operational_status=operational_status,
|
||||
cycle_stage=data.get('baking_program'),
|
||||
cycle_progress_percentage=data.get('cycle_progress'),
|
||||
time_remaining_minutes=data.get('time_remaining_minutes'),
|
||||
door_status=None, # Deck ovens don't typically report door status
|
||||
steam_level=data.get('steam_injection_active'),
|
||||
additional_sensors={
|
||||
'deck_count': len(deck_temps),
|
||||
'operation_hours': data.get('operation_hours'),
|
||||
'maintenance_due': data.get('maintenance_due'),
|
||||
'deck_temperatures': deck_temps,
|
||||
'target_temperatures': data.get('target_temperatures'),
|
||||
}
|
||||
)
|
||||
|
||||
def _map_wachtel_status(self, wachtel_status: str) -> str:
|
||||
"""Map Wachtel-specific status to standard operational status"""
|
||||
status_map = {
|
||||
'off': 'idle',
|
||||
'standby': 'idle',
|
||||
'preheating': 'warming_up',
|
||||
'baking': 'running',
|
||||
'ready': 'idle',
|
||||
'error': 'error',
|
||||
'maintenance': 'maintenance'
|
||||
}
|
||||
return status_map.get(wachtel_status, 'unknown')
|
||||
|
||||
async def get_capabilities(self) -> EquipmentCapabilities:
|
||||
"""Get Wachtel oven capabilities"""
|
||||
# Try to determine number of decks from config or API
|
||||
deck_count = self.config.get('additional_config', {}).get('deck_count', 3)
|
||||
|
||||
return EquipmentCapabilities(
|
||||
supports_temperature=True,
|
||||
supports_humidity=False, # Typically not available on deck ovens
|
||||
supports_energy_monitoring=True,
|
||||
supports_remote_control=False, # REMOTE is monitoring only
|
||||
supports_realtime=True,
|
||||
temperature_zones=deck_count,
|
||||
supported_protocols=['rest_api'],
|
||||
manufacturer_specific_features={
|
||||
'manufacturer': 'Wachtel',
|
||||
'product_line': 'Deck Ovens',
|
||||
'platform': 'REMOTE',
|
||||
'features': [
|
||||
'multi_deck_monitoring',
|
||||
'energy_consumption_tracking',
|
||||
'maintenance_alerts',
|
||||
'operation_hours_tracking',
|
||||
'deck_specific_temperature_control'
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
# Register connector
|
||||
from .base_connector import ConnectorFactory
|
||||
ConnectorFactory.register_connector('wachtel_remote', WachtelREMOTEConnector)
|
||||
ConnectorFactory.register_connector('wachtel', WachtelREMOTEConnector) # Alias
|
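# Illustrative sketch (not part of the original module): deck temperatures from a
# REMOTE-style payload become named temperature zones. Serial number, credentials
# and readings are hypothetical; _parse_sensor_data is called directly only for demonstration.
def _example_wachtel_parsing() -> SensorReading:
    connector = WachtelREMOTEConnector(
        equipment_id="deck-oven-01",
        config={
            "endpoint": "https://remote.wachtel.de/api",
            "credentials": {"username": "demo", "password": "demo"},
            "additional_config": {"oven_id": "SN-4711"},
        },
    )
    reading = connector._parse_sensor_data({
        "timestamp": "2025-01-12T10:30:00Z",
        "oven_status": "baking",
        "deck_temperatures": [180, 185, 190],
        "target_temperatures": [180, 185, 190],
        "energy_consumption_kwh": 15.2,
        "current_power_kw": 18.5,
    })
    assert reading.temperature == 180
    assert reading.temperature_zones == {"deck_1": 180, "deck_2": 185, "deck_3": 190}
    assert reading.operational_status == "running"
    return reading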
||||
413
services/production/app/services/production_alert_service.py
Normal file
@@ -0,0 +1,413 @@
|
||||
"""
|
||||
Production Alert Service - Simplified
|
||||
|
||||
Emits minimal events using the shared UnifiedEventPublisher.
|
||||
All enrichment handled by alert_processor.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from typing import List, Dict, Any, Optional
|
||||
from uuid import UUID
|
||||
from datetime import datetime
|
||||
import structlog
|
||||
|
||||
from shared.messaging import UnifiedEventPublisher
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionAlertService:
|
||||
"""Production alert service using EventPublisher with database access for delay checks"""
|
||||
|
||||
def __init__(self, event_publisher: UnifiedEventPublisher, database_manager=None):
|
||||
self.publisher = event_publisher
|
||||
self.database_manager = database_manager
|
||||
|
||||
async def start(self):
|
||||
"""Start the production alert service"""
|
||||
logger.info("ProductionAlertService started")
|
||||
# Add any initialization logic here if needed
|
||||
|
||||
async def stop(self):
|
||||
"""Stop the production alert service"""
|
||||
logger.info("ProductionAlertService stopped")
|
||||
# Add any cleanup logic here if needed
|
||||
|
||||
async def emit_production_delay(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
product_name: str,
|
||||
batch_number: str,
|
||||
delay_minutes: int,
|
||||
affected_orders: int = 0,
|
||||
customer_names: Optional[List[str]] = None
|
||||
):
|
||||
"""Emit production delay event"""
|
||||
|
||||
# Determine severity based on delay
|
||||
if delay_minutes > 120:
|
||||
severity = "urgent"
|
||||
elif delay_minutes > 60:
|
||||
severity = "high"
|
||||
else:
|
||||
severity = "medium"
|
||||
|
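# Example mapping: a 45-minute delay stays "medium", 90 minutes becomes "high",
# and 150 minutes becomes "urgent".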
||||
metadata = {
|
||||
"batch_id": str(batch_id),
|
||||
"product_name": product_name,
|
||||
"batch_number": batch_number,
|
||||
"delay_minutes": delay_minutes,
|
||||
"affected_orders": affected_orders
|
||||
}
|
||||
|
||||
if customer_names:
|
||||
metadata["customer_names"] = customer_names
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.production_delay",
|
||||
tenant_id=tenant_id,
|
||||
severity=severity,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"production_delay_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_number=batch_number,
|
||||
delay_minutes=delay_minutes
|
||||
)
|
||||
|
||||
async def emit_equipment_failure(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
equipment_id: UUID,
|
||||
equipment_name: str,
|
||||
equipment_type: str,
|
||||
affected_batches: int = 0
|
||||
):
|
||||
"""Emit equipment failure event"""
|
||||
|
||||
metadata = {
|
||||
"equipment_id": str(equipment_id),
|
||||
"equipment_name": equipment_name,
|
||||
"equipment_type": equipment_type,
|
||||
"affected_batches": affected_batches
|
||||
}
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.equipment_failure",
|
||||
tenant_id=tenant_id,
|
||||
severity="urgent",
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"equipment_failure_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_name=equipment_name
|
||||
)
|
||||
|
||||
async def emit_capacity_overload(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
current_load_percent: float,
|
||||
planned_batches: int,
|
||||
available_capacity: int,
|
||||
affected_date: str
|
||||
):
|
||||
"""Emit capacity overload warning"""
|
||||
|
||||
metadata = {
|
||||
"current_load_percent": current_load_percent,
|
||||
"planned_batches": planned_batches,
|
||||
"available_capacity": available_capacity,
|
||||
"affected_date": affected_date
|
||||
}
|
||||
|
||||
# Determine severity based on overload
|
||||
if current_load_percent > 120:
|
||||
severity = "urgent"
|
||||
elif current_load_percent > 100:
|
||||
severity = "high"
|
||||
else:
|
||||
severity = "medium"
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.capacity_overload",
|
||||
tenant_id=tenant_id,
|
||||
severity=severity,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"capacity_overload_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
current_load_percent=current_load_percent
|
||||
)
|
||||
|
||||
async def emit_quality_issue(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
product_name: str,
|
||||
batch_number: str,
|
||||
issue_type: str,
|
||||
issue_description: str,
|
||||
affected_quantity: float
|
||||
):
|
||||
"""Emit quality issue alert"""
|
||||
|
||||
metadata = {
|
||||
"batch_id": str(batch_id),
|
||||
"product_name": product_name,
|
||||
"batch_number": batch_number,
|
||||
"issue_type": issue_type,
|
||||
"issue_description": issue_description,
|
||||
"affected_quantity": affected_quantity
|
||||
}
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.quality_issue",
|
||||
tenant_id=tenant_id,
|
||||
severity="high",
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"quality_issue_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_number=batch_number,
|
||||
issue_type=issue_type
|
||||
)
|
||||
|
||||
async def emit_start_production_alert(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
product_name: str,
|
||||
batch_number: str,
|
||||
reasoning_data: Optional[Dict[str, Any]] = None,
|
||||
planned_start_time: Optional[str] = None
|
||||
):
|
||||
"""Emit start production alert when a new batch is created"""
|
||||
|
||||
metadata = {
|
||||
"batch_id": str(batch_id),
|
||||
"product_name": product_name,
|
||||
"batch_number": batch_number,
|
||||
"reasoning_data": reasoning_data
|
||||
}
|
||||
|
||||
if planned_start_time:
|
||||
metadata["planned_start_time"] = planned_start_time
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.start_production",
|
||||
tenant_id=tenant_id,
|
||||
severity="medium",
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"start_production_alert_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_number=batch_number,
|
||||
reasoning_type=reasoning_data.get("type") if reasoning_data else None
|
||||
)
|
||||
|
||||
async def emit_batch_start_delayed(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
product_name: str,
|
||||
batch_number: str,
|
||||
scheduled_start: str,
|
||||
delay_reason: Optional[str] = None
|
||||
):
|
||||
"""Emit batch start delay alert"""
|
||||
|
||||
metadata = {
|
||||
"batch_id": str(batch_id),
|
||||
"product_name": product_name,
|
||||
"batch_number": batch_number,
|
||||
"scheduled_start": scheduled_start
|
||||
}
|
||||
|
||||
if delay_reason:
|
||||
metadata["delay_reason"] = delay_reason
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.batch_start_delayed",
|
||||
tenant_id=tenant_id,
|
||||
severity="high",
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"batch_start_delayed_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_number=batch_number
|
||||
)
|
||||
|
||||
async def emit_missing_ingredients(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: UUID,
|
||||
product_name: str,
|
||||
batch_number: str,
|
||||
missing_ingredients: List[Dict[str, Any]]
|
||||
):
|
||||
"""Emit missing ingredients alert"""
|
||||
|
||||
metadata = {
|
||||
"batch_id": str(batch_id),
|
||||
"product_name": product_name,
|
||||
"batch_number": batch_number,
|
||||
"missing_ingredients": missing_ingredients,
|
||||
"missing_count": len(missing_ingredients)
|
||||
}
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.missing_ingredients",
|
||||
tenant_id=tenant_id,
|
||||
severity="urgent",
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"missing_ingredients_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_number=batch_number,
|
||||
missing_count=len(missing_ingredients)
|
||||
)
|
||||
|
||||
async def emit_equipment_maintenance_due(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
equipment_id: UUID,
|
||||
equipment_name: str,
|
||||
equipment_type: str,
|
||||
last_maintenance_date: Optional[str] = None,
|
||||
days_overdue: Optional[int] = None
|
||||
):
|
||||
"""Emit equipment maintenance due alert"""
|
||||
|
||||
metadata = {
|
||||
"equipment_id": str(equipment_id),
|
||||
"equipment_name": equipment_name,
|
||||
"equipment_type": equipment_type
|
||||
}
|
||||
|
||||
if last_maintenance_date:
|
||||
metadata["last_maintenance_date"] = last_maintenance_date
|
||||
if days_overdue:
|
||||
metadata["days_overdue"] = days_overdue
|
||||
|
||||
# Determine severity based on overdue days
|
||||
if days_overdue and days_overdue > 30:
|
||||
severity = "high"
|
||||
else:
|
||||
severity = "medium"
|
||||
|
||||
await self.publisher.publish_alert(
|
||||
event_type="production.equipment_maintenance_due",
|
||||
tenant_id=tenant_id,
|
||||
severity=severity,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"equipment_maintenance_due_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_name=equipment_name
|
||||
)
|
||||
|
||||
# Recommendation methods
|
||||
|
||||
async def emit_efficiency_recommendation(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
recommendation_type: str,
|
||||
description: str,
|
||||
potential_improvement_percent: float,
|
||||
affected_batches: Optional[int] = None
|
||||
):
|
||||
"""Emit production efficiency recommendation"""
|
||||
|
||||
metadata = {
|
||||
"recommendation_type": recommendation_type,
|
||||
"description": description,
|
||||
"potential_improvement_percent": potential_improvement_percent
|
||||
}
|
||||
|
||||
if affected_batches:
|
||||
metadata["affected_batches"] = affected_batches
|
||||
|
||||
await self.publisher.publish_recommendation(
|
||||
event_type="production.efficiency_recommendation",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"efficiency_recommendation_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
recommendation_type=recommendation_type
|
||||
)
|
||||
|
||||
async def emit_energy_optimization(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
current_usage_kwh: float,
|
||||
potential_savings_kwh: float,
|
||||
potential_savings_eur: float,
|
||||
optimization_suggestions: List[str]
|
||||
):
|
||||
"""Emit energy optimization recommendation"""
|
||||
|
||||
metadata = {
|
||||
"current_usage_kwh": current_usage_kwh,
|
||||
"potential_savings_kwh": potential_savings_kwh,
|
||||
"potential_savings_eur": potential_savings_eur,
|
||||
"optimization_suggestions": optimization_suggestions
|
||||
}
|
||||
|
||||
await self.publisher.publish_recommendation(
|
||||
event_type="production.energy_optimization",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"energy_optimization_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
potential_savings_eur=potential_savings_eur
|
||||
)
|
||||
|
||||
async def emit_batch_sequence_optimization(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
current_sequence: List[str],
|
||||
optimized_sequence: List[str],
|
||||
estimated_time_savings_minutes: int
|
||||
):
|
||||
"""Emit batch sequence optimization recommendation"""
|
||||
|
||||
metadata = {
|
||||
"current_sequence": current_sequence,
|
||||
"optimized_sequence": optimized_sequence,
|
||||
"estimated_time_savings_minutes": estimated_time_savings_minutes
|
||||
}
|
||||
|
||||
await self.publisher.publish_recommendation(
|
||||
event_type="production.batch_sequence_optimization",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"batch_sequence_optimization_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
time_savings=estimated_time_savings_minutes
|
||||
)
|
||||
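# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): how a caller such
# as the production scheduler might emit a delay alert. The tenant, batch and
# product values below are hypothetical examples.
async def _example_emit_production_delay(alert_service: "ProductionAlertService") -> None:
    from uuid import uuid4  # local import keeps the sketch self-contained

    # Publishes a "production.production_delay" alert; severity is derived from
    # the delay inside the service and enrichment happens in the alert processor.
    await alert_service.emit_production_delay(
        tenant_id=uuid4(),
        batch_id=uuid4(),
        product_name="Sourdough Loaf",
        batch_number="BATCH-0001",
        delay_minutes=45,
        affected_orders=3,
    )
# ---------------------------------------------------------------------------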
@@ -0,0 +1,210 @@
|
||||
"""
|
||||
Production Notification Service - Simplified
|
||||
|
||||
Emits minimal events using EventPublisher.
|
||||
All enrichment handled by alert_processor.
|
||||
|
||||
These are NOTIFICATIONS (not alerts) - informational state changes that don't require user action.
|
||||
"""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional, Dict, Any
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from shared.messaging import UnifiedEventPublisher
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionNotificationService:
|
||||
"""
|
||||
Service for emitting production notifications using EventPublisher.
|
||||
"""
|
||||
|
||||
def __init__(self, event_publisher: UnifiedEventPublisher):
|
||||
self.publisher = event_publisher
|
||||
|
||||
async def emit_batch_state_changed_notification(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: str,
|
||||
product_sku: str,
|
||||
product_name: str,
|
||||
old_status: str,
|
||||
new_status: str,
|
||||
quantity: float,
|
||||
unit: str,
|
||||
assigned_to: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Emit notification when a production batch changes state.
|
||||
"""
|
||||
# Build message based on state transition
|
||||
transition_messages = {
|
||||
("PENDING", "IN_PROGRESS"): f"Production started for {product_name}",
|
||||
("IN_PROGRESS", "COMPLETED"): f"Production completed for {product_name}",
|
||||
("IN_PROGRESS", "PAUSED"): f"Production paused for {product_name}",
|
||||
("PAUSED", "IN_PROGRESS"): f"Production resumed for {product_name}",
|
||||
("IN_PROGRESS", "FAILED"): f"Production failed for {product_name}",
|
||||
}
|
||||
|
||||
message = transition_messages.get(
|
||||
(old_status, new_status),
|
||||
f"{product_name} status changed from {old_status} to {new_status}"
|
||||
)
|
||||
|
||||
metadata = {
|
||||
"batch_id": batch_id,
|
||||
"product_sku": product_sku,
|
||||
"product_name": product_name,
|
||||
"old_status": old_status,
|
||||
"new_status": new_status,
|
||||
"quantity": float(quantity),
|
||||
"unit": unit,
|
||||
"assigned_to": assigned_to,
|
||||
"state_changed_at": datetime.now(timezone.utc).isoformat(),
|
||||
}
|
||||
|
||||
await self.publisher.publish_notification(
|
||||
event_type="production.batch_state_changed",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"batch_state_changed_notification_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch_id,
|
||||
old_status=old_status,
|
||||
new_status=new_status
|
||||
)
|
||||
|
||||
async def emit_batch_completed_notification(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: str,
|
||||
product_sku: str,
|
||||
product_name: str,
|
||||
quantity_produced: float,
|
||||
unit: str,
|
||||
production_duration_minutes: Optional[int] = None,
|
||||
quality_score: Optional[float] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Emit notification when a production batch is completed.
|
||||
"""
|
||||
message_parts = [f"Produced {quantity_produced} {unit} of {product_name}"]
|
||||
if production_duration_minutes:
|
||||
message_parts.append(f"in {production_duration_minutes} minutes")
|
||||
if quality_score:
|
||||
message_parts.append(f"(Quality: {quality_score:.1f}%)")
|
||||
|
||||
message = " ".join(message_parts)
|
||||
|
||||
metadata = {
|
||||
"batch_id": batch_id,
|
||||
"product_sku": product_sku,
|
||||
"product_name": product_name,
|
||||
"quantity_produced": float(quantity_produced),
|
||||
"unit": unit,
|
||||
"production_duration_minutes": production_duration_minutes,
|
||||
"quality_score": quality_score,
|
||||
"completed_at": datetime.now(timezone.utc).isoformat(),
|
||||
}
|
||||
|
||||
await self.publisher.publish_notification(
|
||||
event_type="production.batch_completed",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"batch_completed_notification_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch_id,
|
||||
quantity_produced=quantity_produced
|
||||
)
|
||||
|
||||
async def emit_batch_started_notification(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
batch_id: str,
|
||||
product_sku: str,
|
||||
product_name: str,
|
||||
quantity_planned: float,
|
||||
unit: str,
|
||||
estimated_duration_minutes: Optional[int] = None,
|
||||
assigned_to: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Emit notification when a production batch is started.
|
||||
"""
|
||||
message_parts = [f"Started production of {quantity_planned} {unit} of {product_name}"]
|
||||
if estimated_duration_minutes:
|
||||
message_parts.append(f"(Est. {estimated_duration_minutes} min)")
|
||||
if assigned_to:
|
||||
message_parts.append(f"- Assigned to {assigned_to}")
|
||||
|
||||
message = " ".join(message_parts)
|
||||
|
||||
metadata = {
|
||||
"batch_id": batch_id,
|
||||
"product_sku": product_sku,
|
||||
"product_name": product_name,
|
||||
"quantity_planned": float(quantity_planned),
|
||||
"unit": unit,
|
||||
"estimated_duration_minutes": estimated_duration_minutes,
|
||||
"assigned_to": assigned_to,
|
||||
"started_at": datetime.now(timezone.utc).isoformat(),
|
||||
}
|
||||
|
||||
await self.publisher.publish_notification(
|
||||
event_type="production.batch_started",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"batch_started_notification_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch_id
|
||||
)
|
||||
|
||||
async def emit_equipment_status_notification(
|
||||
self,
|
||||
tenant_id: UUID,
|
||||
equipment_id: str,
|
||||
equipment_name: str,
|
||||
old_status: str,
|
||||
new_status: str,
|
||||
reason: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Emit notification when equipment status changes.
|
||||
"""
|
||||
message = f"{equipment_name} status: {old_status} → {new_status}"
|
||||
if reason:
|
||||
message += f" - {reason}"
|
||||
|
||||
metadata = {
|
||||
"equipment_id": equipment_id,
|
||||
"equipment_name": equipment_name,
|
||||
"old_status": old_status,
|
||||
"new_status": new_status,
|
||||
"reason": reason,
|
||||
"status_changed_at": datetime.now(timezone.utc).isoformat(),
|
||||
}
|
||||
|
||||
await self.publisher.publish_notification(
|
||||
event_type="production.equipment_status_changed",
|
||||
tenant_id=tenant_id,
|
||||
data=metadata
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"equipment_status_notification_emitted",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_id=equipment_id,
|
||||
new_status=new_status
|
||||
)
|
||||
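# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): emitting a batch
# state-change notification. The identifiers, SKU and product values below are
# hypothetical examples.
async def _example_emit_state_change(publisher: UnifiedEventPublisher) -> None:
    from uuid import uuid4  # local import keeps the sketch self-contained

    service = ProductionNotificationService(publisher)
    # Informational event only: it requires no user action and any further
    # enrichment is handled by the alert processor.
    await service.emit_batch_state_changed_notification(
        tenant_id=uuid4(),
        batch_id="batch-0001",
        product_sku="SKU-BREAD-01",
        product_name="Sourdough Loaf",
        old_status="PENDING",
        new_status="IN_PROGRESS",
        quantity=120.0,
        unit="units",
        assigned_to="baker-01",
    )
# ---------------------------------------------------------------------------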
652
services/production/app/services/production_scheduler.py
Normal file
652
services/production/app/services/production_scheduler.py
Normal file
@@ -0,0 +1,652 @@
|
||||
"""
|
||||
Production Scheduler Service
|
||||
Background task that periodically checks for production alert conditions
|
||||
and triggers appropriate alerts.
|
||||
|
||||
Uses shared leader election for horizontal scaling - only one pod runs the scheduler.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from uuid import UUID
|
||||
from datetime import datetime, timedelta
|
||||
import structlog
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import text
|
||||
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.interval import IntervalTrigger
|
||||
|
||||
from app.repositories.production_batch_repository import ProductionBatchRepository
|
||||
from app.repositories.equipment_repository import EquipmentRepository
|
||||
from app.services.production_alert_service import ProductionAlertService
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class ProductionScheduler:
|
||||
"""Production scheduler service that checks for alert conditions.
|
||||
|
||||
Uses Redis-based leader election to ensure only one pod runs the scheduler.
|
||||
"""
|
||||
|
||||
def __init__(self, alert_service: ProductionAlertService, database_manager: Any, redis_url: Optional[str] = None):
|
||||
self.alert_service = alert_service
|
||||
self.database_manager = database_manager
|
||||
self.redis_url = redis_url
|
||||
self.scheduler = AsyncIOScheduler()
|
||||
self.check_interval = 300 # 5 minutes
|
||||
self.job_id = 'production_scheduler'
|
||||
|
||||
# Leader election
|
||||
self._leader_election = None
|
||||
self._redis_client = None
|
||||
self._scheduler_started = False
|
||||
|
||||
# Cache of already-emitted alerts to avoid duplicates
|
||||
self._emitted_alerts: set = set()
|
||||
self._alert_cache_ttl = 3600  # 1 hour
|
||||
self._last_cache_clear = datetime.utcnow()
|
||||
|
||||
async def start(self):
|
||||
"""Start the production scheduler with leader election"""
|
||||
try:
|
||||
# Initialize leader election if Redis URL is provided
|
||||
if self.redis_url:
|
||||
await self._setup_leader_election()
|
||||
else:
|
||||
# No Redis, start scheduler directly (standalone mode)
|
||||
logger.warning("No Redis URL provided, starting scheduler in standalone mode")
|
||||
await self._start_scheduler()
|
||||
except Exception as e:
|
||||
logger.error("Failed to setup leader election, starting in standalone mode",
|
||||
error=str(e))
|
||||
await self._start_scheduler()
|
||||
|
||||
async def _setup_leader_election(self):
|
||||
"""Setup Redis-based leader election"""
|
||||
from shared.leader_election import LeaderElectionService
|
||||
import redis.asyncio as redis
|
||||
|
||||
self._redis_client = redis.from_url(self.redis_url, decode_responses=False)
|
||||
await self._redis_client.ping()
|
||||
|
||||
self._leader_election = LeaderElectionService(
|
||||
self._redis_client,
|
||||
service_name="production-scheduler"
|
||||
)
|
||||
|
||||
await self._leader_election.start(
|
||||
on_become_leader=self._on_become_leader,
|
||||
on_lose_leader=self._on_lose_leader
|
||||
)
|
||||
|
||||
logger.info("Leader election initialized for production scheduler",
|
||||
is_leader=self._leader_election.is_leader)
|
||||
|
||||
async def _on_become_leader(self):
|
||||
"""Called when this instance becomes the leader"""
|
||||
logger.info("Became leader for production scheduler - starting scheduler")
|
||||
await self._start_scheduler()
|
||||
|
||||
async def _on_lose_leader(self):
|
||||
"""Called when this instance loses leadership"""
|
||||
logger.warning("Lost leadership for production scheduler - stopping scheduler")
|
||||
await self._stop_scheduler()
|
||||
|
||||
async def _start_scheduler(self):
|
||||
"""Start the APScheduler"""
|
||||
if self._scheduler_started:
|
||||
logger.debug("Production scheduler already started")
|
||||
return
|
||||
|
||||
if not self.scheduler.running:
|
||||
trigger = IntervalTrigger(seconds=self.check_interval)
|
||||
self.scheduler.add_job(
|
||||
self._run_scheduler_task,
|
||||
trigger=trigger,
|
||||
id=self.job_id,
|
||||
name="Production Alert Checks",
|
||||
max_instances=1
|
||||
)
|
||||
|
||||
self.scheduler.start()
|
||||
self._scheduler_started = True
|
||||
logger.info("Production scheduler started", interval_seconds=self.check_interval)
|
||||
|
||||
async def _stop_scheduler(self):
|
||||
"""Stop the APScheduler"""
|
||||
if not self._scheduler_started:
|
||||
return
|
||||
|
||||
if self.scheduler.running:
|
||||
self.scheduler.shutdown(wait=False)
|
||||
self._scheduler_started = False
|
||||
logger.info("Production scheduler stopped")
|
||||
|
||||
async def stop(self):
|
||||
"""Stop the production scheduler and leader election"""
|
||||
if self._leader_election:
|
||||
await self._leader_election.stop()
|
||||
|
||||
await self._stop_scheduler()
|
||||
|
||||
if self._redis_client:
|
||||
await self._redis_client.close()
|
||||
|
||||
@property
|
||||
def is_leader(self) -> bool:
|
||||
"""Check if this instance is the leader"""
|
||||
return self._leader_election.is_leader if self._leader_election else True
|
||||
|
||||
async def _run_scheduler_task(self):
|
||||
"""Run scheduled production alert checks"""
|
||||
start_time = datetime.now()
|
||||
logger.info("Running scheduled production alert checks")
|
||||
|
||||
try:
|
||||
# Run all alert checks
|
||||
alerts_generated = await self.check_all_conditions()
|
||||
|
||||
duration = (datetime.now() - start_time).total_seconds()
|
||||
logger.info(
|
||||
"Completed scheduled production alert checks",
|
||||
alerts_generated=alerts_generated,
|
||||
duration_seconds=round(duration, 2)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error in production scheduler task",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
async def check_all_conditions(self) -> int:
|
||||
"""
|
||||
Check all production alert conditions and trigger alerts.
|
||||
|
||||
Returns:
|
||||
int: Total number of alerts generated
|
||||
"""
|
||||
if not self.database_manager:
|
||||
logger.warning("Database manager not available for production checks")
|
||||
return 0
|
||||
|
||||
total_alerts = 0
|
||||
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
# Get repositories
|
||||
batch_repo = ProductionBatchRepository(session)
|
||||
equipment_repo = EquipmentRepository(session)
|
||||
|
||||
# Check production delays
|
||||
delay_alerts = await self._check_production_delays(batch_repo)
|
||||
total_alerts += delay_alerts
|
||||
|
||||
# Check equipment maintenance
|
||||
maintenance_alerts = await self._check_equipment_maintenance(equipment_repo)
|
||||
total_alerts += maintenance_alerts
|
||||
|
||||
# Check batch start delays (batches that should have started but haven't)
|
||||
start_delay_alerts = await self._check_batch_start_delays(batch_repo)
|
||||
total_alerts += start_delay_alerts
|
||||
|
||||
logger.info(
|
||||
"Production alert checks completed",
|
||||
total_alerts=total_alerts,
|
||||
production_delays=delay_alerts,
|
||||
equipment_maintenance=maintenance_alerts,
|
||||
batch_start_delays=start_delay_alerts
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error during production alert checks",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
return total_alerts
|
||||
|
||||
async def _check_production_delays(self, batch_repo: ProductionBatchRepository) -> int:
|
||||
"""
|
||||
Check for production delays and trigger alerts.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
|
||||
Returns:
|
||||
int: Number of delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get delayed batches from repository
|
||||
delayed_batches = await batch_repo.get_production_delays()
|
||||
|
||||
logger.info("Found delayed production batches", count=len(delayed_batches))
|
||||
|
||||
# Clear the dedup cache once its TTL has expired
|
||||
if (datetime.utcnow() - self._last_cache_clear).total_seconds() > self._alert_cache_ttl:
|
||||
self._emitted_alerts.clear()
|
||||
self._last_cache_clear = datetime.utcnow()
|
||||
logger.info("Cleared alert cache due to TTL expiration")
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_batches:
|
||||
try:
|
||||
batch_id = UUID(str(batch["id"]))
|
||||
|
||||
# Skip if an alert was already emitted for this batch
|
||||
alert_key = f"delay:{batch_id}"
|
||||
if alert_key in self._emitted_alerts:
|
||||
logger.debug("Skipping duplicate delay alert", batch_id=str(batch_id))
|
||||
continue
|
||||
|
||||
tenant_id = UUID(str(batch["tenant_id"]))
|
||||
delay_minutes = int(batch["delay_minutes"]) if batch.get("delay_minutes") else 0
|
||||
affected_orders = int(batch.get("affected_orders", 0))
|
||||
|
||||
# Emit production delay alert
|
||||
await self.alert_service.emit_production_delay(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
delay_minutes=delay_minutes,
|
||||
affected_orders=affected_orders
|
||||
)
|
||||
|
||||
# Record in the dedup cache
|
||||
self._emitted_alerts.add(alert_key)
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting production delay alert",
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking production delays", error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_equipment_maintenance(self, equipment_repo: EquipmentRepository) -> int:
|
||||
"""
|
||||
Check for equipment needing maintenance and trigger alerts.
|
||||
|
||||
Args:
|
||||
equipment_repo: Equipment repository
|
||||
|
||||
Returns:
|
||||
int: Number of maintenance alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get equipment that needs maintenance using repository method
|
||||
equipment_needing_maintenance = await equipment_repo.get_equipment_needing_maintenance()
|
||||
|
||||
logger.info(
|
||||
"Found equipment needing maintenance",
|
||||
count=len(equipment_needing_maintenance)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for equipment in equipment_needing_maintenance:
|
||||
try:
|
||||
equipment_id = UUID(equipment["id"])
|
||||
tenant_id = UUID(equipment["tenant_id"])
|
||||
days_overdue = int(equipment.get("days_overdue", 0))
|
||||
|
||||
# Emit equipment maintenance alert
|
||||
await self.alert_service.emit_equipment_maintenance_due(
|
||||
tenant_id=tenant_id,
|
||||
equipment_id=equipment_id,
|
||||
equipment_name=equipment.get("name", "Unknown Equipment"),
|
||||
equipment_type=equipment.get("type", "unknown"),
|
||||
last_maintenance_date=equipment.get("last_maintenance_date"),
|
||||
days_overdue=days_overdue
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting equipment maintenance alert",
|
||||
equipment_id=equipment.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking equipment maintenance", error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_batch_start_delays(self, batch_repo: ProductionBatchRepository) -> int:
|
||||
"""
|
||||
Check for batches that should have started but haven't.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
|
||||
Returns:
|
||||
int: Number of start delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get batches that should have started using repository method
|
||||
delayed_start_batches = await batch_repo.get_batches_with_delayed_start()
|
||||
|
||||
logger.info(
|
||||
"Found batches with delayed start",
|
||||
count=len(delayed_start_batches)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_start_batches:
|
||||
try:
|
||||
batch_id = UUID(batch["id"])
|
||||
|
||||
# Skip if an alert was already emitted for this batch
|
||||
alert_key = f"start_delay:{batch_id}"
|
||||
if alert_key in self._emitted_alerts:
|
||||
logger.debug("Skipping duplicate start delay alert", batch_id=str(batch_id))
|
||||
continue
|
||||
|
||||
tenant_id = UUID(batch["tenant_id"])
|
||||
scheduled_start = batch.get("scheduled_start_time")
|
||||
|
||||
# Emit batch start delayed alert
|
||||
await self.alert_service.emit_batch_start_delayed(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
scheduled_start=scheduled_start,
|
||||
delay_reason="Batch has not started on time"
|
||||
)
|
||||
|
||||
# Record in the dedup cache
|
||||
self._emitted_alerts.add(alert_key)
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting batch start delay alert",
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking batch start delays", error=str(e))
|
||||
return 0
|
||||
|
||||
async def trigger_manual_check(self, tenant_id: Optional[UUID] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Manually trigger production alert checks for a specific tenant or all tenants.
|
||||
|
||||
Args:
|
||||
tenant_id: Optional tenant ID to check. If None, checks all tenants.
|
||||
|
||||
Returns:
|
||||
Dict with alert generation results
|
||||
"""
|
||||
logger.info(
|
||||
"Manually triggering production alert checks",
|
||||
tenant_id=str(tenant_id) if tenant_id else "all_tenants"
|
||||
)
|
||||
|
||||
try:
|
||||
if tenant_id:
|
||||
# Run tenant-specific alert checks
|
||||
alerts_generated = await self.check_all_conditions_for_tenant(tenant_id)
|
||||
else:
|
||||
# Run all alert checks across all tenants
|
||||
alerts_generated = await self.check_all_conditions()
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"tenant_id": str(tenant_id) if tenant_id else None,
|
||||
"alerts_generated": alerts_generated,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"message": "Production alert checks completed successfully"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error during manual production alert check",
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
return {
|
||||
"success": False,
|
||||
"tenant_id": str(tenant_id) if tenant_id else None,
|
||||
"alerts_generated": 0,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
async def check_all_conditions_for_tenant(self, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check all production alert conditions for a specific tenant and trigger alerts.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant ID to check conditions for
|
||||
|
||||
Returns:
|
||||
int: Total number of alerts generated
|
||||
"""
|
||||
if not self.database_manager:
|
||||
logger.warning("Database manager not available for production checks")
|
||||
return 0
|
||||
|
||||
total_alerts = 0
|
||||
|
||||
try:
|
||||
async with self.database_manager.get_session() as session:
|
||||
# Get repositories
|
||||
batch_repo = ProductionBatchRepository(session)
|
||||
equipment_repo = EquipmentRepository(session)
|
||||
|
||||
# Check production delays for specific tenant
|
||||
delay_alerts = await self._check_production_delays_for_tenant(batch_repo, tenant_id)
|
||||
total_alerts += delay_alerts
|
||||
|
||||
# Check equipment maintenance for specific tenant
|
||||
maintenance_alerts = await self._check_equipment_maintenance_for_tenant(equipment_repo, tenant_id)
|
||||
total_alerts += maintenance_alerts
|
||||
|
||||
# Check batch start delays for specific tenant
|
||||
start_delay_alerts = await self._check_batch_start_delays_for_tenant(batch_repo, tenant_id)
|
||||
total_alerts += start_delay_alerts
|
||||
|
||||
logger.info(
|
||||
"Tenant-specific production alert checks completed",
|
||||
tenant_id=str(tenant_id),
|
||||
total_alerts=total_alerts,
|
||||
production_delays=delay_alerts,
|
||||
equipment_maintenance=maintenance_alerts,
|
||||
batch_start_delays=start_delay_alerts
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error during tenant-specific production alert checks",
|
||||
tenant_id=str(tenant_id),
|
||||
error=str(e),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
return total_alerts
|
||||
|
||||
async def _check_production_delays_for_tenant(self, batch_repo: ProductionBatchRepository, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check for production delays for a specific tenant and trigger alerts.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
tenant_id: Tenant ID to check for
|
||||
|
||||
Returns:
|
||||
int: Number of delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get delayed batches for the specific tenant using repository method
|
||||
delayed_batches = await batch_repo.get_production_delays(tenant_id)
|
||||
|
||||
logger.info("Found delayed production batches for tenant", count=len(delayed_batches), tenant_id=str(tenant_id))
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_batches:
|
||||
try:
|
||||
batch_id = UUID(str(batch["id"]))
|
||||
delay_minutes = int(batch["delay_minutes"]) if batch.get("delay_minutes") else 0
|
||||
affected_orders = int(batch.get("affected_orders", 0))
|
||||
|
||||
# Emit production delay alert
|
||||
await self.alert_service.emit_production_delay(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
delay_minutes=delay_minutes,
|
||||
affected_orders=affected_orders
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting production delay alert",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking production delays for tenant", tenant_id=str(tenant_id), error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_equipment_maintenance_for_tenant(self, equipment_repo: EquipmentRepository, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check for equipment needing maintenance for a specific tenant and trigger alerts.
|
||||
|
||||
Args:
|
||||
equipment_repo: Equipment repository
|
||||
tenant_id: Tenant ID to check for
|
||||
|
||||
Returns:
|
||||
int: Number of maintenance alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get equipment that needs maintenance for specific tenant using repository method
|
||||
equipment_needing_maintenance = await equipment_repo.get_equipment_needing_maintenance(tenant_id)
|
||||
|
||||
logger.info(
|
||||
"Found equipment needing maintenance for tenant",
|
||||
count=len(equipment_needing_maintenance),
|
||||
tenant_id=str(tenant_id)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for equipment in equipment_needing_maintenance:
|
||||
try:
|
||||
equipment_id = UUID(equipment["id"])
|
||||
days_overdue = int(equipment.get("days_overdue", 0))
|
||||
|
||||
# Emit equipment maintenance alert
|
||||
await self.alert_service.emit_equipment_maintenance_due(
|
||||
tenant_id=tenant_id,
|
||||
equipment_id=equipment_id,
|
||||
equipment_name=equipment.get("name", "Unknown Equipment"),
|
||||
equipment_type=equipment.get("type", "unknown"),
|
||||
last_maintenance_date=equipment.get("last_maintenance_date"),
|
||||
days_overdue=days_overdue
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting equipment maintenance alert",
|
||||
tenant_id=str(tenant_id),
|
||||
equipment_id=equipment.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking equipment maintenance for tenant", tenant_id=str(tenant_id), error=str(e))
|
||||
return 0
|
||||
|
||||
async def _check_batch_start_delays_for_tenant(self, batch_repo: ProductionBatchRepository, tenant_id: UUID) -> int:
|
||||
"""
|
||||
Check for batches that should have started but haven't for a specific tenant.
|
||||
|
||||
Args:
|
||||
batch_repo: Production batch repository
|
||||
tenant_id: Tenant ID to check for
|
||||
|
||||
Returns:
|
||||
int: Number of start delay alerts generated
|
||||
"""
|
||||
try:
|
||||
# Get batches that should have started for specific tenant using repository method
|
||||
delayed_start_batches = await batch_repo.get_batches_with_delayed_start(tenant_id)
|
||||
|
||||
logger.info(
|
||||
"Found batches with delayed start for tenant",
|
||||
count=len(delayed_start_batches),
|
||||
tenant_id=str(tenant_id)
|
||||
)
|
||||
|
||||
alerts_generated = 0
|
||||
|
||||
for batch in delayed_start_batches:
|
||||
try:
|
||||
batch_id = UUID(batch["id"])
|
||||
scheduled_start = batch.get("scheduled_start_time")
|
||||
|
||||
# Emit batch start delayed alert
|
||||
await self.alert_service.emit_batch_start_delayed(
|
||||
tenant_id=tenant_id,
|
||||
batch_id=batch_id,
|
||||
product_name=batch.get("product_name", "Unknown Product"),
|
||||
batch_number=batch.get("batch_number", "Unknown Batch"),
|
||||
scheduled_start=scheduled_start,
|
||||
delay_reason="Batch has not started on time"
|
||||
)
|
||||
|
||||
alerts_generated += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error emitting batch start delay alert",
|
||||
tenant_id=str(tenant_id),
|
||||
batch_id=batch.get("id", "unknown"),
|
||||
error=str(e)
|
||||
)
|
||||
continue
|
||||
|
||||
return alerts_generated
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error checking batch start delays for tenant", tenant_id=str(tenant_id), error=str(e))
|
||||
return 0
|
||||
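# ---------------------------------------------------------------------------
# Illustrative lifecycle sketch (not part of the original file): how an
# application startup hook might run the scheduler with Redis-backed leader
# election. The Redis URL and the wiring of alert_service/database_manager are
# hypothetical.
async def _example_run_scheduler(alert_service: ProductionAlertService, database_manager: Any) -> None:
    scheduler = ProductionScheduler(
        alert_service=alert_service,
        database_manager=database_manager,
        redis_url="redis://localhost:6379/0",  # hypothetical URL
    )
    # start() acquires leadership (or falls back to standalone mode without
    # Redis) and then runs check_all_conditions() every 5 minutes.
    await scheduler.start()
    try:
        ...  # serve requests while the scheduler runs in the background
    finally:
        # Releases leadership, shuts down APScheduler and closes Redis.
        await scheduler.stop()
# ---------------------------------------------------------------------------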
2410
services/production/app/services/production_service.py
Normal file
2410
services/production/app/services/production_service.py
Normal file
File diff suppressed because it is too large
563
services/production/app/services/quality_template_service.py
Normal file
563
services/production/app/services/quality_template_service.py
Normal file
@@ -0,0 +1,563 @@
|
||||
# services/production/app/services/quality_template_service.py
|
||||
"""
|
||||
Quality Check Template Service - Business Logic Layer
|
||||
Handles quality template operations with business rules and validation
|
||||
"""
|
||||
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, func
|
||||
from typing import List, Optional, Tuple
|
||||
from uuid import UUID, uuid4
|
||||
from datetime import datetime, timezone
|
||||
import structlog
|
||||
|
||||
from app.models.production import QualityCheckTemplate, ProcessStage
|
||||
from app.schemas.quality_templates import QualityCheckTemplateCreate, QualityCheckTemplateUpdate
|
||||
from app.repositories.quality_template_repository import QualityTemplateRepository
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class QualityTemplateService:
|
||||
"""Service for managing quality check templates with business logic"""
|
||||
|
||||
def __init__(self, db: AsyncSession):
|
||||
self.db = db
|
||||
self.repository = QualityTemplateRepository(db)
|
||||
|
||||
async def create_template(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_data: QualityCheckTemplateCreate
|
||||
) -> QualityCheckTemplate:
|
||||
"""
|
||||
Create a new quality check template
|
||||
|
||||
Business Rules:
|
||||
- Template code must be unique within tenant
|
||||
- Validates template configuration
|
||||
"""
|
||||
try:
|
||||
# Auto-generate template code if not provided
|
||||
if not template_data.template_code:
|
||||
template_data.template_code = await self._generate_template_code(
|
||||
tenant_id,
|
||||
template_data.check_type,
|
||||
template_data.name
|
||||
)
|
||||
logger.info("Auto-generated template code",
|
||||
template_code=template_data.template_code,
|
||||
check_type=template_data.check_type)
|
||||
|
||||
# Business Rule: Validate template code uniqueness
|
||||
if template_data.template_code:
|
||||
exists = await self.repository.check_template_code_exists(
|
||||
tenant_id,
|
||||
template_data.template_code
|
||||
)
|
||||
if exists:
|
||||
raise ValueError(f"Template code '{template_data.template_code}' already exists")
|
||||
|
||||
# Business Rule: Validate template configuration
|
||||
is_valid, errors = self._validate_template_configuration(template_data.dict())
|
||||
if not is_valid:
|
||||
raise ValueError(f"Invalid template configuration: {', '.join(errors)}")
|
||||
|
||||
# Create template via repository
|
||||
template_dict = template_data.dict()
|
||||
template_dict['id'] = uuid4()
|
||||
template_dict['tenant_id'] = UUID(tenant_id)
|
||||
|
||||
template = await self.repository.create(template_dict)
|
||||
|
||||
logger.info("Quality template created",
|
||||
template_id=str(template.id),
|
||||
tenant_id=tenant_id,
|
||||
template_code=template.template_code)
|
||||
|
||||
return template
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Template creation validation failed",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Failed to create quality template",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def get_templates(
|
||||
self,
|
||||
tenant_id: str,
|
||||
stage: Optional[ProcessStage] = None,
|
||||
check_type: Optional[str] = None,
|
||||
is_active: Optional[bool] = True,
|
||||
skip: int = 0,
|
||||
limit: int = 100
|
||||
) -> Tuple[List[QualityCheckTemplate], int]:
|
||||
"""
|
||||
Get quality check templates with filtering and pagination
|
||||
|
||||
Business Rules:
|
||||
- Default to active templates only
|
||||
- Limit maximum results per page
|
||||
"""
|
||||
try:
|
||||
# Business Rule: Enforce maximum limit
if limit > 1000:
    logger.warning("Template list limit capped at 1000",
                   tenant_id=tenant_id,
                   requested_limit=limit)
    limit = 1000
|
||||
|
||||
templates, total = await self.repository.get_templates_by_tenant(
|
||||
tenant_id=tenant_id,
|
||||
stage=stage,
|
||||
check_type=check_type,
|
||||
is_active=is_active,
|
||||
skip=skip,
|
||||
limit=limit
|
||||
)
|
||||
|
||||
logger.debug("Retrieved quality templates",
|
||||
tenant_id=tenant_id,
|
||||
total=total,
|
||||
returned=len(templates))
|
||||
|
||||
return templates, total
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get quality templates",
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def get_template(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_id: UUID
|
||||
) -> Optional[QualityCheckTemplate]:
|
||||
"""
|
||||
Get a specific quality check template
|
||||
|
||||
Business Rules:
|
||||
- Template must belong to tenant
|
||||
"""
|
||||
try:
|
||||
template = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
|
||||
|
||||
if template:
|
||||
logger.debug("Retrieved quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
else:
|
||||
logger.warning("Quality template not found",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return template
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def update_template(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_id: UUID,
|
||||
template_data: QualityCheckTemplateUpdate
|
||||
) -> Optional[QualityCheckTemplate]:
|
||||
"""
|
||||
Update a quality check template
|
||||
|
||||
Business Rules:
|
||||
- Template must exist and belong to tenant
|
||||
- Template code must remain unique if changed
|
||||
- Validates updated configuration
|
||||
"""
|
||||
try:
|
||||
# Business Rule: Template must exist
|
||||
template = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
|
||||
if not template:
|
||||
logger.warning("Cannot update non-existent template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
return None
|
||||
|
||||
# Business Rule: Validate template code uniqueness if being updated
|
||||
if template_data.template_code and template_data.template_code != template.template_code:
|
||||
exists = await self.repository.check_template_code_exists(
|
||||
tenant_id,
|
||||
template_data.template_code,
|
||||
exclude_id=template_id
|
||||
)
|
||||
if exists:
|
||||
raise ValueError(f"Template code '{template_data.template_code}' already exists")
|
||||
|
||||
# Business Rule: Validate updated configuration
|
||||
update_dict = template_data.dict(exclude_unset=True)
|
||||
if update_dict:
|
||||
# Merge with existing data for validation
|
||||
full_data = template.__dict__.copy()
|
||||
full_data.update(update_dict)
|
||||
is_valid, errors = self._validate_template_configuration(full_data)
|
||||
if not is_valid:
|
||||
raise ValueError(f"Invalid template configuration: {', '.join(errors)}")
|
||||
|
||||
# Update via repository
|
||||
update_dict['updated_at'] = datetime.now(timezone.utc)
|
||||
updated_template = await self.repository.update(template_id, update_dict)
|
||||
|
||||
logger.info("Quality template updated",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return updated_template
|
||||
|
||||
except ValueError as e:
|
||||
logger.warning("Template update validation failed",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Failed to update quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def delete_template(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_id: UUID
|
||||
) -> bool:
|
||||
"""
|
||||
Delete a quality check template
|
||||
|
||||
Business Rules:
|
||||
- Template must exist and belong to tenant
|
||||
- Consider soft delete for audit trail (future enhancement)
|
||||
"""
|
||||
try:
|
||||
# Business Rule: Template must exist
|
||||
template = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
|
||||
if not template:
|
||||
logger.warning("Cannot delete non-existent template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
return False
|
||||
|
||||
# Business Rule: Check if template is in use before deletion
|
||||
# Check for quality checks using this template
|
||||
from app.models.production import QualityCheck
|
||||
from sqlalchemy import select, func
|
||||
|
||||
usage_query = select(func.count(QualityCheck.id)).where(
|
||||
QualityCheck.template_id == template_id
|
||||
)
|
||||
usage_result = await self.repository.session.execute(usage_query)
|
||||
usage_count = usage_result.scalar() or 0
|
||||
|
||||
if usage_count > 0:
|
||||
logger.warning("Cannot delete template in use",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
usage_count=usage_count)
|
||||
# Instead of deleting, soft delete by setting is_active = False
|
||||
template.is_active = False
|
||||
await self.repository.session.commit()
|
||||
logger.info("Quality template soft deleted (set to inactive)",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
usage_count=usage_count)
|
||||
return True
|
||||
|
||||
# Template is not in use, safe to delete
|
||||
success = await self.repository.delete(template_id)
|
||||
|
||||
if success:
|
||||
logger.info("Quality template deleted",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
else:
|
||||
logger.warning("Failed to delete quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return success
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to delete quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def get_templates_for_stage(
|
||||
self,
|
||||
tenant_id: str,
|
||||
stage: ProcessStage,
|
||||
is_active: Optional[bool] = True
|
||||
) -> List[QualityCheckTemplate]:
|
||||
"""
|
||||
Get all quality check templates applicable to a specific process stage
|
||||
|
||||
Business Rules:
|
||||
- Returns templates ordered by criticality
|
||||
- Required templates come first
|
||||
"""
|
||||
try:
|
||||
templates = await self.repository.get_templates_for_stage(
|
||||
tenant_id=tenant_id,
|
||||
stage=stage,
|
||||
is_active=is_active
|
||||
)
|
||||
|
||||
logger.debug("Retrieved templates for stage",
|
||||
tenant_id=tenant_id,
|
||||
stage=stage.value,
|
||||
count=len(templates))
|
||||
|
||||
return templates
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get templates for stage",
|
||||
tenant_id=tenant_id,
|
||||
stage=stage.value if stage else None,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def duplicate_template(
|
||||
self,
|
||||
tenant_id: str,
|
||||
template_id: UUID
|
||||
) -> Optional[QualityCheckTemplate]:
|
||||
"""
|
||||
Duplicate an existing quality check template
|
||||
|
||||
Business Rules:
|
||||
- Original template must exist
|
||||
- Duplicate gets modified name and code
|
||||
- All other attributes copied
|
||||
"""
|
||||
try:
|
||||
# Business Rule: Original must exist
|
||||
original = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
|
||||
if not original:
|
||||
logger.warning("Cannot duplicate non-existent template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id)
|
||||
return None
|
||||
|
||||
# Business Rule: Create duplicate with modified identifiers
|
||||
duplicate_data = {
|
||||
'name': f"{original.name} (Copy)",
|
||||
'template_code': f"{original.template_code}_copy" if original.template_code else None,
|
||||
'check_type': original.check_type,
|
||||
'category': original.category,
|
||||
'description': original.description,
|
||||
'instructions': original.instructions,
|
||||
'parameters': original.parameters,
|
||||
'thresholds': original.thresholds,
|
||||
'scoring_criteria': original.scoring_criteria,
|
||||
'is_active': original.is_active,
|
||||
'is_required': original.is_required,
|
||||
'is_critical': original.is_critical,
|
||||
'weight': original.weight,
|
||||
'min_value': original.min_value,
|
||||
'max_value': original.max_value,
|
||||
'target_value': original.target_value,
|
||||
'unit': original.unit,
|
||||
'tolerance_percentage': original.tolerance_percentage,
|
||||
'applicable_stages': original.applicable_stages,
|
||||
'created_by': original.created_by
|
||||
}
|
||||
|
||||
create_data = QualityCheckTemplateCreate(**duplicate_data)
|
||||
duplicate = await self.create_template(tenant_id, create_data)
|
||||
|
||||
logger.info("Quality template duplicated",
|
||||
original_id=str(template_id),
|
||||
duplicate_id=str(duplicate.id),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return duplicate
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to duplicate quality template",
|
||||
template_id=str(template_id),
|
||||
tenant_id=tenant_id,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def get_templates_by_recipe_config(
|
||||
self,
|
||||
tenant_id: str,
|
||||
stage: ProcessStage,
|
||||
recipe_quality_config: dict
|
||||
) -> List[QualityCheckTemplate]:
|
||||
"""
|
||||
Get quality check templates based on recipe configuration
|
||||
|
||||
Business Rules:
|
||||
- Returns only active templates
|
||||
- Filters by template IDs specified in recipe config
|
||||
- Ordered by criticality
|
||||
"""
|
||||
try:
|
||||
# Business Rule: Extract template IDs from recipe config
|
||||
stage_config = recipe_quality_config.get('stages', {}).get(stage.value)
|
||||
if not stage_config:
|
||||
logger.debug("No quality config for stage",
|
||||
tenant_id=tenant_id,
|
||||
stage=stage.value)
|
||||
return []
|
||||
|
||||
template_ids = stage_config.get('template_ids', [])
|
||||
if not template_ids:
|
||||
logger.debug("No template IDs in config",
|
||||
tenant_id=tenant_id,
|
||||
stage=stage.value)
|
||||
return []
|
||||
|
||||
# Get templates by IDs via repository
|
||||
template_ids_uuid = [UUID(tid) for tid in template_ids]
|
||||
templates = await self.repository.get_templates_by_ids(tenant_id, template_ids_uuid)
|
||||
|
||||
logger.debug("Retrieved templates by recipe config",
|
||||
tenant_id=tenant_id,
|
||||
stage=stage.value,
|
||||
count=len(templates))
|
||||
|
||||
return templates
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Failed to get templates by recipe config",
|
||||
tenant_id=tenant_id,
|
||||
stage=stage.value if stage else None,
|
||||
error=str(e))
|
||||
raise
|
||||
|
||||
async def _generate_template_code(
|
||||
self,
|
||||
tenant_id: str,
|
||||
check_type: str,
|
||||
template_name: str
|
||||
) -> str:
|
||||
"""
|
||||
Generate unique template code for quality check template
|
||||
Format: TPL-{TYPE}-{SEQUENCE}
|
||||
Examples:
|
||||
- Product Quality → TPL-PQ-0001
|
||||
- Process Hygiene → TPL-PH-0001
|
||||
- Equipment → TPL-EQ-0001
|
||||
- Safety → TPL-SA-0001
|
||||
- Temperature Control → TPL-TC-0001
|
||||
|
||||
Following the same pattern as inventory SKU and order number generation
|
||||
"""
|
||||
try:
|
||||
# Map check_type to 2-letter prefix
|
||||
type_map = {
|
||||
'product_quality': 'PQ',
|
||||
'process_hygiene': 'PH',
|
||||
'equipment': 'EQ',
|
||||
'safety': 'SA',
|
||||
'cleaning': 'CL',
|
||||
'temperature': 'TC',
|
||||
'documentation': 'DC'
|
||||
}
|
||||
|
||||
# Get prefix from check_type, fallback to first 2 chars of name
|
||||
type_prefix = type_map.get(check_type.lower())
|
||||
if not type_prefix:
|
||||
# Fallback: use first 2 chars of template name or check_type
|
||||
name_for_prefix = template_name or check_type
|
||||
type_prefix = name_for_prefix[:2].upper() if len(name_for_prefix) >= 2 else "TP"
|
||||
|
||||
tenant_uuid = UUID(tenant_id)
|
||||
|
||||
# Count existing templates with this prefix for this tenant
|
||||
stmt = select(func.count(QualityCheckTemplate.id)).where(
|
||||
QualityCheckTemplate.tenant_id == tenant_uuid,
|
||||
QualityCheckTemplate.template_code.like(f"TPL-{type_prefix}-%")
|
||||
)
|
||||
result = await self.db.execute(stmt)
|
||||
count = result.scalar() or 0
|
||||
|
||||
# Generate sequential number
|
||||
sequence = count + 1
|
||||
template_code = f"TPL-{type_prefix}-{sequence:04d}"
|
||||
|
||||
logger.info("Generated template code",
|
||||
template_code=template_code,
|
||||
type_prefix=type_prefix,
|
||||
sequence=sequence,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return template_code
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error generating template code, using fallback",
|
||||
error=str(e),
|
||||
check_type=check_type)
|
||||
# Fallback to UUID-based code
|
||||
fallback_code = f"TPL-{uuid4().hex[:8].upper()}"
|
||||
logger.warning("Using fallback template code", template_code=fallback_code)
|
||||
return fallback_code
|
||||
|
||||
def _validate_template_configuration(
|
||||
self,
|
||||
template_data: dict
|
||||
) -> Tuple[bool, List[str]]:
|
||||
"""
|
||||
Validate quality check template configuration (business rules)
|
||||
|
||||
Business Rules:
|
||||
- Measurement checks require unit
|
||||
- Min value must be less than max value
|
||||
- Visual checks require scoring criteria
|
||||
- Process stages must be valid
|
||||
"""
|
||||
errors = []
|
||||
|
||||
# Business Rule: Type-specific validation
|
||||
check_type = template_data.get('check_type')
|
||||
|
||||
if check_type in ['measurement', 'temperature', 'weight']:
|
||||
if not template_data.get('unit'):
|
||||
errors.append(f"Unit is required for {check_type} checks")
|
||||
|
||||
min_val = template_data.get('min_value')
|
||||
max_val = template_data.get('max_value')
|
||||
|
||||
if min_val is not None and max_val is not None and min_val >= max_val:
|
||||
errors.append("Minimum value must be less than maximum value")
|
||||
|
||||
# Business Rule: Visual checks need scoring criteria
|
||||
scoring = template_data.get('scoring_criteria', {})
|
||||
if check_type == 'visual' and not scoring:
|
||||
errors.append("Visual checks require scoring criteria")
|
||||
|
||||
# Business Rule: Validate process stages
|
||||
stages = template_data.get('applicable_stages', [])
|
||||
if stages:
|
||||
valid_stages = [stage.value for stage in ProcessStage]
|
||||
invalid_stages = [s for s in stages if s not in valid_stages]
|
||||
if invalid_stages:
|
||||
errors.append(f"Invalid process stages: {invalid_stages}")
|
||||
|
||||
is_valid = len(errors) == 0
|
||||
|
||||
if not is_valid:
|
||||
logger.warning("Template configuration validation failed",
|
||||
check_type=check_type,
|
||||
errors=errors)
|
||||
|
||||
return is_valid, errors
|
||||
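# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): creating a
# temperature-check template. The specific schema fields shown are assumed
# examples; the async session is supplied by the caller.
async def _example_create_template(db: AsyncSession, tenant_id: str) -> QualityCheckTemplate:
    service = QualityTemplateService(db)
    # template_code is omitted so the service auto-generates one,
    # e.g. "TPL-TC-0001" for a temperature check.
    data = QualityCheckTemplateCreate(
        name="Oven core temperature",
        check_type="temperature",
        unit="°C",
        min_value=92.0,
        max_value=99.0,
    )
    return await service.create_template(tenant_id, data)
# ---------------------------------------------------------------------------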
161
services/production/app/services/tenant_deletion_service.py
Normal file
161
services/production/app/services/tenant_deletion_service.py
Normal file
@@ -0,0 +1,161 @@
"""
Production Service - Tenant Data Deletion
Handles deletion of all production-related data for a tenant
"""
from typing import Dict
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, delete, func
import structlog

from shared.services.tenant_deletion import BaseTenantDataDeletionService, TenantDataDeletionResult

logger = structlog.get_logger()


class ProductionTenantDeletionService(BaseTenantDataDeletionService):
    """Service for deleting all production-related data for a tenant"""

    def __init__(self, db_session: AsyncSession):
        super().__init__("production-service")
        self.db = db_session

    async def get_tenant_data_preview(self, tenant_id: str) -> Dict[str, int]:
        """Get counts of what would be deleted"""

        try:
            preview = {}

            # Import models here to avoid circular imports
            from app.models.production import (
                ProductionBatch,
                ProductionSchedule,
                Equipment,
                QualityCheck
            )

            # Count production batches
            batch_count = await self.db.scalar(
                select(func.count(ProductionBatch.id)).where(ProductionBatch.tenant_id == tenant_id)
            )
            preview["production_batches"] = batch_count or 0

            # Count production schedules
            try:
                schedule_count = await self.db.scalar(
                    select(func.count(ProductionSchedule.id)).where(ProductionSchedule.tenant_id == tenant_id)
                )
                preview["production_schedules"] = schedule_count or 0
            except Exception:
                # Model might not exist in all versions
                preview["production_schedules"] = 0

            # Count equipment
            try:
                equipment_count = await self.db.scalar(
                    select(func.count(Equipment.id)).where(Equipment.tenant_id == tenant_id)
                )
                preview["equipment"] = equipment_count or 0
            except Exception:
                # Model might not exist in all versions
                preview["equipment"] = 0

            # Count quality checks
            try:
                qc_count = await self.db.scalar(
                    select(func.count(QualityCheck.id)).where(QualityCheck.tenant_id == tenant_id)
                )
                preview["quality_checks"] = qc_count or 0
            except Exception:
                # Model might not exist in all versions
                preview["quality_checks"] = 0

            return preview

        except Exception as e:
            logger.error("Error getting deletion preview",
                         tenant_id=tenant_id,
                         error=str(e))
            return {}

    async def delete_tenant_data(self, tenant_id: str) -> TenantDataDeletionResult:
        """Delete all data for a tenant"""

        result = TenantDataDeletionResult(tenant_id, self.service_name)

        try:
            # Import models here to avoid circular imports
            from app.models.production import (
                ProductionBatch,
                ProductionSchedule,
                Equipment,
                QualityCheck
            )

            # Delete quality checks first (might have FK to batches)
            try:
                qc_delete = await self.db.execute(
                    delete(QualityCheck).where(QualityCheck.tenant_id == tenant_id)
                )
                result.add_deleted_items("quality_checks", qc_delete.rowcount)
            except Exception as e:
                logger.warning("Error deleting quality checks (table might not exist)",
                               tenant_id=tenant_id,
                               error=str(e))
                result.add_error(f"Quality check deletion: {str(e)}")

            # Delete production batches
            try:
                batch_delete = await self.db.execute(
                    delete(ProductionBatch).where(ProductionBatch.tenant_id == tenant_id)
                )
                result.add_deleted_items("production_batches", batch_delete.rowcount)

                logger.info("Deleted production batches for tenant",
                            tenant_id=tenant_id,
                            count=batch_delete.rowcount)

            except Exception as e:
                logger.error("Error deleting production batches",
                             tenant_id=tenant_id,
                             error=str(e))
                result.add_error(f"Production batch deletion: {str(e)}")

            # Delete production schedules
            try:
                schedule_delete = await self.db.execute(
                    delete(ProductionSchedule).where(ProductionSchedule.tenant_id == tenant_id)
                )
                result.add_deleted_items("production_schedules", schedule_delete.rowcount)
            except Exception as e:
                logger.warning("Error deleting production schedules (table might not exist)",
                               tenant_id=tenant_id,
                               error=str(e))
                result.add_error(f"Production schedule deletion: {str(e)}")

            # Delete equipment
            try:
                equipment_delete = await self.db.execute(
                    delete(Equipment).where(Equipment.tenant_id == tenant_id)
                )
                result.add_deleted_items("equipment", equipment_delete.rowcount)
            except Exception as e:
                logger.warning("Error deleting equipment (table might not exist)",
                               tenant_id=tenant_id,
                               error=str(e))
                result.add_error(f"Equipment deletion: {str(e)}")

            # Commit all deletions
            await self.db.commit()

            logger.info("Tenant data deletion completed",
                        tenant_id=tenant_id,
                        deleted_counts=result.deleted_counts)

        except Exception as e:
            logger.error("Fatal error during tenant data deletion",
                         tenant_id=tenant_id,
                         error=str(e))
            await self.db.rollback()
            result.add_error(f"Fatal error: {str(e)}")

        return result
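A hedged sketch of how this deletion service might be driven from an async SQLAlchemy session. The engine DSN and the session-factory wiring are illustrative placeholders; only ProductionTenantDeletionService, get_tenant_data_preview, delete_tenant_data and result.deleted_counts come from the code above.

# Usage sketch - the DSN and session factory are illustrative placeholders.
import asyncio
from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker

from app.services.tenant_deletion_service import ProductionTenantDeletionService

async def purge_tenant(tenant_id: str) -> None:
    engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/production")  # placeholder DSN
    session_factory = async_sessionmaker(engine, expire_on_commit=False)
    async with session_factory() as session:
        service = ProductionTenantDeletionService(session)
        preview = await service.get_tenant_data_preview(tenant_id)
        print("Would delete:", preview)
        result = await service.delete_tenant_data(tenant_id)
        print("Deleted:", result.deleted_counts)

# asyncio.run(purge_tenant("00000000-0000-0000-0000-000000000000"))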
26
services/production/app/utils/__init__.py
Normal file
26
services/production/app/utils/__init__.py
Normal file
@@ -0,0 +1,26 @@
# services/production/app/utils/__init__.py
"""
Utility modules for production service
"""

from .cache import (
    get_redis_client,
    close_redis,
    get_cached,
    set_cached,
    delete_cached,
    delete_pattern,
    cache_response,
    make_cache_key,
)

__all__ = [
    'get_redis_client',
    'close_redis',
    'get_cached',
    'set_cached',
    'delete_cached',
    'delete_pattern',
    'cache_response',
    'make_cache_key',
]
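Because the package re-exports the cache helpers, callers can import them from app.utils directly; a brief sketch (the key prefix and parameter are illustrative):

from app.utils import get_cached, make_cache_key

async def lookup_dashboard(tenant_id: str):
    key = make_cache_key("production:dashboard", tenant_id, range="7d")  # illustrative key
    return await get_cached(key)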
265
services/production/app/utils/cache.py
Normal file
265
services/production/app/utils/cache.py
Normal file
@@ -0,0 +1,265 @@
# services/production/app/utils/cache.py
"""
Redis caching utilities for dashboard endpoints
"""

import json
import redis.asyncio as redis
from typing import Optional, Any, Callable
from functools import wraps
import structlog
from app.core.config import settings
from pydantic import BaseModel

logger = structlog.get_logger()

# Redis client instance
_redis_client: Optional[redis.Redis] = None


async def get_redis_client() -> redis.Redis:
    """Get or create Redis client"""
    global _redis_client

    if _redis_client is None:
        try:
            # Check if TLS is enabled - convert string to boolean properly
            redis_tls_str = str(getattr(settings, 'REDIS_TLS_ENABLED', 'false')).lower()
            redis_tls_enabled = redis_tls_str in ('true', '1', 'yes', 'on')

            connection_kwargs = {
                'host': str(getattr(settings, 'REDIS_HOST', 'localhost')),
                'port': int(getattr(settings, 'REDIS_PORT', 6379)),
                'db': int(getattr(settings, 'REDIS_DB', 0)),
                'decode_responses': True,
                'socket_connect_timeout': 5,
                'socket_timeout': 5
            }

            # Add password if configured
            redis_password = getattr(settings, 'REDIS_PASSWORD', None)
            if redis_password:
                connection_kwargs['password'] = redis_password

            # Add SSL/TLS support if enabled
            if redis_tls_enabled:
                import ssl
                connection_kwargs['ssl'] = True
                connection_kwargs['ssl_cert_reqs'] = ssl.CERT_NONE
                logger.debug(f"Redis TLS enabled - connecting with SSL to {connection_kwargs['host']}:{connection_kwargs['port']}")

            _redis_client = redis.Redis(**connection_kwargs)

            # Test connection
            await _redis_client.ping()
            logger.info(f"Redis client connected successfully (TLS: {redis_tls_enabled})")
        except Exception as e:
            logger.warning(f"Failed to connect to Redis: {e}. Caching will be disabled.")
            _redis_client = None

    return _redis_client


async def close_redis():
    """Close Redis connection"""
    global _redis_client
    if _redis_client:
        await _redis_client.close()
        _redis_client = None
        logger.info("Redis connection closed")


async def get_cached(key: str) -> Optional[Any]:
    """
    Get cached value by key

    Args:
        key: Cache key

    Returns:
        Cached value (deserialized from JSON) or None if not found or error
    """
    try:
        client = await get_redis_client()
        if not client:
            return None

        cached = await client.get(key)
        if cached:
            logger.debug(f"Cache hit: {key}")
            return json.loads(cached)
        else:
            logger.debug(f"Cache miss: {key}")
            return None
    except Exception as e:
        logger.warning(f"Cache get error for key {key}: {e}")
        return None


def _serialize_value(value: Any) -> Any:
    """
    Recursively serialize values for JSON storage, handling Pydantic models properly.

    Args:
        value: Value to serialize

    Returns:
        JSON-serializable value
    """
    if isinstance(value, BaseModel):
        # Convert Pydantic model to dictionary
        return value.model_dump()
    elif isinstance(value, (list, tuple)):
        # Recursively serialize list/tuple elements
        return [_serialize_value(item) for item in value]
    elif isinstance(value, dict):
        # Recursively serialize dictionary values
        return {key: _serialize_value(val) for key, val in value.items()}
    else:
        # For other types, use default serialization
        return value


async def set_cached(key: str, value: Any, ttl: int = 60) -> bool:
    """
    Set cached value with TTL

    Args:
        key: Cache key
        value: Value to cache (will be JSON serialized)
        ttl: Time to live in seconds

    Returns:
        True if successful, False otherwise
    """
    try:
        client = await get_redis_client()
        if not client:
            return False

        # Serialize value properly before JSON encoding
        serialized_value = _serialize_value(value)
        serialized = json.dumps(serialized_value)
        await client.setex(key, ttl, serialized)
        logger.debug(f"Cache set: {key} (TTL: {ttl}s)")
        return True
    except Exception as e:
        logger.warning(f"Cache set error for key {key}: {e}")
        return False


async def delete_cached(key: str) -> bool:
    """
    Delete cached value

    Args:
        key: Cache key

    Returns:
        True if successful, False otherwise
    """
    try:
        client = await get_redis_client()
        if not client:
            return False

        await client.delete(key)
        logger.debug(f"Cache deleted: {key}")
        return True
    except Exception as e:
        logger.warning(f"Cache delete error for key {key}: {e}")
        return False


async def delete_pattern(pattern: str) -> int:
    """
    Delete all keys matching pattern

    Args:
        pattern: Redis key pattern (e.g., "dashboard:*")

    Returns:
        Number of keys deleted
    """
    try:
        client = await get_redis_client()
        if not client:
            return 0

        keys = []
        async for key in client.scan_iter(match=pattern):
            keys.append(key)

        if keys:
            deleted = await client.delete(*keys)
            logger.info(f"Deleted {deleted} keys matching pattern: {pattern}")
            return deleted
        return 0
    except Exception as e:
        logger.warning(f"Cache delete pattern error for {pattern}: {e}")
        return 0


def cache_response(key_prefix: str, ttl: int = 60):
    """
    Decorator to cache endpoint responses

    Args:
        key_prefix: Prefix for cache key (will be combined with tenant_id)
        ttl: Time to live in seconds

    Usage:
        @cache_response("dashboard:health", ttl=30)
        async def get_health(tenant_id: str):
            ...
    """
    def decorator(func: Callable):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            # Extract tenant_id from kwargs or args
            tenant_id = kwargs.get('tenant_id')
            if not tenant_id and args:
                # Try to find tenant_id in args (assuming it's the first argument)
                tenant_id = args[0] if len(args) > 0 else None

            if not tenant_id:
                # No tenant_id, skip caching
                return await func(*args, **kwargs)

            # Build cache key
            cache_key = f"{key_prefix}:{tenant_id}"

            # Try to get from cache
            cached_value = await get_cached(cache_key)
            if cached_value is not None:
                return cached_value

            # Execute function
            result = await func(*args, **kwargs)

            # Cache result
            await set_cached(cache_key, result, ttl)

            return result

        return wrapper
    return decorator


def make_cache_key(prefix: str, tenant_id: str, **params) -> str:
    """
    Create a cache key with optional parameters

    Args:
        prefix: Key prefix
        tenant_id: Tenant ID
        **params: Additional parameters to include in key

    Returns:
        Cache key string
    """
    key_parts = [prefix, tenant_id]
    for k, v in sorted(params.items()):
        if v is not None:
            key_parts.append(f"{k}:{v}")
    return ":".join(key_parts)
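A hedged sketch of how these helpers could be wired into a production endpoint; the route path and response payload are illustrative. Note that cache_response keys only on the prefix and tenant_id, so responses that vary by query parameter should build explicit keys with make_cache_key instead, as the second function does.

# Illustrative wiring only - the route path and payload are assumptions.
from uuid import UUID
from fastapi import APIRouter, Path

from app.utils.cache import cache_response, make_cache_key, get_cached, set_cached

router = APIRouter()

@router.get("/api/v1/tenants/{tenant_id}/production/dashboard/summary")
@cache_response("production:dashboard:summary", ttl=30)
async def dashboard_summary(tenant_id: UUID = Path(...)):
    # The expensive aggregation would run here; the decorator caches the result per tenant.
    return {"tenant_id": str(tenant_id), "batches_in_progress": 0}

async def cached_equipment_oee(tenant_id: str, equipment_id: str) -> dict:
    # Parameterised lookups build an explicit key instead of relying on the decorator.
    key = make_cache_key("production:oee", tenant_id, equipment=equipment_id)
    value = await get_cached(key)
    if value is None:
        value = {"oee": 0.0}  # placeholder for the real computation
        await set_cached(key, value, ttl=120)
    return value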