Improve the frontend and repository layer
@@ -426,3 +426,102 @@ async def get_predictive_maintenance_insights(
            status_code=500,
            detail="Failed to generate predictive maintenance insights"
        )


# ===== SUSTAINABILITY / WASTE ANALYTICS ENDPOINT =====
# Called by Inventory Service for sustainability metrics

@router.get(
    "/api/v1/tenants/{tenant_id}/production/waste-analytics",
    response_model=dict
)
async def get_waste_analytics_for_sustainability(
    tenant_id: UUID = Path(...),
    start_date: datetime = Query(..., description="Start date for waste analysis"),
    end_date: datetime = Query(..., description="End date for waste analysis"),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get production waste analytics for sustainability tracking

    This endpoint is called by the Inventory Service's sustainability module
    to calculate environmental impact and SDG 12.3 compliance.

    Does NOT require analytics tier - this is core sustainability data.

    Returns:
    - total_production_waste: Sum of waste_quantity from all batches
    - total_defects: Sum of defect_quantity from all batches
    - total_planned: Sum of planned_quantity
    - total_actual: Sum of actual_quantity
    """
    try:
        waste_data = await production_service.get_waste_analytics(
            tenant_id,
            start_date,
            end_date
        )

        logger.info(
            "Production waste analytics retrieved for sustainability",
            tenant_id=str(tenant_id),
            total_waste=waste_data.get('total_production_waste', 0),
            start_date=start_date.isoformat(),
            end_date=end_date.isoformat()
        )

        return waste_data

    except Exception as e:
        logger.error(
            "Error getting waste analytics for sustainability",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve waste analytics: {str(e)}"
        )


@router.get(
    "/api/v1/tenants/{tenant_id}/production/baseline",
    response_model=dict
)
async def get_baseline_metrics(
    tenant_id: UUID = Path(...),
    production_service: ProductionService = Depends(get_production_service)
):
    """
    Get baseline production metrics from first 90 days

    Used by sustainability service to establish waste baseline
    for SDG 12.3 compliance tracking.

    Returns:
    - waste_percentage: Baseline waste percentage from first 90 days
    - total_production_kg: Total production in first 90 days
    - total_waste_kg: Total waste in first 90 days
    - period: Date range of baseline period
    """
    try:
        baseline_data = await production_service.get_baseline_metrics(tenant_id)

        logger.info(
            "Baseline metrics retrieved",
            tenant_id=str(tenant_id),
            baseline_percentage=baseline_data.get('waste_percentage', 0)
        )

        return baseline_data

    except Exception as e:
        logger.error(
            "Error getting baseline metrics",
            tenant_id=str(tenant_id),
            error=str(e)
        )
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve baseline metrics: {str(e)}"
        )
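For orientation, a minimal sketch of how a caller such as the Inventory Service might consume the new waste-analytics endpoint. The base URL, port, and bearer-token auth are illustrative assumptions, not part of this commit; only the path and query parameters come from the route above.

import httpx

async def fetch_waste_analytics(tenant_id, start_date, end_date, token):
    # Hypothetical client-side helper for the endpoint added above.
    params = {
        "start_date": start_date.isoformat(),
        "end_date": end_date.isoformat(),
    }
    async with httpx.AsyncClient(base_url="http://production-service:8000") as client:
        resp = await client.get(
            f"/api/v1/tenants/{tenant_id}/production/waste-analytics",
            params=params,
            headers={"Authorization": f"Bearer {token}"},  # auth scheme assumed
        )
        resp.raise_for_status()
        return resp.json()  # dict with total_production_waste, total_defects, ...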
@@ -20,8 +20,6 @@ from app.models.production import (
    EquipmentStatus, EquipmentType
)
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
from shared.utils.alert_generator import generate_equipment_alerts
from shared.messaging.rabbitmq import RabbitMQClient

logger = structlog.get_logger()
router = APIRouter(prefix="/internal/demo", tags=["internal"])
@@ -430,44 +428,18 @@ async def clone_demo_data(
            db.add(new_capacity)
            stats["production_capacity"] += 1

        # Commit cloned data first
        # Commit cloned data
        await db.commit()

        # Generate equipment maintenance and status alerts with RabbitMQ publishing
        rabbitmq_client = None
        try:
            # Initialize RabbitMQ client for alert publishing
            rabbitmq_host = os.getenv("RABBITMQ_HOST", "rabbitmq-service")
            rabbitmq_user = os.getenv("RABBITMQ_USER", "bakery")
            rabbitmq_password = os.getenv("RABBITMQ_PASSWORD", "forecast123")
            rabbitmq_port = os.getenv("RABBITMQ_PORT", "5672")
            rabbitmq_vhost = os.getenv("RABBITMQ_VHOST", "/")
            rabbitmq_url = f"amqp://{rabbitmq_user}:{rabbitmq_password}@{rabbitmq_host}:{rabbitmq_port}{rabbitmq_vhost}"
        # NOTE: Alert generation removed - alerts are now generated automatically by the
        # production alert service which runs scheduled checks at appropriate intervals.
        # This eliminates duplicate alerts and provides a more realistic demo experience.
        stats["alerts_generated"] = 0

            rabbitmq_client = RabbitMQClient(rabbitmq_url, service_name="production")
            await rabbitmq_client.connect()

            # Generate alerts and publish to RabbitMQ
            alerts_count = await generate_equipment_alerts(
                db,
                virtual_uuid,
                session_time,
                rabbitmq_client=rabbitmq_client
            )
            stats["alerts_generated"] += alerts_count
            await db.commit()
            logger.info(f"Generated {alerts_count} equipment alerts")
        except Exception as alert_error:
            logger.warning(f"Alert generation failed: {alert_error}", exc_info=True)
        finally:
            # Clean up RabbitMQ connection
            if rabbitmq_client:
                try:
                    await rabbitmq_client.disconnect()
                except Exception as cleanup_error:
                    logger.warning(f"Error disconnecting RabbitMQ: {cleanup_error}")

        total_records = sum(stats.values())
        # Calculate total from non-alert stats
        total_records = (stats["equipment"] + stats["batches"] + stats["schedules"] +
                         stats["quality_templates"] + stats["quality_checks"] +
                         stats["production_capacity"])
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
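A note on the totals change above: stats still carries the alerts_generated counter, which is not a cloned record, so sum(stats.values()) would overstate total_records. A tiny illustration with invented numbers:

stats = {"equipment": 5, "batches": 20, "alerts_generated": 3}  # invented values
sum(stats.values())                       # 28 - wrongly counts alerts as cloned records
stats["equipment"] + stats["batches"]     # 25 - cloned records only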
@@ -12,7 +12,7 @@ from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import require_user_role
from shared.routing import RouteBuilder, RouteCategory
from app.core.database import get_db
from app.repositories.quality_template_repository import QualityTemplateRepository
from app.services.quality_template_service import QualityTemplateService
from app.models.production import ProcessStage, QualityCheckTemplate
from app.schemas.quality_templates import (
    QualityCheckTemplateCreate,
@@ -52,9 +52,9 @@ async def list_quality_templates(
    - is_active: Filter by active status (default: True)
    """
    try:
        repo = QualityTemplateRepository(db)
        service = QualityTemplateService(db)

        templates, total = await repo.get_templates_by_tenant(
        templates, total = await service.get_templates(
            tenant_id=str(tenant_id),
            stage=stage,
            check_type=check_type.value if check_type else None,
@@ -98,29 +98,18 @@ async def create_quality_template(
):
    """Create a new quality check template"""
    try:
        repo = QualityTemplateRepository(db)
        service = QualityTemplateService(db)

        # Check if template code already exists (if provided)
        if template_data.template_code:
            code_exists = await repo.check_template_code_exists(
                tenant_id=str(tenant_id),
                template_code=template_data.template_code
            )
            if code_exists:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=f"Template code '{template_data.template_code}' already exists"
                )

        # Create template
        # Add created_by from current user
        template_dict = template_data.dict()
        template_dict['tenant_id'] = str(tenant_id)
        template_dict['created_by'] = UUID(current_user["sub"])
        template_create = QualityCheckTemplateCreate(**template_dict)

        template = QualityCheckTemplate(**template_dict)
        db.add(template)
        await db.commit()
        await db.refresh(template)
        # Create template via service (handles validation and business rules)
        template = await service.create_template(
            tenant_id=str(tenant_id),
            template_data=template_create
        )

        logger.info("Created quality template",
                    template_id=str(template.id),
@@ -129,10 +118,13 @@ async def create_quality_template(

        return QualityCheckTemplateResponse.from_orm(template)

    except HTTPException:
        raise
    except ValueError as e:
        # Business rule validation errors
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except Exception as e:
        await db.rollback()
        logger.error("Error creating quality template",
                     error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(
@@ -153,9 +145,9 @@ async def get_quality_template(
):
    """Get a specific quality check template"""
    try:
        repo = QualityTemplateRepository(db)
        service = QualityTemplateService(db)

        template = await repo.get_by_tenant_and_id(
        template = await service.get_template(
            tenant_id=str(tenant_id),
            template_id=template_id
        )
@@ -195,12 +187,13 @@ async def update_quality_template(
):
    """Update a quality check template"""
    try:
        repo = QualityTemplateRepository(db)
        service = QualityTemplateService(db)

        # Get existing template
        template = await repo.get_by_tenant_and_id(
        # Update template via service (handles validation and business rules)
        template = await service.update_template(
            tenant_id=str(tenant_id),
            template_id=template_id
            template_id=template_id,
            template_data=template_data
        )

        if not template:
@@ -209,37 +202,21 @@ async def update_quality_template(
                detail="Quality template not found"
            )

        # Check if template code already exists (if being updated)
        if template_data.template_code and template_data.template_code != template.template_code:
            code_exists = await repo.check_template_code_exists(
                tenant_id=str(tenant_id),
                template_code=template_data.template_code,
                exclude_id=template_id
            )
            if code_exists:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=f"Template code '{template_data.template_code}' already exists"
                )

        # Update template fields
        update_data = template_data.dict(exclude_unset=True)
        for field, value in update_data.items():
            setattr(template, field, value)

        await db.commit()
        await db.refresh(template)

        logger.info("Updated quality template",
                    template_id=str(template_id),
                    tenant_id=str(tenant_id))

        return QualityCheckTemplateResponse.from_orm(template)

    except ValueError as e:
        # Business rule validation errors
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except HTTPException:
        raise
    except Exception as e:
        await db.rollback()
        logger.error("Error updating quality template",
                     error=str(e),
                     template_id=str(template_id),
@@ -322,9 +295,9 @@ async def get_templates_for_stage(
):
    """Get all quality templates applicable to a specific process stage"""
    try:
        repo = QualityTemplateRepository(db)
        service = QualityTemplateService(db)

        templates = await repo.get_templates_for_stage(
        templates = await service.get_templates_for_stage(
            tenant_id=str(tenant_id),
            stage=stage,
            is_active=is_active
@@ -367,50 +340,20 @@ async def duplicate_quality_template(
):
    """Duplicate an existing quality check template"""
    try:
        repo = QualityTemplateRepository(db)
        service = QualityTemplateService(db)

        # Get existing template
        original = await repo.get_by_tenant_and_id(
        # Duplicate template via service (handles business rules)
        duplicate = await service.duplicate_template(
            tenant_id=str(tenant_id),
            template_id=template_id
        )

        if not original:
        if not duplicate:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Quality template not found"
            )

        # Create duplicate
        duplicate_data = {
            'tenant_id': original.tenant_id,
            'name': f"{original.name} (Copy)",
            'template_code': f"{original.template_code}_copy" if original.template_code else None,
            'check_type': original.check_type,
            'category': original.category,
            'description': original.description,
            'instructions': original.instructions,
            'parameters': original.parameters,
            'thresholds': original.thresholds,
            'scoring_criteria': original.scoring_criteria,
            'is_active': original.is_active,
            'is_required': original.is_required,
            'is_critical': original.is_critical,
            'weight': original.weight,
            'min_value': original.min_value,
            'max_value': original.max_value,
            'target_value': original.target_value,
            'unit': original.unit,
            'tolerance_percentage': original.tolerance_percentage,
            'applicable_stages': original.applicable_stages,
            'created_by': UUID(current_user["sub"])
        }

        duplicate = QualityCheckTemplate(**duplicate_data)
        db.add(duplicate)
        await db.commit()
        await db.refresh(duplicate)

        logger.info("Duplicated quality template",
                    original_id=str(template_id),
                    duplicate_id=str(duplicate.id),
@@ -421,7 +364,6 @@ async def duplicate_quality_template(
    except HTTPException:
        raise
    except Exception as e:
        await db.rollback()
        logger.error("Error duplicating quality template",
                     error=str(e),
                     template_id=str(template_id),
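The pattern across these endpoint hunks is uniform: routes stop instantiating QualityTemplateRepository and delegate to QualityTemplateService, which owns the business rules and raises ValueError for rule violations. A simplified sketch of the resulting layering, pieced together from the hunks above (signatures abbreviated, not the exact code):

# Route layer: translate HTTP <-> service calls, map errors to status codes
service = QualityTemplateService(db)
try:
    template = await service.duplicate_template(
        tenant_id=str(tenant_id),
        template_id=template_id
    )
except ValueError as e:
    # business-rule violation surfaced by the service
    raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
if not template:
    # the service signals "not found" with a falsy result
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Quality template not found")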
@@ -0,0 +1,278 @@
# services/production/app/repositories/production_alert_repository.py
"""
Production Alert Repository
Data access layer for production-specific alert detection and analysis
"""

from typing import List, Dict, Any
from uuid import UUID
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncSession
import structlog

logger = structlog.get_logger()


class ProductionAlertRepository:
    """Repository for production alert data access"""

    def __init__(self, session: AsyncSession):
        self.session = session

    async def get_capacity_issues(self) -> List[Dict[str, Any]]:
        """
        Get production capacity overload issues
        Returns batches that exceed daily capacity thresholds
        """
        try:
            query = text("""
                SELECT
                    pb.tenant_id,
                    DATE(pb.planned_start_time) as planned_date,
                    COUNT(*) as batch_count,
                    SUM(pb.planned_quantity) as total_planned,
                    'capacity_check' as capacity_status,
                    100.0 as capacity_percentage
                FROM production_batches pb
                WHERE pb.planned_start_time >= CURRENT_DATE
                    AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
                    AND pb.status IN ('planned', 'in_progress')
                GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
                HAVING COUNT(*) > 10
                ORDER BY total_planned DESC
                LIMIT 20
            """)

            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get capacity issues", error=str(e))
            raise

    async def get_production_delays(self) -> List[Dict[str, Any]]:
        """
        Get production batches that are delayed
        Returns batches in progress past their planned end time
        """
        try:
            query = text("""
                SELECT
                    pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
                    pb.planned_end_time as planned_completion_time, pb.actual_start_time,
                    pb.actual_end_time as estimated_completion_time, pb.status,
                    EXTRACT(minutes FROM (NOW() - pb.planned_end_time)) as delay_minutes,
                    COALESCE(pb.priority::text, 'medium') as priority_level,
                    1 as affected_orders
                FROM production_batches pb
                WHERE pb.status = 'in_progress'
                    AND pb.planned_end_time < NOW()
                    AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
                ORDER BY
                    CASE COALESCE(pb.priority::text, 'MEDIUM')
                        WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
                    END,
                    delay_minutes DESC
                LIMIT 50
            """)

            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get production delays", error=str(e))
            raise

    async def get_quality_issues(self) -> List[Dict[str, Any]]:
        """
        Get quality control failures
        Returns quality checks that failed within recent hours
        """
        try:
            query = text("""
                SELECT
                    qc.id, qc.tenant_id, qc.batch_id, qc.test_type,
                    qc.result_value, qc.min_acceptable, qc.max_acceptable,
                    qc.pass_fail, qc.defect_count,
                    qc.notes as qc_severity,
                    1 as total_failures,
                    pb.product_name, pb.batch_number,
                    qc.created_at
                FROM quality_checks qc
                JOIN production_batches pb ON pb.id = qc.batch_id
                WHERE qc.pass_fail = false
                    AND qc.created_at > NOW() - INTERVAL '4 hours'
                    AND qc.corrective_action_needed = true
                ORDER BY
                    CASE
                        WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
                        WHEN qc.pass_fail = false THEN 2
                        ELSE 3
                    END,
                    qc.created_at DESC
            """)

            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get quality issues", error=str(e))
            raise

    async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
        """
        Mark a quality check as acknowledged to avoid duplicate alerts
        """
        try:
            query = text("""
                UPDATE quality_checks
                SET acknowledged = true
                WHERE id = :id
            """)

            await self.session.execute(query, {"id": quality_check_id})
            await self.session.commit()

        except Exception as e:
            logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
            raise

    async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get equipment requiring attention
        Returns equipment with maintenance due or status issues
        """
        try:
            query = text("""
                SELECT
                    e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days,
                    EXTRACT(DAYS FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
                    COUNT(ea.id) as active_alerts
                FROM equipment e
                LEFT JOIN alerts ea ON ea.equipment_id = e.id
                    AND ea.is_active = true
                    AND ea.is_resolved = false
                WHERE e.is_active = true
                    AND e.tenant_id = :tenant_id
                GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
                         e.efficiency_percentage, e.uptime_percentage,
                         e.last_maintenance_date, e.next_maintenance_date,
                         e.maintenance_interval_days
                ORDER BY e.next_maintenance_date ASC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get production efficiency improvement recommendations
        Analyzes production patterns to identify optimization opportunities
        """
        try:
            query = text("""
                WITH efficiency_analysis AS (
                    SELECT
                        pb.tenant_id, pb.product_name,
                        AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
                        AVG(pb.planned_duration_minutes) as avg_planned_duration,
                        COUNT(*) as batch_count,
                        AVG(pb.yield_percentage) as avg_yield,
                        EXTRACT(hour FROM pb.actual_start_time) as start_hour
                    FROM production_batches pb
                    WHERE pb.status = 'COMPLETED'
                        AND pb.actual_completion_time > CURRENT_DATE - INTERVAL '30 days'
                        AND pb.tenant_id = :tenant_id
                    GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
                    HAVING COUNT(*) >= 3
                ),
                recommendations AS (
                    SELECT *,
                        CASE
                            WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
                            WHEN avg_yield < 85 THEN 'improve_yield'
                            WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
                            ELSE null
                        END as recommendation_type,
                        (avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
                    FROM efficiency_analysis
                )
                SELECT * FROM recommendations
                WHERE recommendation_type IS NOT NULL
                    AND efficiency_loss_percent > 10
                ORDER BY efficiency_loss_percent DESC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get energy consumption patterns for optimization analysis
        Returns consumption by equipment and hour of day
        """
        try:
            query = text("""
                SELECT
                    e.tenant_id, e.name as equipment_name, e.type,
                    AVG(ec.energy_consumption_kwh) as avg_energy,
                    EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
                    COUNT(*) as readings_count
                FROM equipment e
                JOIN energy_consumption ec ON ec.equipment_id = e.id
                WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
                    AND e.tenant_id = :tenant_id
                GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
                HAVING COUNT(*) >= 10
                ORDER BY avg_energy DESC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
        """
        Get production batches affected by ingredient shortage
        Returns batch IDs that use the specified ingredient
        """
        try:
            query = text("""
                SELECT DISTINCT pb.id
                FROM production_batches pb
                JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
                WHERE ri.ingredient_id = :ingredient_id
                    AND pb.status = 'in_progress'
                    AND pb.planned_completion_time > NOW()
            """)

            result = await self.session.execute(query, {"ingredient_id": ingredient_id})
            return [str(row.id) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get affected production batches", error=str(e), ingredient_id=ingredient_id)
            raise

    async def set_statement_timeout(self, timeout: str = '30s') -> None:
        """
        Set PostgreSQL statement timeout for the current session
        """
        try:
            await self.session.execute(text(f"SET statement_timeout = '{timeout}'"))
        except Exception as e:
            logger.error("Failed to set statement timeout", error=str(e))
            raise
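A sketch of how the alert service is expected to drive this new repository. The db_manager session factory follows the service code later in this commit; the loop body is illustrative only:

async with self.db_manager.get_session() as session:
    alert_repo = ProductionAlertRepository(session)
    await alert_repo.set_statement_timeout('30s')  # guard against long-running queries
    for issue in await alert_repo.get_capacity_issues():
        # Rows come back as plain dicts (dict(row._mapping)), so use key access
        await self._process_capacity_issue(issue['tenant_id'], issue)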
@@ -689,4 +689,148 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):

        except Exception as e:
            logger.error("Error counting filtered batches", error=str(e))
            raise DatabaseError(f"Failed to count filtered batches: {str(e)}")

    async def get_waste_analytics(
        self,
        tenant_id: UUID,
        start_date: datetime,
        end_date: datetime
    ) -> Dict[str, Any]:
        """
        Get production waste analytics for sustainability reporting

        Args:
            tenant_id: Tenant UUID
            start_date: Start date for analytics period
            end_date: End date for analytics period

        Returns:
            Dictionary with waste analytics data
        """
        try:
            query = text("""
                SELECT
                    COALESCE(SUM(waste_quantity), 0) as total_production_waste,
                    COALESCE(SUM(defect_quantity), 0) as total_defects,
                    COALESCE(SUM(planned_quantity), 0) as total_planned,
                    COALESCE(SUM(actual_quantity), 0) as total_actual,
                    COUNT(*) as total_batches,
                    COUNT(CASE WHEN forecast_id IS NOT NULL THEN 1 END) as ai_assisted_batches
                FROM production_batches
                WHERE tenant_id = :tenant_id
                    AND created_at BETWEEN :start_date AND :end_date
                    AND status IN ('COMPLETED', 'QUALITY_CHECK', 'FINISHED')
            """)

            result = await self.session.execute(
                query,
                {
                    'tenant_id': tenant_id,
                    'start_date': start_date,
                    'end_date': end_date
                }
            )
            row = result.fetchone()

            waste_data = {
                'total_production_waste': float(row.total_production_waste or 0),
                'total_defects': float(row.total_defects or 0),
                'total_planned': float(row.total_planned or 0),
                'total_actual': float(row.total_actual or 0),
                'total_batches': int(row.total_batches or 0),
                'ai_assisted_batches': int(row.ai_assisted_batches or 0)
            }

            logger.info(
                "Waste analytics calculated",
                tenant_id=str(tenant_id),
                total_waste=waste_data['total_production_waste'],
                batches=waste_data['total_batches']
            )

            return waste_data

        except Exception as e:
            logger.error("Error calculating waste analytics", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to calculate waste analytics: {str(e)}")

    async def get_baseline_metrics(self, tenant_id: UUID) -> Dict[str, Any]:
        """
        Get baseline production metrics from first 90 days

        Used by sustainability service to establish waste baseline
        for SDG 12.3 compliance tracking.

        Args:
            tenant_id: Tenant UUID

        Returns:
            Dictionary with baseline metrics data
        """
        try:
            query = text("""
                WITH first_batch AS (
                    SELECT MIN(created_at) as start_date
                    FROM production_batches
                    WHERE tenant_id = :tenant_id
                ),
                baseline_data AS (
                    SELECT
                        COALESCE(SUM(waste_quantity + defect_quantity), 0) as total_waste,
                        COALESCE(SUM(planned_quantity), 0) as total_production
                    FROM production_batches, first_batch
                    WHERE tenant_id = :tenant_id
                        AND created_at BETWEEN first_batch.start_date
                            AND first_batch.start_date + INTERVAL '90 days'
                        AND status IN ('COMPLETED', 'QUALITY_CHECK', 'FINISHED')
                )
                SELECT
                    total_waste,
                    total_production,
                    CASE
                        WHEN total_production > 0
                        THEN (total_waste / total_production * 100)
                        ELSE NULL
                    END as waste_percentage,
                    (SELECT start_date FROM first_batch) as baseline_start,
                    (SELECT start_date + INTERVAL '90 days' FROM first_batch) as baseline_end
                FROM baseline_data
            """)

            result = await self.session.execute(query, {'tenant_id': tenant_id})
            row = result.fetchone()

            if row and row.waste_percentage is not None and row.total_production > 100:
                # We have enough data for a real baseline
                baseline_data = {
                    'waste_percentage': float(row.waste_percentage),
                    'total_waste': float(row.total_waste),
                    'total_production': float(row.total_production),
                    'baseline_start': row.baseline_start,
                    'baseline_end': row.baseline_end,
                    'has_baseline': True
                }
            else:
                # Not enough data yet, return defaults
                baseline_data = {
                    'waste_percentage': None,
                    'total_waste': 0,
                    'total_production': 0,
                    'baseline_start': None,
                    'baseline_end': None,
                    'has_baseline': False
                }

            logger.info(
                "Baseline metrics calculated",
                tenant_id=str(tenant_id),
                has_baseline=baseline_data['has_baseline'],
                waste_percentage=baseline_data.get('waste_percentage')
            )

            return baseline_data

        except Exception as e:
            logger.error("Error calculating baseline metrics", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to calculate baseline metrics: {str(e)}")
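A quick worked example of the baseline CASE expression, with invented numbers: total_waste = 50 and total_production = 400 over the first 90 days gives

waste_percentage = 50 / 400 * 100  # = 12.5

With total_production = 0 the expression yields NULL, which the Python branch above (together with the total_production > 100 guard) maps to has_baseline = False.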
@@ -382,4 +382,51 @@ class ProductionScheduleRepository(ProductionBaseRepository):

        except Exception as e:
            logger.error("Error fetching today's schedule", error=str(e))
            raise DatabaseError(f"Failed to fetch today's schedule: {str(e)}")

    async def get_all_schedules_for_tenant(self, tenant_id: UUID) -> List[ProductionSchedule]:
        """Get all production schedules for a specific tenant"""
        try:
            from sqlalchemy import select
            from app.models.production import ProductionSchedule

            result = await self.session.execute(
                select(ProductionSchedule).where(
                    ProductionSchedule.tenant_id == tenant_id
                )
            )
            schedules = result.scalars().all()

            logger.info("Retrieved all schedules for tenant",
                        tenant_id=str(tenant_id),
                        count=len(schedules))

            return list(schedules)

        except Exception as e:
            logger.error("Error fetching all tenant schedules", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to fetch all tenant schedules: {str(e)}")

    async def archive_schedule(self, schedule: ProductionSchedule) -> None:
        """Archive a production schedule"""
        try:
            schedule.archived = True
            await self.session.commit()
            logger.info("Archived schedule", schedule_id=str(schedule.id))

        except Exception as e:
            logger.error("Error archiving schedule", error=str(e), schedule_id=str(schedule.id))
            raise DatabaseError(f"Failed to archive schedule: {str(e)}")

    async def cancel_schedule(self, schedule: ProductionSchedule, reason: str = None) -> None:
        """Cancel a production schedule"""
        try:
            schedule.status = "cancelled"
            if reason:
                schedule.notes = (schedule.notes or "") + f"\n{reason}"
            await self.session.commit()
            logger.info("Cancelled schedule", schedule_id=str(schedule.id))

        except Exception as e:
            logger.error("Error cancelling schedule", error=str(e), schedule_id=str(schedule.id))
            raise DatabaseError(f"Failed to cancel schedule: {str(e)}")
@@ -93,36 +93,18 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
        try:
            self._checks_performed += 1

            # Use a simpler query with timeout and connection management
            from sqlalchemy import text
            simplified_query = text("""
                SELECT
                    pb.tenant_id,
                    DATE(pb.planned_start_time) as planned_date,
                    COUNT(*) as batch_count,
                    SUM(pb.planned_quantity) as total_planned,
                    'capacity_check' as capacity_status,
                    100.0 as capacity_percentage -- Default value for processing
                FROM production_batches pb
                WHERE pb.planned_start_time >= CURRENT_DATE
                    AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
                    AND pb.status IN ('planned', 'in_progress')
                GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
                HAVING COUNT(*) > 10 -- Alert if more than 10 batches per day
                ORDER BY total_planned DESC
                LIMIT 20 -- Limit results to prevent excessive processing
            """)

            # Use timeout and proper session handling
            try:
                from app.repositories.production_alert_repository import ProductionAlertRepository

                async with self.db_manager.get_session() as session:
                    alert_repo = ProductionAlertRepository(session)
                    # Set statement timeout to prevent long-running queries
                    await session.execute(text("SET statement_timeout = '30s'"))
                    result = await session.execute(simplified_query)
                    capacity_issues = result.fetchall()
                    await alert_repo.set_statement_timeout('30s')
                    capacity_issues = await alert_repo.get_capacity_issues()

                    for issue in capacity_issues:
                        await self._process_capacity_issue(issue.tenant_id, issue)
                        await self._process_capacity_issue(issue['tenant_id'], issue)

            except asyncio.TimeoutError:
                logger.warning("Capacity check timed out", service=self.config.SERVICE_NAME)
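Note the access change in the loop above. SQLAlchemy Row objects supported attribute access, but the repository now returns dict(row._mapping), so call sites switch to key access; attribute access on a plain dict would raise AttributeError:

issue.tenant_id      # old: Row attribute access
issue['tenant_id']   # new: repository rows are plain dicts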
@@ -203,36 +185,14 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
        try:
            self._checks_performed += 1

            # Import text function at the beginning
            from sqlalchemy import text

            # Simplified query with timeout and proper error handling
            query = text("""
                SELECT
                    pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
                    pb.planned_end_time as planned_completion_time, pb.actual_start_time,
                    pb.actual_end_time as estimated_completion_time, pb.status,
                    EXTRACT(minutes FROM (NOW() - pb.planned_end_time)) as delay_minutes,
                    COALESCE(pb.priority::text, 'medium') as priority_level,
                    1 as affected_orders -- Default to 1 since we can't count orders
                FROM production_batches pb
                WHERE pb.status = 'in_progress'
                    AND pb.planned_end_time < NOW()
                    AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
                ORDER BY
                    CASE COALESCE(pb.priority::text, 'MEDIUM')
                        WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
                    END,
                    delay_minutes DESC
                LIMIT 50 -- Limit results to prevent excessive processing
            """)

            try:
                from app.repositories.production_alert_repository import ProductionAlertRepository

                async with self.db_manager.get_session() as session:
                    alert_repo = ProductionAlertRepository(session)
                    # Set statement timeout
                    await session.execute(text("SET statement_timeout = '30s'"))
                    result = await session.execute(query)
                    delays = result.fetchall()
                    await alert_repo.set_statement_timeout('30s')
                    delays = await alert_repo.get_production_delays()

                    for delay in delays:
                        await self._process_production_delay(delay)
@@ -300,44 +260,16 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
        """Check for quality control issues (alerts)"""
        try:
            self._checks_performed += 1

            # Fixed query using actual quality_checks table structure
            query = """
                SELECT
                    qc.id, qc.tenant_id, qc.batch_id, qc.check_type as test_type,
                    qc.quality_score as result_value,
                    qc.target_weight as min_acceptable,
                    (qc.target_weight * (1 + qc.tolerance_percentage/100)) as max_acceptable,
                    CASE
                        WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 'critical'
                        WHEN qc.pass_fail = false THEN 'major'
                        ELSE 'minor'
                    END as qc_severity,
                    qc.created_at,
                    pb.product_name, pb.batch_number,
                    COUNT(*) OVER (PARTITION BY qc.batch_id) as total_failures
                FROM quality_checks qc
                JOIN production_batches pb ON pb.id = qc.batch_id
                WHERE qc.pass_fail = false -- Use pass_fail instead of status
                    AND qc.created_at > NOW() - INTERVAL '4 hours'
                    AND qc.corrective_action_needed = true -- Use this instead of acknowledged
                ORDER BY
                    CASE
                        WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
                        WHEN qc.pass_fail = false THEN 2
                        ELSE 3
                    END,
                    qc.created_at DESC
            """

            from sqlalchemy import text

            from app.repositories.production_alert_repository import ProductionAlertRepository

            async with self.db_manager.get_session() as session:
                result = await session.execute(text(query))
                quality_issues = result.fetchall()

                alert_repo = ProductionAlertRepository(session)
                quality_issues = await alert_repo.get_quality_issues()

                for issue in quality_issues:
                    await self._process_quality_issue(issue)

        except Exception as e:
            # Skip quality checks if tables don't exist (graceful degradation)
            if "does not exist" in str(e) or "column" in str(e).lower() and "does not exist" in str(e).lower():
@@ -380,16 +312,14 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):

        # Mark as acknowledged to avoid duplicates - using proper session management
        try:
            from sqlalchemy import text
            from app.repositories.production_alert_repository import ProductionAlertRepository

            async with self.db_manager.get_session() as session:
                await session.execute(
                    text("UPDATE quality_checks SET acknowledged = true WHERE id = :id"),
                    {"id": issue['id']}
                )
                await session.commit()
                alert_repo = ProductionAlertRepository(session)
                await alert_repo.mark_quality_check_acknowledged(issue['id'])
        except Exception as e:
            logger.error("Failed to update quality check acknowledged status",
                         quality_check_id=str(issue.get('id')),
                         error=str(e))
            # Don't raise here to avoid breaking the main flow
@@ -402,49 +332,28 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
        """Check equipment status and maintenance requirements (alerts)"""
        try:
            self._checks_performed += 1

            # Query equipment that needs attention
            query = """
                SELECT
                    e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days,
                    EXTRACT(DAYS FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
                    COUNT(ea.id) as active_alerts
                FROM equipment e
                LEFT JOIN alerts ea ON ea.equipment_id = e.id
                    AND ea.is_active = true
                    AND ea.is_resolved = false
                WHERE e.is_active = true
                    AND e.tenant_id = $1
                GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
                         e.efficiency_percentage, e.uptime_percentage,
                         e.last_maintenance_date, e.next_maintenance_date,
                         e.maintenance_interval_days
                ORDER BY e.next_maintenance_date ASC
            """

            from app.repositories.production_alert_repository import ProductionAlertRepository

            tenants = await self.get_active_tenants()

            for tenant_id in tenants:
                try:
                    from sqlalchemy import text
                    # Use a separate session for each tenant to avoid connection blocking
                    async with self.db_manager.get_session() as session:
                        result = await session.execute(text(query), {"tenant_id": tenant_id})
                        equipment_list = result.fetchall()

                        alert_repo = ProductionAlertRepository(session)
                        equipment_list = await alert_repo.get_equipment_status(tenant_id)

                        for equipment in equipment_list:
                            # Process each equipment item in a non-blocking manner
                            await self._process_equipment_issue(equipment)

                except Exception as e:
                    logger.error("Error checking equipment status",
                                 tenant_id=str(tenant_id),
                                 error=str(e))
                    # Continue processing other tenants despite this error

        except Exception as e:
            logger.error("Equipment status check failed", error=str(e))
            self._errors_count += 1
@@ -530,61 +439,28 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
        """Generate production efficiency recommendations"""
        try:
            self._checks_performed += 1

            # Analyze production patterns for efficiency opportunities
            query = """
                WITH efficiency_analysis AS (
                    SELECT
                        pb.tenant_id, pb.product_name,
                        AVG(EXTRACT(minutes FROM (pb.actual_completion_time - pb.actual_start_time))) as avg_production_time,
                        AVG(pb.planned_duration_minutes) as avg_planned_duration,
                        COUNT(*) as batch_count,
                        AVG(pb.yield_percentage) as avg_yield,
                        EXTRACT(hour FROM pb.actual_start_time) as start_hour
                    FROM production_batches pb
                    WHERE pb.status = 'COMPLETED'
                        AND pb.actual_completion_time > CURRENT_DATE - INTERVAL '30 days'
                        AND pb.tenant_id = $1
                    GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
                    HAVING COUNT(*) >= 3
                ),
                recommendations AS (
                    SELECT *,
                        CASE
                            WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
                            WHEN avg_yield < 85 THEN 'improve_yield'
                            WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
                            ELSE null
                        END as recommendation_type,
                        (avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
                    FROM efficiency_analysis
                )
                SELECT * FROM recommendations
                WHERE recommendation_type IS NOT NULL
                    AND efficiency_loss_percent > 10
                ORDER BY efficiency_loss_percent DESC
            """

            from app.repositories.production_alert_repository import ProductionAlertRepository

            tenants = await self.get_active_tenants()

            for tenant_id in tenants:
                try:
                    from sqlalchemy import text
                    # Use a separate session per tenant to avoid connection blocking
                    async with self.db_manager.get_session() as session:
                        result = await session.execute(text(query), {"tenant_id": tenant_id})
                        recommendations = result.fetchall()

                        alert_repo = ProductionAlertRepository(session)
                        recommendations = await alert_repo.get_efficiency_recommendations(tenant_id)

                        for rec in recommendations:
                            # Process each recommendation individually
                            await self._generate_efficiency_recommendation(tenant_id, rec)

                except Exception as e:
                    logger.error("Error generating efficiency recommendations",
                                 tenant_id=str(tenant_id),
                                 error=str(e))
                    # Continue with other tenants despite this error

        except Exception as e:
            logger.error("Efficiency recommendations failed", error=str(e))
            self._errors_count += 1
@@ -659,41 +535,26 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
    async def generate_energy_recommendations(self):
        """Generate energy optimization recommendations"""
        try:
            # Analyze energy consumption patterns
            query = """
                SELECT
                    e.tenant_id, e.name as equipment_name, e.type,
                    AVG(ec.energy_consumption_kwh) as avg_energy,
                    EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
                    COUNT(*) as readings_count
                FROM equipment e
                JOIN energy_consumption ec ON ec.equipment_id = e.id
                WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
                    AND e.tenant_id = $1
                GROUP BY e.tenant_id, e.id, EXTRACT(hour FROM ec.recorded_at)
                HAVING COUNT(*) >= 10
                ORDER BY avg_energy DESC
            """

            from app.repositories.production_alert_repository import ProductionAlertRepository

            tenants = await self.get_active_tenants()

            for tenant_id in tenants:
                try:
                    from sqlalchemy import text
                    # Use a separate session per tenant to avoid connection blocking
                    async with self.db_manager.get_session() as session:
                        result = await session.execute(text(query), {"tenant_id": tenant_id})
                        energy_data = result.fetchall()

                        alert_repo = ProductionAlertRepository(session)
                        energy_data = await alert_repo.get_energy_consumption_patterns(tenant_id)

                        # Analyze for peak hours and optimization opportunities
                        await self._analyze_energy_patterns(tenant_id, energy_data)

                except Exception as e:
                    logger.error("Error generating energy recommendations",
                                 tenant_id=str(tenant_id),
                                 error=str(e))
                    # Continue with other tenants despite this error

        except Exception as e:
            logger.error("Energy recommendations failed", error=str(e))
            self._errors_count += 1
@@ -839,23 +700,14 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
    async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
        """Get production batches affected by ingredient shortage"""
        try:
            query = """
                SELECT DISTINCT pb.id
                FROM production_batches pb
                JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
                WHERE ri.ingredient_id = $1
                    AND pb.status = 'in_progress'
                    AND pb.planned_completion_time > NOW()
            """

            from sqlalchemy import text
            from app.repositories.production_alert_repository import ProductionAlertRepository

            async with self.db_manager.get_session() as session:
                result_rows = await session.execute(text(query), {"ingredient_id": ingredient_id})
                result = result_rows.fetchall()
                return [str(row['id']) for row in result]

                alert_repo = ProductionAlertRepository(session)
                return await alert_repo.get_affected_production_batches(ingredient_id)

        except Exception as e:
            logger.error("Error getting affected production batches",
                         ingredient_id=ingredient_id,
                         error=str(e))
            return []
@@ -284,18 +284,10 @@ class ProductionSchedulerService(BaseAlertService, AlertServiceMixin):
    async def _get_schedule_by_date(self, session, tenant_id: UUID, schedule_date: date) -> Optional[Dict]:
        """Check if production schedule exists for date"""
        try:
            from sqlalchemy import select, and_
            from app.models.production import ProductionSchedule
            from app.repositories.production_schedule_repository import ProductionScheduleRepository

            result = await session.execute(
                select(ProductionSchedule).where(
                    and_(
                        ProductionSchedule.tenant_id == tenant_id,
                        ProductionSchedule.schedule_date == schedule_date
                    )
                )
            )
            schedule = result.scalars().first()
            schedule_repo = ProductionScheduleRepository(session)
            schedule = await schedule_repo.get_schedule_by_date(str(tenant_id), schedule_date)

            if schedule:
                return {"id": schedule.id, "status": schedule.status}
@@ -386,32 +378,27 @@ class ProductionSchedulerService(BaseAlertService, AlertServiceMixin):
        stats = {"archived": 0, "cancelled": 0, "escalated": 0}

        try:
            from app.repositories.production_schedule_repository import ProductionScheduleRepository

            async with self.db_manager.get_session() as session:
                from sqlalchemy import select, and_
                from app.models.production import ProductionSchedule
                schedule_repo = ProductionScheduleRepository(session)

                today = date.today()

                # Get all schedules for tenant
                result = await session.execute(
                    select(ProductionSchedule).where(
                        ProductionSchedule.tenant_id == tenant_id
                    )
                )
                schedules = result.scalars().all()
                schedules = await schedule_repo.get_all_schedules_for_tenant(tenant_id)

                for schedule in schedules:
                    schedule_age_days = (today - schedule.schedule_date).days

                    # Archive completed schedules older than 90 days
                    if schedule.status == "completed" and schedule_age_days > 90:
                        schedule.archived = True
                        await schedule_repo.archive_schedule(schedule)
                        stats["archived"] += 1

                    # Cancel draft schedules older than 7 days
                    elif schedule.status == "draft" and schedule_age_days > 7:
                        schedule.status = "cancelled"
                        schedule.notes = (schedule.notes or "") + "\nAuto-cancelled: stale draft schedule"
                        await schedule_repo.cancel_schedule(schedule, "Auto-cancelled: stale draft schedule")
                        stats["cancelled"] += 1

                    # Escalate overdue schedules
@@ -419,8 +406,6 @@ class ProductionSchedulerService(BaseAlertService, AlertServiceMixin):
                        await self._send_schedule_escalation_alert(tenant_id, schedule.id)
                        stats["escalated"] += 1

                await session.commit()

        except Exception as e:
            logger.error("Error in tenant schedule cleanup",
                         tenant_id=str(tenant_id), error=str(e))
@@ -1528,4 +1528,100 @@ class ProductionService:
        except Exception as e:
            logger.error("Error deleting equipment",
                         error=str(e), equipment_id=str(equipment_id), tenant_id=str(tenant_id))
            raise

    # ================================================================
    # SUSTAINABILITY / WASTE ANALYTICS
    # ================================================================

    async def get_waste_analytics(
        self,
        tenant_id: UUID,
        start_date: datetime,
        end_date: datetime
    ) -> Dict[str, Any]:
        """
        Get production waste analytics for sustainability tracking

        Called by Inventory Service's sustainability module
        to calculate environmental impact and SDG 12.3 compliance.
        """
        try:
            async with self.database_manager.get_session() as session:
                from app.repositories.production_batch_repository import ProductionBatchRepository

                # Use repository for waste analytics
                batch_repo = ProductionBatchRepository(session)
                waste_data = await batch_repo.get_waste_analytics(
                    tenant_id=tenant_id,
                    start_date=start_date,
                    end_date=end_date
                )

                return waste_data

        except Exception as e:
            logger.error(
                "Error calculating waste analytics",
                tenant_id=str(tenant_id),
                error=str(e)
            )
            raise

    async def get_baseline_metrics(self, tenant_id: UUID) -> Dict[str, Any]:
        """
        Get baseline production metrics from first 90 days

        Used by sustainability service to establish waste baseline
        for SDG 12.3 compliance tracking.
        """
        try:
            async with self.database_manager.get_session() as session:
                from app.repositories.production_batch_repository import ProductionBatchRepository

                # Use repository for baseline metrics
                batch_repo = ProductionBatchRepository(session)
                baseline_raw = await batch_repo.get_baseline_metrics(tenant_id)

                # Transform repository data to match expected format
                if baseline_raw['has_baseline']:
                    baseline_data = {
                        'waste_percentage': baseline_raw['waste_percentage'],
                        'total_production_kg': baseline_raw['total_production'],
                        'total_waste_kg': baseline_raw['total_waste'],
                        'period': {
                            'start_date': baseline_raw['baseline_start'].isoformat() if baseline_raw['baseline_start'] else None,
                            'end_date': baseline_raw['baseline_end'].isoformat() if baseline_raw['baseline_end'] else None,
                            'type': 'first_90_days'
                        },
                        'data_available': True
                    }
                else:
                    # Not enough data yet - return indicator
                    baseline_data = {
                        'waste_percentage': 25.0,  # EU bakery industry average
                        'total_production_kg': 0,
                        'total_waste_kg': 0,
                        'period': {
                            'type': 'industry_average',
                            'note': 'Using EU bakery industry average of 25% as baseline'
                        },
                        'data_available': False
                    }

                logger.info(
                    "Baseline metrics retrieved",
                    tenant_id=str(tenant_id),
                    waste_percentage=baseline_data['waste_percentage'],
                    data_available=baseline_data['data_available']
                )

                return baseline_data

        except Exception as e:
            logger.error(
                "Error getting baseline metrics",
                tenant_id=str(tenant_id),
                error=str(e)
            )
            raise
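For reference, the two payload shapes get_baseline_metrics can return. Numbers in the first are illustrative; the 25.0 fallback is the EU industry average documented in the code above:

# Enough history (first 90 days of batches available):
{'waste_percentage': 12.5, 'total_production_kg': 400.0, 'total_waste_kg': 50.0,
 'period': {'start_date': '...', 'end_date': '...', 'type': 'first_90_days'},
 'data_available': True}

# Insufficient history:
{'waste_percentage': 25.0, 'total_production_kg': 0, 'total_waste_kg': 0,
 'period': {'type': 'industry_average', 'note': 'Using EU bakery industry average of 25% as baseline'},
 'data_available': False}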
@@ -1,56 +1,82 @@
 # services/production/app/services/quality_template_service.py
 """
-Quality Check Template Service for business logic and data operations
+Quality Check Template Service - Business Logic Layer
+Handles quality template operations with business rules and validation
 """
 
-from sqlalchemy.orm import Session
-from sqlalchemy import and_, or_, func
+from sqlalchemy.ext.asyncio import AsyncSession
 from typing import List, Optional, Tuple
 from uuid import UUID, uuid4
 from datetime import datetime, timezone
 import structlog
 
-from ..models.production import QualityCheckTemplate, ProcessStage
-from ..schemas.quality_templates import QualityCheckTemplateCreate, QualityCheckTemplateUpdate
+from app.models.production import QualityCheckTemplate, ProcessStage
+from app.schemas.quality_templates import QualityCheckTemplateCreate, QualityCheckTemplateUpdate
+from app.repositories.quality_template_repository import QualityTemplateRepository
 
 logger = structlog.get_logger()
 
 
 class QualityTemplateService:
-    """Service for managing quality check templates"""
+    """Service for managing quality check templates with business logic"""
 
-    def __init__(self, db: Session):
+    def __init__(self, db: AsyncSession):
         self.db = db
+        self.repository = QualityTemplateRepository(db)
 
-    def create_template(
+    async def create_template(
         self,
         tenant_id: str,
         template_data: QualityCheckTemplateCreate
     ) -> QualityCheckTemplate:
-        """Create a new quality check template"""
+        """
+        Create a new quality check template
+
-        # Validate template code uniqueness if provided
-        if template_data.template_code:
-            existing = self.db.query(QualityCheckTemplate).filter(
-                and_(
-                    QualityCheckTemplate.tenant_id == tenant_id,
-                    QualityCheckTemplate.template_code == template_data.template_code
+        Business Rules:
+        - Template code must be unique within tenant
+        - Validates template configuration
+        """
+        try:
+            # Business Rule: Validate template code uniqueness
+            if template_data.template_code:
+                exists = await self.repository.check_template_code_exists(
+                    tenant_id,
+                    template_data.template_code
                 )
-            ).first()
-            if existing:
-                raise ValueError(f"Template code '{template_data.template_code}' already exists")
+                if exists:
+                    raise ValueError(f"Template code '{template_data.template_code}' already exists")
 
-        # Create template
-        template = QualityCheckTemplate(
-            id=uuid4(),
-            tenant_id=UUID(tenant_id),
-            **template_data.dict()
-        )
+            # Business Rule: Validate template configuration
+            is_valid, errors = self._validate_template_configuration(template_data.dict())
+            if not is_valid:
+                raise ValueError(f"Invalid template configuration: {', '.join(errors)}")
 
-        self.db.add(template)
-        self.db.commit()
-        self.db.refresh(template)
+            # Create template via repository
+            template_dict = template_data.dict()
+            template_dict['id'] = uuid4()
+            template_dict['tenant_id'] = UUID(tenant_id)
 
-        return template
+            template = await self.repository.create(template_dict)
 
-    def get_templates(
+            logger.info("Quality template created",
+                        template_id=str(template.id),
+                        tenant_id=tenant_id,
+                        template_code=template.template_code)
+
+            return template
+
+        except ValueError as e:
+            logger.warning("Template creation validation failed",
+                           tenant_id=tenant_id,
+                           error=str(e))
+            raise
+        except Exception as e:
+            logger.error("Failed to create quality template",
+                         tenant_id=tenant_id,
+                         error=str(e))
+            raise
+
+    async def get_templates(
         self,
         tenant_id: str,
         stage: Optional[ProcessStage] = None,
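The inline uniqueness query that create_template used to run (and that update_template below replaces the same way) is now delegated to the repository. A minimal sketch of what check_template_code_exists might look like, assuming an EXISTS query over the same model; the real implementation lives in app/repositories/quality_template_repository.py and may differ:

from typing import Optional
from uuid import UUID

from sqlalchemy import exists, select
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.production import QualityCheckTemplate


async def check_template_code_exists(
    session: AsyncSession,
    tenant_id: str,
    template_code: str,
    exclude_id: Optional[UUID] = None,
) -> bool:
    # EXISTS query scoped to the tenant; exclude_id lets update_template
    # ignore the row currently being updated.
    conditions = [
        QualityCheckTemplate.tenant_id == tenant_id,
        QualityCheckTemplate.template_code == template_code,
    ]
    if exclude_id is not None:
        conditions.append(QualityCheckTemplate.id != exclude_id)
    stmt = select(exists().where(*conditions))
    return bool((await session.execute(stmt)).scalar())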
@@ -59,225 +85,349 @@ class QualityTemplateService:
         skip: int = 0,
         limit: int = 100
     ) -> Tuple[List[QualityCheckTemplate], int]:
-        """Get quality check templates with filtering and pagination"""
+        """
+        Get quality check templates with filtering and pagination
+
-        query = self.db.query(QualityCheckTemplate).filter(
-            QualityCheckTemplate.tenant_id == tenant_id
-        )
+        Business Rules:
+        - Default to active templates only
+        - Limit maximum results per page
+        """
+        try:
+            # Business Rule: Enforce maximum limit
+            if limit > 1000:
+                logger.warning("Template list limit capped at 1000",
+                               tenant_id=tenant_id,
+                               requested_limit=limit)
+                limit = 1000
 
-        # Apply filters
-        if is_active is not None:
-            query = query.filter(QualityCheckTemplate.is_active == is_active)
 
-        if check_type:
-            query = query.filter(QualityCheckTemplate.check_type == check_type)
 
-        if stage:
-            # Filter by applicable stages (JSON array contains stage)
-            query = query.filter(
-                func.json_contains(
-                    QualityCheckTemplate.applicable_stages,
-                    f'"{stage.value}"'
-                )
+            templates, total = await self.repository.get_templates_by_tenant(
+                tenant_id=tenant_id,
+                stage=stage,
+                check_type=check_type,
+                is_active=is_active,
+                skip=skip,
+                limit=limit
             )
 
-        # Get total count
-        total = query.count()
+            logger.debug("Retrieved quality templates",
+                         tenant_id=tenant_id,
+                         total=total,
+                         returned=len(templates))
 
-        # Apply pagination and ordering
-        templates = query.order_by(
-            QualityCheckTemplate.is_critical.desc(),
-            QualityCheckTemplate.is_required.desc(),
-            QualityCheckTemplate.name
-        ).offset(skip).limit(limit).all()
+            return templates, total
 
-        return templates, total
+        except Exception as e:
+            logger.error("Failed to get quality templates",
+                         tenant_id=tenant_id,
+                         error=str(e))
+            raise
 
-    def get_template(
+    async def get_template(
         self,
         tenant_id: str,
         template_id: UUID
     ) -> Optional[QualityCheckTemplate]:
-        """Get a specific quality check template"""
+        """
+        Get a specific quality check template
+
-        return self.db.query(QualityCheckTemplate).filter(
-            and_(
-                QualityCheckTemplate.tenant_id == tenant_id,
-                QualityCheckTemplate.id == template_id
-            )
-        ).first()
+        Business Rules:
+        - Template must belong to tenant
+        """
+        try:
+            template = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
 
-    def update_template(
+            if template:
+                logger.debug("Retrieved quality template",
+                             template_id=str(template_id),
+                             tenant_id=tenant_id)
+            else:
+                logger.warning("Quality template not found",
+                               template_id=str(template_id),
+                               tenant_id=tenant_id)
+
+            return template
+
+        except Exception as e:
+            logger.error("Failed to get quality template",
+                         template_id=str(template_id),
+                         tenant_id=tenant_id,
+                         error=str(e))
+            raise
+
+    async def update_template(
         self,
         tenant_id: str,
         template_id: UUID,
         template_data: QualityCheckTemplateUpdate
     ) -> Optional[QualityCheckTemplate]:
-        """Update a quality check template"""
+        """
+        Update a quality check template
+
-        template = self.get_template(tenant_id, template_id)
-        if not template:
-            return None
+        Business Rules:
+        - Template must exist and belong to tenant
+        - Template code must remain unique if changed
+        - Validates updated configuration
+        """
+        try:
+            # Business Rule: Template must exist
+            template = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
+            if not template:
+                logger.warning("Cannot update non-existent template",
+                               template_id=str(template_id),
+                               tenant_id=tenant_id)
+                return None
 
-        # Validate template code uniqueness if being updated
-        if template_data.template_code and template_data.template_code != template.template_code:
-            existing = self.db.query(QualityCheckTemplate).filter(
-                and_(
-                    QualityCheckTemplate.tenant_id == tenant_id,
-                    QualityCheckTemplate.template_code == template_data.template_code,
-                    QualityCheckTemplate.id != template_id
+            # Business Rule: Validate template code uniqueness if being updated
+            if template_data.template_code and template_data.template_code != template.template_code:
+                exists = await self.repository.check_template_code_exists(
+                    tenant_id,
+                    template_data.template_code,
+                    exclude_id=template_id
                 )
-            ).first()
-            if existing:
-                raise ValueError(f"Template code '{template_data.template_code}' already exists")
+                if exists:
+                    raise ValueError(f"Template code '{template_data.template_code}' already exists")
 
-        # Update fields
-        update_data = template_data.dict(exclude_unset=True)
-        for field, value in update_data.items():
-            setattr(template, field, value)
+            # Business Rule: Validate updated configuration
+            update_dict = template_data.dict(exclude_unset=True)
+            if update_dict:
+                # Merge with existing data for validation
+                full_data = template.__dict__.copy()
+                full_data.update(update_dict)
+                is_valid, errors = self._validate_template_configuration(full_data)
+                if not is_valid:
+                    raise ValueError(f"Invalid template configuration: {', '.join(errors)}")
 
-        template.updated_at = datetime.now(timezone.utc)
+            # Update via repository
+            update_dict['updated_at'] = datetime.now(timezone.utc)
+            updated_template = await self.repository.update(template_id, update_dict)
 
-        self.db.commit()
-        self.db.refresh(template)
+            logger.info("Quality template updated",
+                        template_id=str(template_id),
+                        tenant_id=tenant_id)
 
-        return template
+            return updated_template
 
-    def delete_template(
+        except ValueError as e:
+            logger.warning("Template update validation failed",
+                           template_id=str(template_id),
+                           tenant_id=tenant_id,
+                           error=str(e))
+            raise
+        except Exception as e:
+            logger.error("Failed to update quality template",
+                         template_id=str(template_id),
+                         tenant_id=tenant_id,
+                         error=str(e))
+            raise
+
+    async def delete_template(
         self,
         tenant_id: str,
         template_id: UUID
     ) -> bool:
-        """Delete a quality check template"""
+        """
+        Delete a quality check template
+
-        template = self.get_template(tenant_id, template_id)
-        if not template:
-            return False
+        Business Rules:
+        - Template must exist and belong to tenant
+        - Consider soft delete for audit trail (future enhancement)
+        """
+        try:
+            # Business Rule: Template must exist
+            template = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
+            if not template:
+                logger.warning("Cannot delete non-existent template",
+                               template_id=str(template_id),
+                               tenant_id=tenant_id)
+                return False
 
-        # Check if template is in use (you might want to add this check)
-        # For now, we'll allow deletion but in production you might want to:
-        # 1. Soft delete by setting is_active = False
-        # 2. Check for dependent quality checks
-        # 3. Prevent deletion if in use
+            # TODO: Business Rule - Check if template is in use before deletion
+            # For now, allow deletion. In production you might want to:
+            # 1. Soft delete by setting is_active = False
+            # 2. Check for dependent quality checks
+            # 3. Prevent deletion if actively used
 
-        self.db.delete(template)
-        self.db.commit()
+            success = await self.repository.delete(template_id)
 
-        return True
+            if success:
+                logger.info("Quality template deleted",
+                            template_id=str(template_id),
+                            tenant_id=tenant_id)
+            else:
+                logger.warning("Failed to delete quality template",
+                               template_id=str(template_id),
+                               tenant_id=tenant_id)
 
-    def get_templates_for_stage(
+            return success
+
+        except Exception as e:
+            logger.error("Failed to delete quality template",
+                         template_id=str(template_id),
+                         tenant_id=tenant_id,
+                         error=str(e))
+            raise
+
+    async def get_templates_for_stage(
         self,
         tenant_id: str,
         stage: ProcessStage,
         is_active: Optional[bool] = True
     ) -> List[QualityCheckTemplate]:
-        """Get all quality check templates applicable to a specific process stage"""
+        """
+        Get all quality check templates applicable to a specific process stage
+
-        query = self.db.query(QualityCheckTemplate).filter(
-            and_(
-                QualityCheckTemplate.tenant_id == tenant_id,
-                or_(
-                    # Templates that specify applicable stages
-                    func.json_contains(
-                        QualityCheckTemplate.applicable_stages,
-                        f'"{stage.value}"'
-                    ),
-                    # Templates that don't specify stages (applicable to all)
-                    QualityCheckTemplate.applicable_stages.is_(None)
-                )
+        Business Rules:
+        - Returns templates ordered by criticality
+        - Required templates come first
+        """
+        try:
+            templates = await self.repository.get_templates_for_stage(
+                tenant_id=tenant_id,
+                stage=stage,
+                is_active=is_active
+            )
-            )
-        )
 
-        if is_active is not None:
-            query = query.filter(QualityCheckTemplate.is_active == is_active)
+            logger.debug("Retrieved templates for stage",
+                         tenant_id=tenant_id,
+                         stage=stage.value,
+                         count=len(templates))
 
-        return query.order_by(
-            QualityCheckTemplate.is_critical.desc(),
-            QualityCheckTemplate.is_required.desc(),
-            QualityCheckTemplate.weight.desc(),
-            QualityCheckTemplate.name
-        ).all()
+            return templates
 
-    def duplicate_template(
+        except Exception as e:
+            logger.error("Failed to get templates for stage",
+                         tenant_id=tenant_id,
+                         stage=stage.value if stage else None,
+                         error=str(e))
+            raise
+
+    async def duplicate_template(
         self,
         tenant_id: str,
         template_id: UUID
     ) -> Optional[QualityCheckTemplate]:
-        """Duplicate an existing quality check template"""
+        """
+        Duplicate an existing quality check template
+
-        original = self.get_template(tenant_id, template_id)
-        if not original:
-            return None
+        Business Rules:
+        - Original template must exist
+        - Duplicate gets modified name and code
+        - All other attributes copied
+        """
+        try:
+            # Business Rule: Original must exist
+            original = await self.repository.get_by_tenant_and_id(tenant_id, template_id)
+            if not original:
+                logger.warning("Cannot duplicate non-existent template",
+                               template_id=str(template_id),
+                               tenant_id=tenant_id)
+                return None
 
-        # Create duplicate with modified name and code
-        duplicate_data = {
-            'name': f"{original.name} (Copy)",
-            'template_code': f"{original.template_code}_copy" if original.template_code else None,
-            'check_type': original.check_type,
-            'category': original.category,
-            'description': original.description,
-            'instructions': original.instructions,
-            'parameters': original.parameters,
-            'thresholds': original.thresholds,
-            'scoring_criteria': original.scoring_criteria,
-            'is_active': original.is_active,
-            'is_required': original.is_required,
-            'is_critical': original.is_critical,
-            'weight': original.weight,
-            'min_value': original.min_value,
-            'max_value': original.max_value,
-            'target_value': original.target_value,
-            'unit': original.unit,
-            'tolerance_percentage': original.tolerance_percentage,
-            'applicable_stages': original.applicable_stages,
-            'created_by': original.created_by
-        }
+            # Business Rule: Create duplicate with modified identifiers
+            duplicate_data = {
+                'name': f"{original.name} (Copy)",
+                'template_code': f"{original.template_code}_copy" if original.template_code else None,
+                'check_type': original.check_type,
+                'category': original.category,
+                'description': original.description,
+                'instructions': original.instructions,
+                'parameters': original.parameters,
+                'thresholds': original.thresholds,
+                'scoring_criteria': original.scoring_criteria,
+                'is_active': original.is_active,
+                'is_required': original.is_required,
+                'is_critical': original.is_critical,
+                'weight': original.weight,
+                'min_value': original.min_value,
+                'max_value': original.max_value,
+                'target_value': original.target_value,
+                'unit': original.unit,
+                'tolerance_percentage': original.tolerance_percentage,
+                'applicable_stages': original.applicable_stages,
+                'created_by': original.created_by
+            }
 
-        create_data = QualityCheckTemplateCreate(**duplicate_data)
-        return self.create_template(tenant_id, create_data)
+            create_data = QualityCheckTemplateCreate(**duplicate_data)
+            duplicate = await self.create_template(tenant_id, create_data)
 
-    def get_templates_by_recipe_config(
+            logger.info("Quality template duplicated",
+                        original_id=str(template_id),
+                        duplicate_id=str(duplicate.id),
+                        tenant_id=tenant_id)
+
+            return duplicate
+
+        except Exception as e:
+            logger.error("Failed to duplicate quality template",
+                         template_id=str(template_id),
+                         tenant_id=tenant_id,
+                         error=str(e))
+            raise
+
+    async def get_templates_by_recipe_config(
         self,
         tenant_id: str,
         stage: ProcessStage,
         recipe_quality_config: dict
     ) -> List[QualityCheckTemplate]:
-        """Get quality check templates based on recipe configuration"""
+        """
+        Get quality check templates based on recipe configuration
+
-        # Extract template IDs from recipe configuration for the specific stage
-        stage_config = recipe_quality_config.get('stages', {}).get(stage.value)
-        if not stage_config:
-            return []
+        Business Rules:
+        - Returns only active templates
+        - Filters by template IDs specified in recipe config
+        - Ordered by criticality
+        """
+        try:
+            # Business Rule: Extract template IDs from recipe config
+            stage_config = recipe_quality_config.get('stages', {}).get(stage.value)
+            if not stage_config:
+                logger.debug("No quality config for stage",
+                             tenant_id=tenant_id,
+                             stage=stage.value)
+                return []
 
-        template_ids = stage_config.get('template_ids', [])
-        if not template_ids:
-            return []
+            template_ids = stage_config.get('template_ids', [])
+            if not template_ids:
+                logger.debug("No template IDs in config",
+                             tenant_id=tenant_id,
+                             stage=stage.value)
+                return []
 
-        # Get templates by IDs
-        templates = self.db.query(QualityCheckTemplate).filter(
-            and_(
-                QualityCheckTemplate.tenant_id == tenant_id,
-                QualityCheckTemplate.id.in_([UUID(tid) for tid in template_ids]),
-                QualityCheckTemplate.is_active == True
-            )
-        ).order_by(
-            QualityCheckTemplate.is_critical.desc(),
-            QualityCheckTemplate.is_required.desc(),
-            QualityCheckTemplate.weight.desc()
-        ).all()
+            # Get templates by IDs via repository
+            template_ids_uuid = [UUID(tid) for tid in template_ids]
+            templates = await self.repository.get_templates_by_ids(tenant_id, template_ids_uuid)
 
-        return templates
+            logger.debug("Retrieved templates by recipe config",
+                         tenant_id=tenant_id,
+                         stage=stage.value,
+                         count=len(templates))
 
-    def validate_template_configuration(
+            return templates
+
+        except Exception as e:
+            logger.error("Failed to get templates by recipe config",
+                         tenant_id=tenant_id,
+                         stage=stage.value if stage else None,
+                         error=str(e))
+            raise
+
+    def _validate_template_configuration(
         self,
-        tenant_id: str,
         template_data: dict
     ) -> Tuple[bool, List[str]]:
-        """Validate quality check template configuration"""
+        """
+        Validate quality check template configuration (business rules)
+
+        Business Rules:
+        - Measurement checks require unit
+        - Min value must be less than max value
+        - Visual checks require scoring criteria
+        - Process stages must be valid
+        """
         errors = []
 
-        # Validate check type specific requirements
+        # Business Rule: Type-specific validation
         check_type = template_data.get('check_type')
 
         if check_type in ['measurement', 'temperature', 'weight']:
@@ -290,12 +440,12 @@ class QualityTemplateService:
         if min_val is not None and max_val is not None and min_val >= max_val:
             errors.append("Minimum value must be less than maximum value")
 
-        # Validate scoring criteria
+        # Business Rule: Visual checks need scoring criteria
         scoring = template_data.get('scoring_criteria', {})
         if check_type == 'visual' and not scoring:
             errors.append("Visual checks require scoring criteria")
 
-        # Validate process stages
+        # Business Rule: Validate process stages
         stages = template_data.get('applicable_stages', [])
         if stages:
             valid_stages = [stage.value for stage in ProcessStage]
@@ -303,4 +453,11 @@ class QualityTemplateService:
         if invalid_stages:
             errors.append(f"Invalid process stages: {invalid_stages}")
 
-        return len(errors) == 0, errors
+        is_valid = len(errors) == 0
+
+        if not is_valid:
+            logger.warning("Template configuration validation failed",
+                           check_type=check_type,
+                           errors=errors)
+
+        return is_valid, errors
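Taken together, the rewritten service assumes an async QualityTemplateRepository with roughly the surface below. The method names and argument shapes are taken from the call sites above; modelling it as a typing.Protocol and the exact return annotations are illustrative assumptions, not the actual repository module:

from typing import Any, Dict, List, Optional, Protocol, Tuple
from uuid import UUID

from app.models.production import ProcessStage, QualityCheckTemplate


class QualityTemplateRepositoryProtocol(Protocol):
    # Uniqueness check used by create_template / update_template
    async def check_template_code_exists(
        self, tenant_id: str, template_code: str, exclude_id: Optional[UUID] = None
    ) -> bool: ...

    # Tenant-scoped single fetch used by get/update/delete/duplicate
    async def get_by_tenant_and_id(
        self, tenant_id: str, template_id: UUID
    ) -> Optional[QualityCheckTemplate]: ...

    # Filtered, paginated listing returning (templates, total)
    async def get_templates_by_tenant(
        self,
        tenant_id: str,
        stage: Optional[ProcessStage] = None,
        check_type: Optional[str] = None,
        is_active: Optional[bool] = None,
        skip: int = 0,
        limit: int = 100,
    ) -> Tuple[List[QualityCheckTemplate], int]: ...

    async def get_templates_for_stage(
        self, tenant_id: str, stage: ProcessStage, is_active: Optional[bool] = True
    ) -> List[QualityCheckTemplate]: ...

    async def get_templates_by_ids(
        self, tenant_id: str, template_ids: List[UUID]
    ) -> List[QualityCheckTemplate]: ...

    async def create(self, data: Dict[str, Any]) -> QualityCheckTemplate: ...

    async def update(
        self, template_id: UUID, data: Dict[str, Any]
    ) -> Optional[QualityCheckTemplate]: ...

    async def delete(self, template_id: UUID) -> bool: ...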