Improve the frontend and repository layer
@@ -0,0 +1,278 @@
# services/production/app/repositories/production_alert_repository.py
"""
Production Alert Repository

Data access layer for production-specific alert detection and analysis
"""

from typing import List, Dict, Any
from uuid import UUID

from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncSession
import structlog

logger = structlog.get_logger()


class ProductionAlertRepository:
    """Repository for production alert data access"""

    def __init__(self, session: AsyncSession):
        self.session = session

    async def get_capacity_issues(self) -> List[Dict[str, Any]]:
        """
        Get production capacity overload issues

        Returns batches that exceed daily capacity thresholds
        """
        try:
            query = text("""
                SELECT
                    pb.tenant_id,
                    DATE(pb.planned_start_time) as planned_date,
                    COUNT(*) as batch_count,
                    SUM(pb.planned_quantity) as total_planned,
                    'capacity_check' as capacity_status,
                    100.0 as capacity_percentage
                FROM production_batches pb
                WHERE pb.planned_start_time >= CURRENT_DATE
                  AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
                  AND pb.status IN ('planned', 'in_progress')
                GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
                HAVING COUNT(*) > 10
                ORDER BY total_planned DESC
                LIMIT 20
            """)

            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get capacity issues", error=str(e))
            raise

    async def get_production_delays(self) -> List[Dict[str, Any]]:
        """
        Get production batches that are delayed

        Returns batches in progress past their planned end time
        """
        try:
            query = text("""
                SELECT
                    pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
                    pb.planned_end_time as planned_completion_time, pb.actual_start_time,
                    pb.actual_end_time as estimated_completion_time, pb.status,
                    -- EPOCH / 60 yields total minutes overdue; EXTRACT(minutes ...)
                    -- would return only the minutes field of the interval.
                    EXTRACT(EPOCH FROM (NOW() - pb.planned_end_time)) / 60 as delay_minutes,
                    COALESCE(pb.priority::text, 'medium') as priority_level,
                    1 as affected_orders
                FROM production_batches pb
                WHERE pb.status = 'in_progress'
                  AND pb.planned_end_time < NOW()
                  AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
                ORDER BY
                    -- UPPER() keeps the ranking correct whether priority values
                    -- are stored upper- or lower-case.
                    CASE UPPER(COALESCE(pb.priority::text, 'MEDIUM'))
                        WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
                    END,
                    delay_minutes DESC
                LIMIT 50
            """)

            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get production delays", error=str(e))
            raise

    async def get_quality_issues(self) -> List[Dict[str, Any]]:
        """
        Get quality control failures

        Returns quality checks that failed within recent hours
        """
        try:
            query = text("""
                SELECT
                    qc.id, qc.tenant_id, qc.batch_id, qc.test_type,
                    qc.result_value, qc.min_acceptable, qc.max_acceptable,
                    qc.pass_fail, qc.defect_count,
                    qc.notes as qc_severity,
                    1 as total_failures,
                    pb.product_name, pb.batch_number,
                    qc.created_at
                FROM quality_checks qc
                JOIN production_batches pb ON pb.id = qc.batch_id
                WHERE qc.pass_fail = false
                  AND qc.created_at > NOW() - INTERVAL '4 hours'
                  AND qc.corrective_action_needed = true
                ORDER BY
                    CASE
                        WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
                        WHEN qc.pass_fail = false THEN 2
                        ELSE 3
                    END,
                    qc.created_at DESC
            """)

            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get quality issues", error=str(e))
            raise

    async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
        """
        Mark a quality check as acknowledged to avoid duplicate alerts
        """
        try:
            query = text("""
                UPDATE quality_checks
                SET acknowledged = true
                WHERE id = :id
            """)

            await self.session.execute(query, {"id": quality_check_id})
            await self.session.commit()

        except Exception as e:
            logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
            raise

    async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get equipment requiring attention

        Returns equipment with maintenance due or status issues
        """
        try:
            query = text("""
                SELECT
                    e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days,
                    EXTRACT(DAYS FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
                    COUNT(ea.id) as active_alerts
                FROM equipment e
                LEFT JOIN alerts ea ON ea.equipment_id = e.id
                    AND ea.is_active = true
                    AND ea.is_resolved = false
                WHERE e.is_active = true
                  AND e.tenant_id = :tenant_id
                GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days
                ORDER BY e.next_maintenance_date ASC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get production efficiency improvement recommendations

        Analyzes production patterns to identify optimization opportunities
        """
        try:
            query = text("""
                WITH efficiency_analysis AS (
                    SELECT
                        pb.tenant_id, pb.product_name,
                        AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
                        AVG(pb.planned_duration_minutes) as avg_planned_duration,
                        COUNT(*) as batch_count,
                        AVG(pb.yield_percentage) as avg_yield,
                        EXTRACT(hour FROM pb.actual_start_time) as start_hour
                    FROM production_batches pb
                    WHERE pb.status = 'COMPLETED'
                      AND pb.actual_end_time > CURRENT_DATE - INTERVAL '30 days'
                      AND pb.tenant_id = :tenant_id
                    GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
                    HAVING COUNT(*) >= 3
                ),
                recommendations AS (
                    SELECT *,
                        CASE
                            WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
                            WHEN avg_yield < 85 THEN 'improve_yield'
                            WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
                            ELSE null
                        END as recommendation_type,
                        (avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
                    FROM efficiency_analysis
                )
                SELECT * FROM recommendations
                WHERE recommendation_type IS NOT NULL
                  AND efficiency_loss_percent > 10
                ORDER BY efficiency_loss_percent DESC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get energy consumption patterns for optimization analysis

        Returns consumption by equipment and hour of day
        """
        try:
            query = text("""
                SELECT
                    e.tenant_id, e.name as equipment_name, e.type,
                    AVG(ec.energy_consumption_kwh) as avg_energy,
                    EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
                    COUNT(*) as readings_count
                FROM equipment e
                JOIN energy_consumption ec ON ec.equipment_id = e.id
                WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
                  AND e.tenant_id = :tenant_id
                GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
                HAVING COUNT(*) >= 10
                ORDER BY avg_energy DESC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
        """
        Get production batches affected by ingredient shortage

        Returns batch IDs that use the specified ingredient
        """
        try:
            query = text("""
                SELECT DISTINCT pb.id
                FROM production_batches pb
                JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
                WHERE ri.ingredient_id = :ingredient_id
                  AND pb.status = 'in_progress'
                  AND pb.planned_end_time > NOW()
            """)

            result = await self.session.execute(query, {"ingredient_id": ingredient_id})
            return [str(row.id) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get affected production batches", error=str(e), ingredient_id=ingredient_id)
            raise

    async def set_statement_timeout(self, timeout: str = '30s') -> None:
        """
        Set PostgreSQL statement timeout for the current session
        """
        try:
            # set_config() with a bound parameter avoids interpolating the
            # timeout value directly into the SQL string.
            await self.session.execute(
                text("SELECT set_config('statement_timeout', :timeout, false)"),
                {"timeout": timeout},
            )
        except Exception as e:
            logger.error("Failed to set statement timeout", error=str(e))
            raise
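For context, a minimal usage sketch that is not part of the commit: the repository takes an externally managed AsyncSession, so a caller would wire it up roughly as below. The async_session_factory name and the alert-service shape are assumptions for illustration.

    # Hypothetical caller; assumes an async session factory is configured elsewhere.
    from app.repositories.production_alert_repository import ProductionAlertRepository

    async def check_production_alerts(async_session_factory) -> dict:
        async with async_session_factory() as session:
            repo = ProductionAlertRepository(session)
            # Guard the analytics queries with a server-side timeout first.
            await repo.set_statement_timeout('30s')
            return {
                "capacity": await repo.get_capacity_issues(),
                "delays": await repo.get_production_delays(),
                "quality": await repo.get_quality_issues(),
            }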
@@ -689,4 +689,148 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):

        except Exception as e:
            logger.error("Error counting filtered batches", error=str(e))
            raise DatabaseError(f"Failed to count filtered batches: {str(e)}")

    async def get_waste_analytics(
        self,
        tenant_id: UUID,
        start_date: datetime,
        end_date: datetime
    ) -> Dict[str, Any]:
        """
        Get production waste analytics for sustainability reporting

        Args:
            tenant_id: Tenant UUID
            start_date: Start date for analytics period
            end_date: End date for analytics period

        Returns:
            Dictionary with waste analytics data
        """
        try:
            query = text("""
                SELECT
                    COALESCE(SUM(waste_quantity), 0) as total_production_waste,
                    COALESCE(SUM(defect_quantity), 0) as total_defects,
                    COALESCE(SUM(planned_quantity), 0) as total_planned,
                    COALESCE(SUM(actual_quantity), 0) as total_actual,
                    COUNT(*) as total_batches,
                    COUNT(CASE WHEN forecast_id IS NOT NULL THEN 1 END) as ai_assisted_batches
                FROM production_batches
                WHERE tenant_id = :tenant_id
                  AND created_at BETWEEN :start_date AND :end_date
                  AND status IN ('COMPLETED', 'QUALITY_CHECK', 'FINISHED')
            """)

            result = await self.session.execute(
                query,
                {
                    'tenant_id': tenant_id,
                    'start_date': start_date,
                    'end_date': end_date
                }
            )
            row = result.fetchone()

            waste_data = {
                'total_production_waste': float(row.total_production_waste or 0),
                'total_defects': float(row.total_defects or 0),
                'total_planned': float(row.total_planned or 0),
                'total_actual': float(row.total_actual or 0),
                'total_batches': int(row.total_batches or 0),
                'ai_assisted_batches': int(row.ai_assisted_batches or 0)
            }

            logger.info(
                "Waste analytics calculated",
                tenant_id=str(tenant_id),
                total_waste=waste_data['total_production_waste'],
                batches=waste_data['total_batches']
            )

            return waste_data

        except Exception as e:
            logger.error("Error calculating waste analytics", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to calculate waste analytics: {str(e)}")

    async def get_baseline_metrics(self, tenant_id: UUID) -> Dict[str, Any]:
        """
        Get baseline production metrics from first 90 days

        Used by sustainability service to establish waste baseline
        for SDG 12.3 compliance tracking.

        Args:
            tenant_id: Tenant UUID

        Returns:
            Dictionary with baseline metrics data
        """
        try:
            query = text("""
                WITH first_batch AS (
                    SELECT MIN(created_at) as start_date
                    FROM production_batches
                    WHERE tenant_id = :tenant_id
                ),
                baseline_data AS (
                    SELECT
                        COALESCE(SUM(waste_quantity + defect_quantity), 0) as total_waste,
                        COALESCE(SUM(planned_quantity), 0) as total_production
                    FROM production_batches, first_batch
                    WHERE tenant_id = :tenant_id
                      AND created_at BETWEEN first_batch.start_date
                          AND first_batch.start_date + INTERVAL '90 days'
                      AND status IN ('COMPLETED', 'QUALITY_CHECK', 'FINISHED')
                )
                SELECT
                    total_waste,
                    total_production,
                    CASE
                        WHEN total_production > 0
                        THEN (total_waste / total_production * 100)
                        ELSE NULL
                    END as waste_percentage,
                    (SELECT start_date FROM first_batch) as baseline_start,
                    (SELECT start_date + INTERVAL '90 days' FROM first_batch) as baseline_end
                FROM baseline_data
            """)

            result = await self.session.execute(query, {'tenant_id': tenant_id})
            row = result.fetchone()

            if row and row.waste_percentage is not None and row.total_production > 100:
                # We have enough data for a real baseline
                baseline_data = {
                    'waste_percentage': float(row.waste_percentage),
                    'total_waste': float(row.total_waste),
                    'total_production': float(row.total_production),
                    'baseline_start': row.baseline_start,
                    'baseline_end': row.baseline_end,
                    'has_baseline': True
                }
            else:
                # Not enough data yet, return defaults
                baseline_data = {
                    'waste_percentage': None,
                    'total_waste': 0,
                    'total_production': 0,
                    'baseline_start': None,
                    'baseline_end': None,
                    'has_baseline': False
                }

            logger.info(
                "Baseline metrics calculated",
                tenant_id=str(tenant_id),
                has_baseline=baseline_data['has_baseline'],
                waste_percentage=baseline_data.get('waste_percentage')
            )

            return baseline_data

        except Exception as e:
            logger.error("Error calculating baseline metrics", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to calculate baseline metrics: {str(e)}")
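A sketch of how the two analytics methods might feed an SDG 12.3 comparison; the reduction formula below is an assumption, not part of the commit. Current waste mirrors the baseline's definition: (waste + defects) over planned quantity.

    async def waste_vs_baseline(repo, tenant_id, start_date, end_date) -> dict:
        # Hypothetical service-layer helper combining both repository calls.
        baseline = await repo.get_baseline_metrics(tenant_id)
        current = await repo.get_waste_analytics(tenant_id, start_date, end_date)

        planned = current['total_planned']
        wasted = current['total_production_waste'] + current['total_defects']
        current_pct = (wasted / planned * 100) if planned else None

        reduction = None
        if baseline['has_baseline'] and current_pct is not None:
            # Positive value = percentage points of waste eliminated vs baseline.
            reduction = baseline['waste_percentage'] - current_pct
        return {'current_waste_pct': current_pct, 'reduction_vs_baseline': reduction}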
@@ -382,4 +382,51 @@ class ProductionScheduleRepository(ProductionBaseRepository):

        except Exception as e:
            logger.error("Error fetching today's schedule", error=str(e))
            raise DatabaseError(f"Failed to fetch today's schedule: {str(e)}")

    async def get_all_schedules_for_tenant(self, tenant_id: UUID) -> List[ProductionSchedule]:
        """Get all production schedules for a specific tenant"""
        try:
            from sqlalchemy import select
            from app.models.production import ProductionSchedule

            result = await self.session.execute(
                select(ProductionSchedule).where(
                    ProductionSchedule.tenant_id == tenant_id
                )
            )
            schedules = result.scalars().all()

            logger.info("Retrieved all schedules for tenant",
                        tenant_id=str(tenant_id),
                        count=len(schedules))

            return list(schedules)

        except Exception as e:
            logger.error("Error fetching all tenant schedules", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to fetch all tenant schedules: {str(e)}")

    async def archive_schedule(self, schedule: ProductionSchedule) -> None:
        """Archive a production schedule"""
        try:
            schedule.archived = True
            await self.session.commit()
            logger.info("Archived schedule", schedule_id=str(schedule.id))

        except Exception as e:
            logger.error("Error archiving schedule", error=str(e), schedule_id=str(schedule.id))
            raise DatabaseError(f"Failed to archive schedule: {str(e)}")

    async def cancel_schedule(self, schedule: ProductionSchedule, reason: str = None) -> None:
        """Cancel a production schedule"""
        try:
            schedule.status = "cancelled"
            if reason:
                schedule.notes = (schedule.notes or "") + f"\n{reason}"
            await self.session.commit()
            logger.info("Cancelled schedule", schedule_id=str(schedule.id))

        except Exception as e:
            logger.error("Error cancelling schedule", error=str(e), schedule_id=str(schedule.id))
            raise DatabaseError(f"Failed to cancel schedule: {str(e)}")
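Finally, a sketch of the schedule lifecycle these helpers enable; the status values and the created_at cutoff are assumptions about the ProductionSchedule model, not part of the commit.

    async def retire_tenant_schedules(repo, tenant_id, cutoff) -> int:
        # Hypothetical cleanup task: archive finished schedules, cancel stale ones.
        archived = 0
        for schedule in await repo.get_all_schedules_for_tenant(tenant_id):
            if schedule.status == "completed":  # assumed status value
                await repo.archive_schedule(schedule)
                archived += 1
            elif schedule.status == "draft" and schedule.created_at < cutoff:
                await repo.cancel_schedule(schedule, reason="Superseded by a newer schedule")
        return archived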