demo seed change

This commit is contained in:
Urtzi Alfaro
2025-12-13 23:57:54 +01:00
parent f3688dfb04
commit ff830a3415
299 changed files with 20328 additions and 19485 deletions

View File

@@ -9,10 +9,12 @@ from .production_batch_repository import ProductionBatchRepository
from .production_schedule_repository import ProductionScheduleRepository
from .production_capacity_repository import ProductionCapacityRepository
from .quality_check_repository import QualityCheckRepository
from .equipment_repository import EquipmentRepository
# Public API of the repositories package: names re-exported via
# `from <package> import *`. Keep this list in sync with the imports above
# whenever a repository module is added or removed.
__all__ = [
"ProductionBatchRepository",
"ProductionScheduleRepository",
"ProductionCapacityRepository",
"QualityCheckRepository",
"EquipmentRepository",
]

View File

@@ -3,7 +3,7 @@ Equipment Repository
"""
from typing import Optional, List, Dict, Any
from sqlalchemy import select, func, and_
from sqlalchemy import select, func, and_, text
from sqlalchemy.ext.asyncio import AsyncSession
from uuid import UUID
import structlog
@@ -219,3 +219,168 @@ class EquipmentRepository(ProductionBaseRepository):
equipment_id=str(equipment_id),
tenant_id=str(tenant_id))
raise
# ================================================================
# ALERT-RELATED METHODS (migrated from production_alert_repository)
# ================================================================
async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
    """
    Get equipment requiring attention.

    Returns one row per active piece of equipment for the tenant with its
    maintenance scheduling fields, the number of days until the next
    maintenance is due, and the count of unresolved active alerts attached
    to it, ordered by soonest next_maintenance_date first.

    Args:
        tenant_id: Tenant whose equipment is listed.

    Returns:
        List of plain dicts, one per equipment row.

    Raises:
        Re-raises any database error after logging it.
    """
    try:
        # Fixed: EXTRACT requires the singular unit DAY — PostgreSQL does not
        # recognize the plural "DAYS" (the sibling maintenance query already
        # used DAY correctly).
        query = text("""
            SELECT
                e.id, e.tenant_id, e.name, e.type, e.status,
                e.efficiency_percentage, e.uptime_percentage,
                e.last_maintenance_date, e.next_maintenance_date,
                e.maintenance_interval_days,
                EXTRACT(DAY FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
                COUNT(ea.id) as active_alerts
            FROM equipment e
            LEFT JOIN alerts ea ON ea.equipment_id = e.id
                AND ea.is_active = true
                AND ea.is_resolved = false
            WHERE e.is_active = true
            AND e.tenant_id = :tenant_id
            GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
                e.efficiency_percentage, e.uptime_percentage,
                e.last_maintenance_date, e.next_maintenance_date,
                e.maintenance_interval_days
            ORDER BY e.next_maintenance_date ASC
        """)
        result = await self.session.execute(query, {"tenant_id": tenant_id})
        return [dict(row._mapping) for row in result.fetchall()]
    except Exception as e:
        logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
        raise
async def get_equipment_needing_maintenance(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
    """
    Return equipment whose next_maintenance_date has already passed.

    Only OPERATIONAL, active equipment with a maintenance date on record is
    considered; at most 50 rows are returned, earliest due date first.

    Args:
        tenant_id: Optional tenant to restrict the result to.
    """
    try:
        sql = """
            SELECT
                e.id, e.name, e.type, e.tenant_id,
                e.last_maintenance_date,
                e.next_maintenance_date,
                EXTRACT(DAY FROM (NOW() - e.next_maintenance_date)) as days_overdue
            FROM equipment e
            WHERE e.next_maintenance_date IS NOT NULL
            AND e.next_maintenance_date < NOW()
            AND e.status = 'OPERATIONAL'
            AND e.is_active = true
        """
        bind_params: Dict[str, Any] = {}
        if tenant_id:
            sql += " AND e.tenant_id = :tenant_id"
            bind_params["tenant_id"] = tenant_id
        sql += " ORDER BY e.next_maintenance_date ASC LIMIT 50"

        cursor = await self.session.execute(text(sql), bind_params)
        overdue_equipment: List[Dict[str, Any]] = []
        for rec in cursor.fetchall():
            last_date = rec.last_maintenance_date
            next_date = rec.next_maintenance_date
            overdue_equipment.append({
                'id': str(rec.id),
                'name': rec.name,
                'type': rec.type,
                'tenant_id': str(rec.tenant_id),
                'last_maintenance_date': last_date.isoformat() if last_date else None,
                'next_maintenance_date': next_date.isoformat() if next_date else None,
                'days_overdue': int(rec.days_overdue) if rec.days_overdue else 0
            })
        return overdue_equipment
    except Exception as e:
        logger.error("Failed to get equipment needing maintenance", error=str(e))
        raise
async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
    """
    Return production-efficiency improvement recommendations for a tenant.

    Analyzes COMPLETED batches from the last 30 days, grouped by product and
    start hour (groups with fewer than 3 batches are dropped), and flags
    groups as 'reduce_production_time', 'improve_yield', or
    'avoid_afternoon_production'. Only groups with more than 10% efficiency
    loss are returned, worst first.
    """
    try:
        # NOTE(review): the WHERE clause filters on pb.actual_completion_time
        # while durations use pb.actual_end_time — confirm both columns exist
        # on production_batches.
        stmt = text("""
            WITH efficiency_analysis AS (
                SELECT
                    pb.tenant_id, pb.product_name,
                    AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
                    AVG(pb.planned_duration_minutes) as avg_planned_duration,
                    COUNT(*) as batch_count,
                    AVG(pb.yield_percentage) as avg_yield,
                    EXTRACT(hour FROM pb.actual_start_time) as start_hour
                FROM production_batches pb
                WHERE pb.status = 'COMPLETED'
                AND pb.actual_completion_time > CURRENT_DATE - INTERVAL '30 days'
                AND pb.tenant_id = :tenant_id
                GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
                HAVING COUNT(*) >= 3
            ),
            recommendations AS (
                SELECT *,
                    CASE
                        WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
                        WHEN avg_yield < 85 THEN 'improve_yield'
                        WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
                        ELSE null
                    END as recommendation_type,
                    (avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
                FROM efficiency_analysis
            )
            SELECT * FROM recommendations
            WHERE recommendation_type IS NOT NULL
            AND efficiency_loss_percent > 10
            ORDER BY efficiency_loss_percent DESC
        """)
        cursor = await self.session.execute(stmt, {"tenant_id": tenant_id})
        rows = cursor.fetchall()
        return [dict(rec._mapping) for rec in rows]
    except Exception as e:
        logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
        raise
async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
    """
    Return average energy use per equipment and hour of day for a tenant.

    Aggregates the last 30 days of energy_consumption readings joined to the
    tenant's equipment; groups with fewer than 10 readings are dropped.
    Ordered by highest average energy first.
    """
    try:
        stmt = text("""
            SELECT
                e.tenant_id, e.name as equipment_name, e.type,
                AVG(ec.energy_consumption_kwh) as avg_energy,
                EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
                COUNT(*) as readings_count
            FROM equipment e
            JOIN energy_consumption ec ON ec.equipment_id = e.id
            WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
            AND e.tenant_id = :tenant_id
            GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
            HAVING COUNT(*) >= 10
            ORDER BY avg_energy DESC
        """)
        cursor = await self.session.execute(stmt, {"tenant_id": tenant_id})
        patterns: List[Dict[str, Any]] = []
        for rec in cursor.fetchall():
            patterns.append(dict(rec._mapping))
        return patterns
    except Exception as e:
        logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
        raise

View File

@@ -1,279 +0,0 @@
# services/production/app/repositories/production_alert_repository.py
"""
Production Alert Repository
Data access layer for production-specific alert detection and analysis
"""
from typing import List, Dict, Any
from uuid import UUID
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncSession
import structlog
logger = structlog.get_logger()
class ProductionAlertRepository:
    """Repository for production alert data access.

    Thin data-access layer over raw SQL used by the alerting subsystem:
    capacity overloads, delayed batches, quality failures, equipment
    maintenance status, and efficiency/energy analysis. Methods return
    plain dicts/lists so callers do not depend on ORM models.
    """

    def __init__(self, session: "AsyncSession"):
        # Session is injected; the caller owns connection/transaction
        # lifetime (only mark_quality_check_acknowledged commits itself).
        self.session = session

    async def get_capacity_issues(self) -> List[Dict[str, Any]]:
        """
        Get production capacity overload issues.

        Returns per-tenant, per-day aggregates for the next 3 days where more
        than 10 PENDING/IN_PROGRESS batches are planned (top 20 by volume).
        """
        try:
            query = text("""
                SELECT
                    pb.tenant_id,
                    DATE(pb.planned_start_time) as planned_date,
                    COUNT(*) as batch_count,
                    SUM(pb.planned_quantity) as total_planned,
                    'capacity_check' as capacity_status,
                    100.0 as capacity_percentage
                FROM production_batches pb
                WHERE pb.planned_start_time >= CURRENT_DATE
                AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
                AND pb.status IN ('PENDING', 'IN_PROGRESS')
                GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
                HAVING COUNT(*) > 10
                ORDER BY total_planned DESC
                LIMIT 20
            """)
            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get capacity issues", error=str(e))
            raise

    async def get_production_delays(self) -> List[Dict[str, Any]]:
        """
        Get production batches that are delayed.

        Returns IN_PROGRESS batches whose planned end time passed within the
        last 24 hours, highest priority and longest delay first (max 50).
        """
        try:
            # Fixed: EXTRACT(minutes FROM interval) only yields the minutes
            # *component* (0-59), under-reporting delays longer than an hour;
            # total minutes are computed from EPOCH instead.
            query = text("""
                SELECT
                    pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
                    pb.planned_end_time as planned_completion_time, pb.actual_start_time,
                    pb.actual_end_time as estimated_completion_time, pb.status,
                    EXTRACT(EPOCH FROM (NOW() - pb.planned_end_time)) / 60 as delay_minutes,
                    COALESCE(pb.priority::text, 'medium') as priority_level,
                    1 as affected_orders
                FROM production_batches pb
                WHERE pb.status = 'IN_PROGRESS'
                AND pb.planned_end_time < NOW()
                AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
                ORDER BY
                    CASE COALESCE(pb.priority::text, 'MEDIUM')
                        WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
                    END,
                    delay_minutes DESC
                LIMIT 50
            """)
            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get production delays", error=str(e))
            raise

    async def get_quality_issues(self) -> List[Dict[str, Any]]:
        """
        Get quality control failures.

        Returns failed quality checks from the last 4 hours that require
        corrective action, joined to their batch; checks with more than 5
        defects sort first, then most recent.
        """
        try:
            query = text("""
                SELECT
                    qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
                    qc.quality_score, qc.within_tolerance,
                    qc.pass_fail, qc.defect_count,
                    qc.check_notes as qc_severity,
                    1 as total_failures,
                    pb.product_name, pb.batch_number,
                    qc.created_at,
                    qc.process_stage
                FROM quality_checks qc
                JOIN production_batches pb ON pb.id = qc.batch_id
                WHERE qc.pass_fail = false
                AND qc.created_at > NOW() - INTERVAL '4 hours'
                AND qc.corrective_action_needed = true
                ORDER BY
                    CASE
                        WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
                        WHEN qc.pass_fail = false THEN 2
                        ELSE 3
                    END,
                    qc.created_at DESC
            """)
            result = await self.session.execute(query)
            return [dict(row._mapping) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get quality issues", error=str(e))
            raise

    async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
        """
        Mark a quality check as acknowledged to avoid duplicate alerts.

        Commits immediately so the acknowledgement is durable.
        """
        try:
            query = text("""
                UPDATE quality_checks
                SET acknowledged = true
                WHERE id = :id
            """)
            await self.session.execute(query, {"id": quality_check_id})
            await self.session.commit()
        except Exception as e:
            logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
            raise

    async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get equipment requiring attention.

        Returns active equipment for the tenant with maintenance fields, days
        until next maintenance, and the number of unresolved active alerts.
        """
        try:
            # Fixed: PostgreSQL EXTRACT uses the singular unit DAY, not "DAYS".
            query = text("""
                SELECT
                    e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days,
                    EXTRACT(DAY FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
                    COUNT(ea.id) as active_alerts
                FROM equipment e
                LEFT JOIN alerts ea ON ea.equipment_id = e.id
                    AND ea.is_active = true
                    AND ea.is_resolved = false
                WHERE e.is_active = true
                AND e.tenant_id = :tenant_id
                GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days
                ORDER BY e.next_maintenance_date ASC
            """)
            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get production efficiency improvement recommendations.

        Analyzes COMPLETED batches from the last 30 days grouped by product
        and start hour (min. 3 batches); returns groups losing more than 10%
        efficiency, tagged with a recommendation_type, worst first.
        """
        try:
            query = text("""
                WITH efficiency_analysis AS (
                    SELECT
                        pb.tenant_id, pb.product_name,
                        AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
                        AVG(pb.planned_duration_minutes) as avg_planned_duration,
                        COUNT(*) as batch_count,
                        AVG(pb.yield_percentage) as avg_yield,
                        EXTRACT(hour FROM pb.actual_start_time) as start_hour
                    FROM production_batches pb
                    WHERE pb.status = 'COMPLETED'
                    AND pb.actual_completion_time > CURRENT_DATE - INTERVAL '30 days'
                    AND pb.tenant_id = :tenant_id
                    GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
                    HAVING COUNT(*) >= 3
                ),
                recommendations AS (
                    SELECT *,
                        CASE
                            WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
                            WHEN avg_yield < 85 THEN 'improve_yield'
                            WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
                            ELSE null
                        END as recommendation_type,
                        (avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
                    FROM efficiency_analysis
                )
                SELECT * FROM recommendations
                WHERE recommendation_type IS NOT NULL
                AND efficiency_loss_percent > 10
                ORDER BY efficiency_loss_percent DESC
            """)
            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get energy consumption patterns for optimization analysis.

        Returns average consumption by equipment and hour of day over the
        last 30 days; groups with fewer than 10 readings are dropped.
        """
        try:
            query = text("""
                SELECT
                    e.tenant_id, e.name as equipment_name, e.type,
                    AVG(ec.energy_consumption_kwh) as avg_energy,
                    EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
                    COUNT(*) as readings_count
                FROM equipment e
                JOIN energy_consumption ec ON ec.equipment_id = e.id
                WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
                AND e.tenant_id = :tenant_id
                GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
                HAVING COUNT(*) >= 10
                ORDER BY avg_energy DESC
            """)
            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
        """
        Get production batches affected by an ingredient shortage.

        Returns IDs of IN_PROGRESS batches whose recipe uses the ingredient
        and whose planned completion is still in the future.
        """
        try:
            query = text("""
                SELECT DISTINCT pb.id
                FROM production_batches pb
                JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
                WHERE ri.ingredient_id = :ingredient_id
                AND pb.status = 'IN_PROGRESS'
                AND pb.planned_completion_time > NOW()
            """)
            result = await self.session.execute(query, {"ingredient_id": ingredient_id})
            return [str(row.id) for row in result.fetchall()]
        except Exception as e:
            logger.error("Failed to get affected production batches", error=str(e), ingredient_id=ingredient_id)
            raise

    @staticmethod
    def _validate_timeout(timeout: str) -> str:
        """Validate a PostgreSQL statement_timeout literal (e.g. '30s', '500ms').

        Raises ValueError for anything that is not a plain integer optionally
        followed by a recognized time unit, preventing SQL injection through
        the timeout string.
        """
        import re
        if not re.fullmatch(r"\d+(us|ms|s|min|h|d)?", timeout):
            raise ValueError(f"Invalid statement_timeout value: {timeout!r}")
        return timeout

    async def set_statement_timeout(self, timeout: str = '30s') -> None:
        """
        Set PostgreSQL statement timeout for the current session.

        The value is validated before being interpolated: SET does not accept
        bind parameters, so the previous f-string interpolation of an
        arbitrary string was an SQL-injection vector.
        """
        try:
            safe_timeout = self._validate_timeout(timeout)
            await self.session.execute(text(f"SET statement_timeout = '{safe_timeout}'"))
        except Exception as e:
            logger.error("Failed to set statement timeout", error=str(e))
            raise

View File

@@ -850,3 +850,162 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):
except Exception as e:
logger.error("Error calculating baseline metrics", error=str(e), tenant_id=str(tenant_id))
raise DatabaseError(f"Failed to calculate baseline metrics: {str(e)}")
# ================================================================
# ALERT-RELATED METHODS (migrated from production_alert_repository)
# ================================================================
async def get_capacity_issues(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
    """
    Return production capacity overload issues.

    Aggregates PENDING/IN_PROGRESS batches planned over the next 3 days per
    tenant and day; only days with more than 10 batches are reported (top 20
    by planned volume).

    Args:
        tenant_id: Optional tenant filter; all tenants when omitted.
    """
    try:
        base_sql = """
            SELECT
                pb.tenant_id,
                DATE(pb.planned_start_time) as planned_date,
                COUNT(*) as batch_count,
                SUM(pb.planned_quantity) as total_planned,
                'capacity_check' as capacity_status,
                100.0 as capacity_percentage
            FROM production_batches pb
            WHERE pb.planned_start_time >= CURRENT_DATE
            AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
            AND pb.status IN ('PENDING', 'IN_PROGRESS')
        """
        tail_sql = """
            GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
            HAVING COUNT(*) > 10
            ORDER BY total_planned DESC
            LIMIT 20
        """
        bind_params: Dict[str, Any] = {}
        tenant_clause = ""
        if tenant_id:
            tenant_clause = " AND pb.tenant_id = :tenant_id"
            bind_params["tenant_id"] = tenant_id

        cursor = await self.session.execute(text(base_sql + tenant_clause + tail_sql), bind_params)
        return [dict(rec._mapping) for rec in cursor.fetchall()]
    except Exception as e:
        logger.error("Failed to get capacity issues", error=str(e))
        raise DatabaseError(f"Failed to get capacity issues: {str(e)}")
async def get_production_delays(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
    """
    Get production batches that are delayed.

    Returns IN_PROGRESS batches whose planned end time passed within the
    last 24 hours, ordered by priority (URGENT, HIGH, then others) and
    longest delay first, capped at 50 rows.

    Args:
        tenant_id: Optional tenant ID to filter by.

    Raises:
        DatabaseError: wrapping any underlying database failure.
    """
    try:
        # Fixed: EXTRACT(minutes FROM interval) returns only the minutes
        # *component* (0-59) and "minutes" is not a valid PostgreSQL EXTRACT
        # unit; total delay in minutes is derived from EPOCH seconds instead.
        query_str = """
            SELECT
                pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
                pb.planned_end_time as planned_completion_time, pb.actual_start_time,
                pb.actual_end_time as estimated_completion_time, pb.status,
                EXTRACT(EPOCH FROM (NOW() - pb.planned_end_time)) / 60 as delay_minutes,
                COALESCE(pb.priority::text, 'medium') as priority_level,
                1 as affected_orders
            FROM production_batches pb
            WHERE pb.status = 'IN_PROGRESS'
            AND pb.planned_end_time < NOW()
            AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
        """
        params = {}
        if tenant_id:
            query_str += " AND pb.tenant_id = :tenant_id"
            params["tenant_id"] = tenant_id
        # NOTE(review): priority_level defaults to lowercase 'medium' while the
        # ordering CASE compares uppercase labels — confirm the stored casing.
        query_str += """
            ORDER BY
                CASE COALESCE(pb.priority::text, 'MEDIUM')
                    WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
                END,
                delay_minutes DESC
            LIMIT 50
        """
        result = await self.session.execute(text(query_str), params)
        return [dict(row._mapping) for row in result.fetchall()]
    except Exception as e:
        logger.error("Failed to get production delays", error=str(e))
        raise DatabaseError(f"Failed to get production delays: {str(e)}")
async def get_batches_with_delayed_start(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
    """
    Return batches that should have started but have not.

    Finds PENDING batches planned for today whose planned start time is more
    than 30 minutes in the past and that have no actual start recorded; at
    most 50 rows, earliest planned start first.

    Args:
        tenant_id: Optional tenant to restrict the result to.
    """
    try:
        sql = """
            SELECT
                pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
                pb.planned_start_time as scheduled_start_time, pb.status
            FROM production_batches pb
            WHERE pb.status = 'PENDING'
            AND pb.planned_start_time < NOW() - INTERVAL '30 minutes'
            AND pb.actual_start_time IS NULL
            AND pb.planned_start_time >= CURRENT_DATE
            AND pb.planned_start_time < CURRENT_DATE + INTERVAL '1 day'
        """
        bind_params: Dict[str, Any] = {}
        if tenant_id:
            sql += " AND pb.tenant_id = :tenant_id"
            bind_params["tenant_id"] = tenant_id
        sql += " ORDER BY pb.planned_start_time ASC LIMIT 50"

        cursor = await self.session.execute(text(sql), bind_params)
        delayed: List[Dict[str, Any]] = []
        for rec in cursor.fetchall():
            start = rec.scheduled_start_time
            delayed.append({
                'id': str(rec.id),
                'tenant_id': str(rec.tenant_id),
                'product_name': rec.product_name,
                'batch_number': rec.batch_number,
                'scheduled_start_time': start.isoformat() if start else None
            })
        return delayed
    except Exception as e:
        logger.error("Failed to get batches with delayed start", error=str(e))
        raise DatabaseError(f"Failed to get batches with delayed start: {str(e)}")
async def get_affected_production_batches(self, ingredient_id: str) -> List[str]:
    """
    Return IDs of production batches affected by an ingredient shortage.

    A batch counts as affected when it is IN_PROGRESS, its recipe uses the
    ingredient, and its planned completion is still in the future.
    """
    try:
        # NOTE(review): this filters on pb.planned_completion_time while the
        # delay queries use pb.planned_end_time — confirm the column name.
        stmt = text("""
            SELECT DISTINCT pb.id
            FROM production_batches pb
            JOIN recipe_ingredients ri ON ri.recipe_id = pb.recipe_id
            WHERE ri.ingredient_id = :ingredient_id
            AND pb.status = 'IN_PROGRESS'
            AND pb.planned_completion_time > NOW()
        """)
        cursor = await self.session.execute(stmt, {"ingredient_id": ingredient_id})
        batch_ids: List[str] = []
        for rec in cursor.fetchall():
            batch_ids.append(str(rec.id))
        return batch_ids
    except Exception as e:
        logger.error("Failed to get affected production batches", error=str(e), ingredient_id=ingredient_id)
        raise DatabaseError(f"Failed to get affected production batches: {str(e)}")

View File

@@ -366,4 +366,76 @@ class QualityCheckRepository(ProductionBaseRepository):
except Exception as e:
logger.error("Error fetching quality checks with filters", error=str(e))
raise DatabaseError(f"Failed to fetch quality checks with filters: {str(e)}")
raise DatabaseError(f"Failed to fetch quality checks with filters: {str(e)}")
# ================================================================
# ALERT-RELATED METHODS (migrated from production_alert_repository)
# ================================================================
async def get_quality_issues(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
    """
    Get quality control failures.

    Returns failed quality checks from the last 4 hours that require
    corrective action, joined to their production batch; checks with more
    than 5 defects sort first, then most recent.

    Args:
        tenant_id: Optional tenant ID to filter by.

    Raises:
        DatabaseError: wrapping any underlying database failure.
    """
    try:
        # Removed an unused `from app.models.production import ProductionBatch`
        # import — this method runs raw SQL and never touches the ORM model.
        query_str = """
            SELECT
                qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
                qc.quality_score, qc.within_tolerance,
                qc.pass_fail, qc.defect_count,
                qc.check_notes as qc_severity,
                1 as total_failures,
                pb.product_name, pb.batch_number,
                qc.created_at,
                qc.process_stage
            FROM quality_checks qc
            JOIN production_batches pb ON pb.id = qc.batch_id
            WHERE qc.pass_fail = false
            AND qc.created_at > NOW() - INTERVAL '4 hours'
            AND qc.corrective_action_needed = true
        """
        params = {}
        if tenant_id:
            query_str += " AND qc.tenant_id = :tenant_id"
            params["tenant_id"] = tenant_id
        query_str += """
            ORDER BY
                CASE
                    WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
                    WHEN qc.pass_fail = false THEN 2
                    ELSE 3
                END,
                qc.created_at DESC
        """
        result = await self.session.execute(text(query_str), params)
        return [dict(row._mapping) for row in result.fetchall()]
    except Exception as e:
        logger.error("Failed to get quality issues", error=str(e))
        raise DatabaseError(f"Failed to get quality issues: {str(e)}")
async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
    """
    Mark a quality check as acknowledged to avoid duplicate alerts.

    Commits immediately so the acknowledgement is durable even if the
    caller does not manage the transaction.
    """
    try:
        stmt = text("""
            UPDATE quality_checks
            SET acknowledged = true
            WHERE id = :id
        """)
        await self.session.execute(stmt, {"id": quality_check_id})
        await self.session.commit()
    except Exception as e:
        logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
        raise DatabaseError(f"Failed to mark quality check acknowledged: {str(e)}")