Initial commit - production deployment
services/production/app/repositories/__init__.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# ================================================================
# services/production/app/repositories/__init__.py
# ================================================================
"""
Repository layer for data access
"""

from .production_batch_repository import ProductionBatchRepository
from .production_schedule_repository import ProductionScheduleRepository
from .production_capacity_repository import ProductionCapacityRepository
from .quality_check_repository import QualityCheckRepository
from .equipment_repository import EquipmentRepository

__all__ = [
    "ProductionBatchRepository",
    "ProductionScheduleRepository",
    "ProductionCapacityRepository",
    "QualityCheckRepository",
    "EquipmentRepository",
]
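Note: every repository in this commit subclasses shared.database.repository.BaseRepository, which is not part of the diff. Reconstructed from the call sites below, its assumed surface is roughly the following sketch (illustrative only; the real shared class may differ):

# Assumed shape of the shared base class, inferred from how it is called in
# this commit -- not the actual shared.database.repository implementation.
from typing import Any, Dict, List, Optional, Type
from sqlalchemy.ext.asyncio import AsyncSession

class BaseRepository:
    def __init__(self, model: Type, session: AsyncSession, cache_ttl: Optional[int] = None):
        self.model = model
        self.session = session

    async def get(self, record_id) -> Optional[Any]: ...
    async def get_multi(self, *, skip: int = 0, limit: int = 100,
                        filters: Optional[Dict[str, Any]] = None,
                        order_by: Optional[str] = None,
                        order_desc: bool = False) -> List[Any]: ...
    async def create(self, data: Dict[str, Any]) -> Any: ...
    async def update(self, record_id, data: Dict[str, Any]) -> Any: ...
    async def delete(self, record_id) -> bool: ...
    async def count(self, filters: Optional[Dict[str, Any]] = None) -> int: ...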
services/production/app/repositories/base.py (new file, 217 lines)
@@ -0,0 +1,217 @@
"""
Base Repository for Production Service
Service-specific repository base class with production utilities
"""

from typing import Optional, List, Dict, Any, Type
from sqlalchemy.ext.asyncio import AsyncSession
from datetime import datetime, date
import structlog

from shared.database.repository import BaseRepository
from shared.database.exceptions import DatabaseError


logger = structlog.get_logger()


class ProductionBaseRepository(BaseRepository):
    """Base repository for production service with common production operations"""

    def __init__(self, model: Type, session: AsyncSession, cache_ttl: Optional[int] = 300):
        # Production data is more dynamic, so use a shorter cache time (5 minutes)
        super().__init__(model, session, cache_ttl)

    async def get_by_tenant_id(self, tenant_id: str, skip: int = 0, limit: int = 100) -> List:
        """Get records by tenant ID"""
        if hasattr(self.model, 'tenant_id'):
            return await self.get_multi(
                skip=skip,
                limit=limit,
                filters={"tenant_id": tenant_id},
                order_by="created_at",
                order_desc=True
            )
        return await self.get_multi(skip=skip, limit=limit)

    async def get_by_status(
        self,
        tenant_id: str,
        status: str,
        skip: int = 0,
        limit: int = 100
    ) -> List:
        """Get records by tenant and status"""
        if hasattr(self.model, 'status'):
            return await self.get_multi(
                skip=skip,
                limit=limit,
                filters={
                    "tenant_id": tenant_id,
                    "status": status
                },
                order_by="created_at",
                order_desc=True
            )
        return await self.get_by_tenant_id(tenant_id, skip, limit)

    async def get_by_date_range(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date,
        date_field: str = "created_at",
        skip: int = 0,
        limit: int = 100
    ) -> List:
        """Get records by tenant and date range"""
        try:
            start_datetime = datetime.combine(start_date, datetime.min.time())
            end_datetime = datetime.combine(end_date, datetime.max.time())

            filters = {
                "tenant_id": tenant_id,
                f"{date_field}__gte": start_datetime,
                f"{date_field}__lte": end_datetime
            }

            return await self.get_multi(
                skip=skip,
                limit=limit,
                filters=filters,
                order_by=date_field,
                order_desc=True
            )
        except Exception as e:
            logger.error("Error fetching records by date range",
                         error=str(e), tenant_id=tenant_id)
            raise DatabaseError(f"Failed to fetch records by date range: {str(e)}")

    async def get_active_records(
        self,
        tenant_id: str,
        active_field: str = "is_active",
        skip: int = 0,
        limit: int = 100
    ) -> List:
        """Get active records for a tenant"""
        if hasattr(self.model, active_field):
            return await self.get_multi(
                skip=skip,
                limit=limit,
                filters={
                    "tenant_id": tenant_id,
                    active_field: True
                },
                order_by="created_at",
                order_desc=True
            )
        return await self.get_by_tenant_id(tenant_id, skip, limit)

    def _validate_production_data(
        self,
        data: Dict[str, Any],
        required_fields: List[str]
    ) -> Dict[str, Any]:
        """Validate production data with required fields"""
        errors = []

        # Check required fields
        for field in required_fields:
            if field not in data or data[field] is None:
                errors.append(f"Missing required field: {field}")

        # Validate tenant_id format
        if "tenant_id" in data:
            try:
                import uuid
                uuid.UUID(str(data["tenant_id"]))
            except (ValueError, TypeError):
                errors.append("Invalid tenant_id format")

        # Validate datetime fields
        datetime_fields = ["planned_start_time", "planned_end_time", "actual_start_time", "actual_end_time"]
        for field in datetime_fields:
            if field in data and data[field] is not None:
                if not isinstance(data[field], (datetime, str)):
                    errors.append(f"Invalid datetime format for {field}")

        # Validate numeric fields (compare the converted value, not the raw
        # input, so numeric strings such as "5" are handled correctly)
        numeric_fields = ["planned_quantity", "actual_quantity", "quality_score", "yield_percentage"]
        for field in numeric_fields:
            if field in data and data[field] is not None:
                try:
                    value = float(data[field])
                    if value < 0:
                        errors.append(f"{field} cannot be negative")
                except (ValueError, TypeError):
                    errors.append(f"Invalid numeric value for {field}")

        # Validate percentage fields (0-100)
        percentage_fields = ["yield_percentage", "efficiency_percentage", "utilization_percentage"]
        for field in percentage_fields:
            if field in data and data[field] is not None:
                try:
                    value = float(data[field])
                    if value < 0 or value > 100:
                        errors.append(f"{field} must be between 0 and 100")
                except (ValueError, TypeError):
                    pass  # Already caught by numeric validation

        return {
            "is_valid": len(errors) == 0,
            "errors": errors
        }

    async def get_production_statistics(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date
    ) -> Dict[str, Any]:
        """Get production statistics for a tenant and date range"""
        try:
            # Per-model aggregation would go here; for now, return a basic count
            records = await self.get_by_date_range(
                tenant_id, start_date, end_date, limit=1000
            )

            return {
                "total_records": len(records),
                "period_start": start_date.isoformat(),
                "period_end": end_date.isoformat(),
                "tenant_id": tenant_id
            }

        except Exception as e:
            logger.error("Error calculating production statistics",
                         error=str(e), tenant_id=tenant_id)
            raise DatabaseError(f"Failed to calculate statistics: {str(e)}")

    async def check_duplicate(
        self,
        tenant_id: str,
        unique_fields: Dict[str, Any]
    ) -> bool:
        """Check if a record with the same unique fields exists"""
        try:
            filters = {"tenant_id": tenant_id}
            filters.update(unique_fields)

            existing = await self.get_multi(
                filters=filters,
                limit=1
            )

            return len(existing) > 0

        except Exception as e:
            logger.error("Error checking for duplicates",
                         error=str(e), tenant_id=tenant_id)
            return False
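A minimal usage sketch for the base class above, assuming a ProductionBatch model and an async_session_factory that this commit does not include:

# Illustrative only: `ProductionBatch` and `async_session_factory` are
# placeholders for the real model and session setup, which this diff omits.
import asyncio
from datetime import date

async def main():
    async with async_session_factory() as session:
        repo = ProductionBaseRepository(ProductionBatch, session)

        # Validation is synchronous and purely dict-based
        result = repo._validate_production_data(
            {"tenant_id": "7f9c0f6e-2d6e-4f3a-9a3c-1b2d3e4f5a6b", "yield_percentage": 104.5},
            required_fields=["tenant_id", "planned_quantity"],
        )
        # result["errors"] -> ["Missing required field: planned_quantity",
        #                      "yield_percentage must be between 0 and 100"]

        # Range filters use Django-style __gte/__lte suffixes throughout
        recent = await repo.get_by_date_range(
            tenant_id="7f9c0f6e-2d6e-4f3a-9a3c-1b2d3e4f5a6b",
            start_date=date(2024, 1, 1),
            end_date=date(2024, 1, 31),
        )

asyncio.run(main())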
services/production/app/repositories/equipment_repository.py (new file, 386 lines)
@@ -0,0 +1,386 @@
"""
Equipment Repository
"""

from typing import Optional, List, Dict, Any
from sqlalchemy import select, func, and_, text
from sqlalchemy.ext.asyncio import AsyncSession
from uuid import UUID
import structlog

from app.repositories.base import ProductionBaseRepository
from app.models.production import Equipment

logger = structlog.get_logger()


class EquipmentRepository(ProductionBaseRepository):
    """Repository for equipment operations"""

    def __init__(self, session: AsyncSession):
        super().__init__(Equipment, session)

    async def get_equipment_filtered(
        self,
        filters: Dict[str, Any],
        page: int = 1,
        page_size: int = 50
    ) -> List[Equipment]:
        """Get equipment list with filters and pagination"""
        try:
            # Build base query
            query = select(Equipment).filter(Equipment.tenant_id == UUID(filters.get("tenant_id")))

            # Apply status filter
            if "status" in filters and filters["status"]:
                query = query.filter(Equipment.status == filters["status"])

            # Apply type filter
            if "type" in filters and filters["type"]:
                query = query.filter(Equipment.type == filters["type"])

            # Apply active filter
            if "is_active" in filters and filters["is_active"] is not None:
                query = query.filter(Equipment.is_active == filters["is_active"])

            # Apply ordering and pagination
            query = query.order_by(Equipment.created_at.desc())
            query = query.offset((page - 1) * page_size).limit(page_size)

            result = await self.session.execute(query)
            return list(result.scalars().all())

        except Exception as e:
            logger.error("Error getting filtered equipment", error=str(e), filters=filters)
            raise

    async def count_equipment_filtered(self, filters: Dict[str, Any]) -> int:
        """Count equipment matching filters"""
        try:
            # Build base query
            query = select(func.count(Equipment.id)).filter(
                Equipment.tenant_id == UUID(filters.get("tenant_id"))
            )

            # Apply status filter
            if "status" in filters and filters["status"]:
                query = query.filter(Equipment.status == filters["status"])

            # Apply type filter
            if "type" in filters and filters["type"]:
                query = query.filter(Equipment.type == filters["type"])

            # Apply active filter
            if "is_active" in filters and filters["is_active"] is not None:
                query = query.filter(Equipment.is_active == filters["is_active"])

            result = await self.session.execute(query)
            return result.scalar() or 0

        except Exception as e:
            logger.error("Error counting filtered equipment", error=str(e), filters=filters)
            raise

    async def get_equipment_by_id(self, tenant_id: UUID, equipment_id: UUID) -> Optional[Equipment]:
        """Get equipment by ID and tenant"""
        try:
            query = select(Equipment).filter(
                and_(
                    Equipment.id == equipment_id,
                    Equipment.tenant_id == tenant_id
                )
            )
            result = await self.session.execute(query)
            return result.scalar_one_or_none()

        except Exception as e:
            logger.error("Error getting equipment by ID",
                         error=str(e),
                         equipment_id=str(equipment_id),
                         tenant_id=str(tenant_id))
            raise

    async def create_equipment(self, equipment_data: Dict[str, Any]) -> Equipment:
        """Create new equipment"""
        try:
            equipment = Equipment(**equipment_data)
            self.session.add(equipment)
            await self.session.flush()
            await self.session.refresh(equipment)
            return equipment

        except Exception as e:
            logger.error("Error creating equipment", error=str(e), data=equipment_data)
            raise

    async def update_equipment(
        self,
        equipment_id: UUID,
        updates: Dict[str, Any]
    ) -> Optional[Equipment]:
        """Update equipment"""
        try:
            equipment = await self.get(equipment_id)
            if not equipment:
                return None

            for key, value in updates.items():
                if hasattr(equipment, key) and value is not None:
                    setattr(equipment, key, value)

            await self.session.flush()
            await self.session.refresh(equipment)
            return equipment

        except Exception as e:
            logger.error("Error updating equipment", error=str(e), equipment_id=str(equipment_id))
            raise

    async def delete_equipment(self, equipment_id: UUID) -> bool:
        """Soft delete equipment (set is_active to False)"""
        try:
            equipment = await self.get(equipment_id)
            if not equipment:
                return False

            equipment.is_active = False
            await self.session.flush()
            return True

        except Exception as e:
            logger.error("Error deleting equipment", error=str(e), equipment_id=str(equipment_id))
            raise

    async def hard_delete_equipment(self, equipment_id: UUID) -> bool:
        """Permanently delete equipment from the database"""
        try:
            equipment = await self.get(equipment_id)
            if not equipment:
                return False

            await self.session.delete(equipment)
            await self.session.flush()
            return True

        except Exception as e:
            logger.error("Error hard deleting equipment", error=str(e), equipment_id=str(equipment_id))
            raise

    async def get_equipment_deletion_summary(self, tenant_id: UUID, equipment_id: UUID) -> Dict[str, Any]:
        """Get a summary of what will be affected by deleting equipment"""
        try:
            equipment = await self.get_equipment_by_id(tenant_id, equipment_id)
            if not equipment:
                return {
                    "can_delete": False,
                    "warnings": ["Equipment not found"],
                    "production_batches_count": 0,
                    "maintenance_records_count": 0,
                    "temperature_logs_count": 0
                }

            # Check for related production batches
            from app.models.production import ProductionBatch
            batch_query = select(func.count(ProductionBatch.id)).filter(
                and_(
                    ProductionBatch.tenant_id == tenant_id,
                    ProductionBatch.equipment_id == equipment_id
                )
            )
            batch_result = await self.session.execute(batch_query)
            batches_count = batch_result.scalar() or 0

            # Placeholder counts until maintenance and temperature-log models exist
            maintenance_count = 0
            temperature_logs_count = 0

            warnings = []
            if batches_count > 0:
                warnings.append(f"{batches_count} production batch(es) are using this equipment")

            # Equipment can be deleted even with dependencies, but warn the user
            can_delete = True

            return {
                "can_delete": can_delete,
                "warnings": warnings,
                "production_batches_count": batches_count,
                "maintenance_records_count": maintenance_count,
                "temperature_logs_count": temperature_logs_count,
                "equipment_name": equipment.name,
                "equipment_type": equipment.type.value,
                "equipment_location": equipment.location
            }

        except Exception as e:
            logger.error("Error getting equipment deletion summary",
                         error=str(e),
                         equipment_id=str(equipment_id),
                         tenant_id=str(tenant_id))
            raise

    # ================================================================
    # ALERT-RELATED METHODS (migrated from production_alert_repository)
    # ================================================================

    async def get_equipment_status(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get equipment requiring attention.
        Returns equipment with maintenance due or status issues.
        """
        try:
            query = text("""
                SELECT
                    e.id, e.tenant_id, e.name, e.type, e.status,
                    e.efficiency_percentage, e.uptime_percentage,
                    e.last_maintenance_date, e.next_maintenance_date,
                    e.maintenance_interval_days,
                    EXTRACT(DAY FROM (e.next_maintenance_date - NOW())) as days_to_maintenance,
                    COUNT(ea.id) as active_alerts
                FROM equipment e
                LEFT JOIN alerts ea ON ea.equipment_id = e.id
                    AND ea.is_active = true
                    AND ea.is_resolved = false
                WHERE e.is_active = true
                    AND e.tenant_id = :tenant_id
                GROUP BY e.id, e.tenant_id, e.name, e.type, e.status,
                         e.efficiency_percentage, e.uptime_percentage,
                         e.last_maintenance_date, e.next_maintenance_date,
                         e.maintenance_interval_days
                ORDER BY e.next_maintenance_date ASC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get equipment status", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_equipment_needing_maintenance(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
        """
        Get equipment that needs maintenance.
        Returns equipment where next_maintenance_date has passed.

        Args:
            tenant_id: Optional tenant ID to filter by
        """
        try:
            query_str = """
                SELECT
                    e.id, e.name, e.type, e.tenant_id,
                    e.last_maintenance_date,
                    e.next_maintenance_date,
                    EXTRACT(DAY FROM (NOW() - e.next_maintenance_date)) as days_overdue
                FROM equipment e
                WHERE e.next_maintenance_date IS NOT NULL
                    AND e.next_maintenance_date < NOW()
                    AND e.status = 'OPERATIONAL'
                    AND e.is_active = true
            """

            params = {}
            if tenant_id:
                query_str += " AND e.tenant_id = :tenant_id"
                params["tenant_id"] = tenant_id

            query_str += " ORDER BY e.next_maintenance_date ASC LIMIT 50"

            result = await self.session.execute(text(query_str), params)
            rows = result.fetchall()

            return [
                {
                    'id': str(row.id),
                    'name': row.name,
                    'type': row.type,
                    'tenant_id': str(row.tenant_id),
                    'last_maintenance_date': row.last_maintenance_date.isoformat() if row.last_maintenance_date else None,
                    'next_maintenance_date': row.next_maintenance_date.isoformat() if row.next_maintenance_date else None,
                    'days_overdue': int(row.days_overdue) if row.days_overdue else 0
                }
                for row in rows
            ]

        except Exception as e:
            logger.error("Failed to get equipment needing maintenance", error=str(e))
            raise

    async def get_efficiency_recommendations(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get production efficiency improvement recommendations.
        Analyzes production patterns to identify optimization opportunities.
        """
        try:
            query = text("""
                WITH efficiency_analysis AS (
                    SELECT
                        pb.tenant_id, pb.product_name,
                        AVG(EXTRACT(EPOCH FROM (pb.actual_end_time - pb.actual_start_time)) / 60) as avg_production_time,
                        AVG(pb.planned_duration_minutes) as avg_planned_duration,
                        COUNT(*) as batch_count,
                        AVG(pb.yield_percentage) as avg_yield,
                        EXTRACT(hour FROM pb.actual_start_time) as start_hour
                    FROM production_batches pb
                    WHERE pb.status = 'COMPLETED'
                        AND pb.actual_end_time > CURRENT_DATE - INTERVAL '30 days'
                        AND pb.tenant_id = :tenant_id
                    GROUP BY pb.tenant_id, pb.product_name, EXTRACT(hour FROM pb.actual_start_time)
                    HAVING COUNT(*) >= 3
                ),
                recommendations AS (
                    SELECT *,
                        CASE
                            WHEN avg_production_time > avg_planned_duration * 1.2 THEN 'reduce_production_time'
                            WHEN avg_yield < 85 THEN 'improve_yield'
                            WHEN start_hour BETWEEN 14 AND 16 AND avg_production_time > avg_planned_duration * 1.1 THEN 'avoid_afternoon_production'
                            ELSE null
                        END as recommendation_type,
                        (avg_production_time - avg_planned_duration) / avg_planned_duration * 100 as efficiency_loss_percent
                    FROM efficiency_analysis
                )
                SELECT * FROM recommendations
                WHERE recommendation_type IS NOT NULL
                    AND efficiency_loss_percent > 10
                ORDER BY efficiency_loss_percent DESC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get efficiency recommendations", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_energy_consumption_patterns(self, tenant_id: UUID) -> List[Dict[str, Any]]:
        """
        Get energy consumption patterns for optimization analysis.
        Returns consumption by equipment and hour of day.
        """
        try:
            query = text("""
                SELECT
                    e.tenant_id, e.name as equipment_name, e.type,
                    AVG(ec.energy_consumption_kwh) as avg_energy,
                    EXTRACT(hour FROM ec.recorded_at) as hour_of_day,
                    COUNT(*) as readings_count
                FROM equipment e
                JOIN energy_consumption ec ON ec.equipment_id = e.id
                WHERE ec.recorded_at > CURRENT_DATE - INTERVAL '30 days'
                    AND e.tenant_id = :tenant_id
                GROUP BY e.tenant_id, e.id, e.name, e.type, EXTRACT(hour FROM ec.recorded_at)
                HAVING COUNT(*) >= 10
                ORDER BY avg_energy DESC
            """)

            result = await self.session.execute(query, {"tenant_id": tenant_id})
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get energy consumption patterns", error=str(e), tenant_id=str(tenant_id))
            raise
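A brief usage sketch for the filtered list methods above (illustrative: session wiring and the tenant value are placeholders, not part of this commit):

# Hypothetical usage -- session setup is assumed, not part of this diff.
from uuid import uuid4

async def list_operational_equipment(session):
    repo = EquipmentRepository(session)
    tenant_id = str(uuid4())  # placeholder tenant

    # get_equipment_filtered / count_equipment_filtered share one filter dict,
    # so a list endpoint can return a page of items plus a total for pagination
    filters = {"tenant_id": tenant_id, "status": "OPERATIONAL", "is_active": True}
    items = await repo.get_equipment_filtered(filters, page=1, page_size=20)
    total = await repo.count_equipment_filtered(filters)
    return items, total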
services/production/app/repositories/production_batch_repository.py (new file, 1011 lines)
File diff suppressed because it is too large

services/production/app/repositories/production_capacity_repository.py (new file, 402 lines)
@@ -0,0 +1,402 @@
"""
Production Capacity Repository
Repository for production capacity operations
"""

from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession
from datetime import datetime, date
from uuid import UUID
import structlog

from .base import ProductionBaseRepository
from app.models.production import ProductionCapacity
from shared.database.exceptions import DatabaseError, ValidationError


logger = structlog.get_logger()


class ProductionCapacityRepository(ProductionBaseRepository):
    """Repository for production capacity operations"""

    def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 600):
        # Capacity data changes moderately, so use a medium cache time (10 minutes)
        super().__init__(ProductionCapacity, session, cache_ttl)

    async def create_capacity(self, capacity_data: Dict[str, Any]) -> ProductionCapacity:
        """Create a new production capacity entry with validation"""
        try:
            # Validate capacity data
            validation_result = self._validate_production_data(
                capacity_data,
                ["tenant_id", "resource_type", "resource_id", "resource_name",
                 "date", "start_time", "end_time", "total_capacity_units"]
            )

            if not validation_result["is_valid"]:
                raise ValidationError(f"Invalid capacity data: {validation_result['errors']}")

            # Set default values
            if "allocated_capacity_units" not in capacity_data:
                capacity_data["allocated_capacity_units"] = 0.0
            if "remaining_capacity_units" not in capacity_data:
                capacity_data["remaining_capacity_units"] = capacity_data["total_capacity_units"]
            if "is_available" not in capacity_data:
                capacity_data["is_available"] = True
            if "is_maintenance" not in capacity_data:
                capacity_data["is_maintenance"] = False
            if "is_reserved" not in capacity_data:
                capacity_data["is_reserved"] = False

            # Create capacity entry
            capacity = await self.create(capacity_data)

            logger.info("Production capacity created successfully",
                        capacity_id=str(capacity.id),
                        resource_type=capacity.resource_type,
                        resource_id=capacity.resource_id,
                        tenant_id=str(capacity.tenant_id))

            return capacity

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error creating production capacity", error=str(e))
            raise DatabaseError(f"Failed to create production capacity: {str(e)}")

    async def get_capacity_by_resource(
        self,
        tenant_id: str,
        resource_id: str,
        date_filter: Optional[date] = None
    ) -> List[ProductionCapacity]:
        """Get capacity entries for a specific resource"""
        try:
            filters = {
                "tenant_id": tenant_id,
                "resource_id": resource_id
            }

            if date_filter:
                filters["date"] = date_filter

            capacities = await self.get_multi(
                filters=filters,
                order_by="start_time"
            )

            logger.info("Retrieved capacity by resource",
                        count=len(capacities),
                        resource_id=resource_id,
                        tenant_id=tenant_id)

            return capacities

        except Exception as e:
            logger.error("Error fetching capacity by resource", error=str(e))
            raise DatabaseError(f"Failed to fetch capacity by resource: {str(e)}")

    async def get_available_capacity(
        self,
        tenant_id: str,
        resource_type: str,
        target_date: date,
        required_capacity: float
    ) -> List[ProductionCapacity]:
        """Get available capacity for a specific date and capacity requirement"""
        try:
            capacities = await self.get_multi(
                filters={
                    "tenant_id": tenant_id,
                    "resource_type": resource_type,
                    "date": target_date,
                    "is_available": True,
                    "is_maintenance": False,
                    "remaining_capacity_units__gte": required_capacity
                },
                order_by="remaining_capacity_units",
                order_desc=True
            )

            logger.info("Retrieved available capacity",
                        count=len(capacities),
                        resource_type=resource_type,
                        required_capacity=required_capacity,
                        tenant_id=tenant_id)

            return capacities

        except Exception as e:
            logger.error("Error fetching available capacity", error=str(e))
            raise DatabaseError(f"Failed to fetch available capacity: {str(e)}")

    async def allocate_capacity(
        self,
        capacity_id: UUID,
        allocation_amount: float,
        allocation_notes: Optional[str] = None
    ) -> ProductionCapacity:
        """Allocate capacity units from a capacity entry"""
        try:
            capacity = await self.get(capacity_id)
            if not capacity:
                raise ValidationError(f"Capacity {capacity_id} not found")

            if allocation_amount > capacity.remaining_capacity_units:
                raise ValidationError(
                    f"Insufficient capacity: requested {allocation_amount}, "
                    f"available {capacity.remaining_capacity_units}"
                )

            new_allocated = capacity.allocated_capacity_units + allocation_amount
            new_remaining = capacity.remaining_capacity_units - allocation_amount

            update_data = {
                "allocated_capacity_units": new_allocated,
                "remaining_capacity_units": new_remaining,
                "updated_at": datetime.utcnow()
            }

            if allocation_notes:
                current_notes = capacity.notes or ""
                update_data["notes"] = f"{current_notes}\n{allocation_notes}".strip()

            capacity = await self.update(capacity_id, update_data)

            logger.info("Allocated capacity",
                        capacity_id=str(capacity_id),
                        allocation_amount=allocation_amount,
                        remaining_capacity=new_remaining)

            return capacity

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error allocating capacity", error=str(e))
            raise DatabaseError(f"Failed to allocate capacity: {str(e)}")

    async def release_capacity(
        self,
        capacity_id: UUID,
        release_amount: float,
        release_notes: Optional[str] = None
    ) -> ProductionCapacity:
        """Release allocated capacity units back to a capacity entry"""
        try:
            capacity = await self.get(capacity_id)
            if not capacity:
                raise ValidationError(f"Capacity {capacity_id} not found")

            if release_amount > capacity.allocated_capacity_units:
                raise ValidationError(
                    f"Cannot release more than allocated: requested {release_amount}, "
                    f"allocated {capacity.allocated_capacity_units}"
                )

            new_allocated = capacity.allocated_capacity_units - release_amount
            new_remaining = capacity.remaining_capacity_units + release_amount

            update_data = {
                "allocated_capacity_units": new_allocated,
                "remaining_capacity_units": new_remaining,
                "updated_at": datetime.utcnow()
            }

            if release_notes:
                current_notes = capacity.notes or ""
                update_data["notes"] = f"{current_notes}\n{release_notes}".strip()

            capacity = await self.update(capacity_id, update_data)

            logger.info("Released capacity",
                        capacity_id=str(capacity_id),
                        release_amount=release_amount,
                        remaining_capacity=new_remaining)

            return capacity

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error releasing capacity", error=str(e))
            raise DatabaseError(f"Failed to release capacity: {str(e)}")

    async def get_capacity_utilization_summary(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date,
        resource_type: Optional[str] = None
    ) -> Dict[str, Any]:
        """Get capacity utilization summary for a date range"""
        try:
            filters = {
                "tenant_id": tenant_id,
                "date__gte": start_date,
                "date__lte": end_date
            }

            if resource_type:
                filters["resource_type"] = resource_type

            capacities = await self.get_multi(filters=filters)

            total_capacity = sum(c.total_capacity_units for c in capacities)
            total_allocated = sum(c.allocated_capacity_units for c in capacities)
            total_available = sum(c.remaining_capacity_units for c in capacities)

            # Group by resource type
            by_resource_type = {}
            for capacity in capacities:
                rt = capacity.resource_type
                if rt not in by_resource_type:
                    by_resource_type[rt] = {
                        "total_capacity": 0,
                        "allocated_capacity": 0,
                        "available_capacity": 0,
                        "resource_count": 0
                    }

                by_resource_type[rt]["total_capacity"] += capacity.total_capacity_units
                by_resource_type[rt]["allocated_capacity"] += capacity.allocated_capacity_units
                by_resource_type[rt]["available_capacity"] += capacity.remaining_capacity_units
                by_resource_type[rt]["resource_count"] += 1

            # Calculate utilization percentages
            for rt_data in by_resource_type.values():
                if rt_data["total_capacity"] > 0:
                    rt_data["utilization_percentage"] = round(
                        (rt_data["allocated_capacity"] / rt_data["total_capacity"]) * 100, 2
                    )
                else:
                    rt_data["utilization_percentage"] = 0

            return {
                "period_start": start_date.isoformat(),
                "period_end": end_date.isoformat(),
                "total_capacity_units": total_capacity,
                "total_allocated_units": total_allocated,
                "total_available_units": total_available,
                "overall_utilization_percentage": round(
                    (total_allocated / total_capacity * 100) if total_capacity > 0 else 0, 2
                ),
                "by_resource_type": by_resource_type,
                "total_resources": len(capacities),
                "tenant_id": tenant_id
            }

        except Exception as e:
            logger.error("Error calculating capacity utilization summary", error=str(e))
            raise DatabaseError(f"Failed to calculate capacity utilization summary: {str(e)}")

    async def set_maintenance_mode(
        self,
        capacity_id: UUID,
        is_maintenance: bool,
        maintenance_notes: Optional[str] = None
    ) -> ProductionCapacity:
        """Set maintenance mode for a capacity entry"""
        try:
            capacity = await self.get(capacity_id)
            if not capacity:
                raise ValidationError(f"Capacity {capacity_id} not found")

            update_data = {
                "is_maintenance": is_maintenance,
                "is_available": not is_maintenance,  # Not available while in maintenance
                "updated_at": datetime.utcnow()
            }

            if is_maintenance:
                update_data["maintenance_status"] = "in_maintenance"
                if maintenance_notes:
                    update_data["notes"] = maintenance_notes
            else:
                update_data["maintenance_status"] = "operational"
                update_data["last_maintenance_date"] = datetime.utcnow()

            capacity = await self.update(capacity_id, update_data)

            logger.info("Set maintenance mode",
                        capacity_id=str(capacity_id),
                        is_maintenance=is_maintenance)

            return capacity

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error setting maintenance mode", error=str(e))
            raise DatabaseError(f"Failed to set maintenance mode: {str(e)}")

    async def get_capacity_with_filters(
        self,
        tenant_id: str,
        resource_type: Optional[str] = None,
        date_filter: Optional[date] = None,
        availability: Optional[bool] = None,
        page: int = 1,
        page_size: int = 50
    ) -> tuple[List[ProductionCapacity], int]:
        """Get production capacity with filters and pagination"""
        try:
            filters = {"tenant_id": tenant_id}

            if resource_type:
                filters["resource_type"] = resource_type
            if date_filter:
                filters["date"] = date_filter
            if availability is not None:
                filters["is_available"] = availability

            # Get total count
            total_count = await self.count(filters)

            # Get paginated results (get_multi takes skip=, matching its other call sites)
            skip = (page - 1) * page_size
            capacities = await self.get_multi(
                filters=filters,
                order_by="date",
                order_desc=True,
                limit=page_size,
                skip=skip
            )

            logger.info("Retrieved capacity with filters",
                        count=len(capacities),
                        total_count=total_count,
                        page=page,
                        page_size=page_size,
                        tenant_id=tenant_id)

            return capacities, total_count

        except Exception as e:
            logger.error("Error fetching capacity with filters", error=str(e))
            raise DatabaseError(f"Failed to fetch capacity with filters: {str(e)}")

    async def get_capacity_by_date(self, tenant_id: str, target_date: date) -> List[ProductionCapacity]:
        """Get all capacity entries for a specific date"""
        try:
            capacities = await self.get_multi(
                filters={
                    "tenant_id": tenant_id,
                    "date": target_date
                },
                order_by="start_time"
            )

            logger.info("Retrieved capacity by date",
                        count=len(capacities),
                        date=target_date.isoformat(),
                        tenant_id=tenant_id)

            return capacities

        except Exception as e:
            logger.error("Error fetching capacity by date", error=str(e))
            raise DatabaseError(f"Failed to fetch capacity by date: {str(e)}")
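A usage sketch for the allocate/release pair above (illustrative: capacity_id would come from an earlier create_capacity call, and session setup is assumed):

# Hypothetical usage -- illustrates the invariant the two methods maintain:
# allocated_capacity_units + remaining_capacity_units == total_capacity_units.
async def reserve_oven_hours(session, capacity_id):
    repo = ProductionCapacityRepository(session)

    cap = await repo.allocate_capacity(capacity_id, 4.0, "Morning run reservation")
    assert cap.allocated_capacity_units + cap.remaining_capacity_units == cap.total_capacity_units

    # Releasing gives the units back; over-release raises ValidationError
    cap = await repo.release_capacity(capacity_id, 4.0, "Morning run cancelled")
    return cap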
services/production/app/repositories/production_schedule_repository.py (new file, 425 lines)
@@ -0,0 +1,425 @@
"""
Production Schedule Repository
Repository for production schedule operations
"""

from typing import Optional, List, Dict, Any
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from datetime import datetime, date
from uuid import UUID
import structlog

from .base import ProductionBaseRepository
from app.models.production import ProductionSchedule
from shared.database.exceptions import DatabaseError, ValidationError


logger = structlog.get_logger()


class ProductionScheduleRepository(ProductionBaseRepository):
    """Repository for production schedule operations"""

    def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 600):
        # Schedules are more stable, so use a medium cache time (10 minutes)
        super().__init__(ProductionSchedule, session, cache_ttl)

    async def create_schedule(self, schedule_data: Dict[str, Any]) -> ProductionSchedule:
        """Create a new production schedule with validation"""
        try:
            # Validate schedule data
            validation_result = self._validate_production_data(
                schedule_data,
                ["tenant_id", "schedule_date", "shift_start", "shift_end",
                 "total_capacity_hours", "planned_capacity_hours", "staff_count"]
            )

            if not validation_result["is_valid"]:
                raise ValidationError(f"Invalid schedule data: {validation_result['errors']}")

            # Set default values
            if "is_finalized" not in schedule_data:
                schedule_data["is_finalized"] = False
            if "is_active" not in schedule_data:
                schedule_data["is_active"] = True
            if "overtime_hours" not in schedule_data:
                schedule_data["overtime_hours"] = 0.0

            # Enforce one schedule per date
            existing_schedule = await self.get_schedule_by_date(
                schedule_data["tenant_id"],
                schedule_data["schedule_date"]
            )
            if existing_schedule:
                raise ValidationError(f"Schedule for date {schedule_data['schedule_date']} already exists")

            # Create schedule
            schedule = await self.create(schedule_data)

            logger.info("Production schedule created successfully",
                        schedule_id=str(schedule.id),
                        schedule_date=schedule.schedule_date.isoformat(),
                        tenant_id=str(schedule.tenant_id))

            return schedule

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error creating production schedule", error=str(e))
            raise DatabaseError(f"Failed to create production schedule: {str(e)}")

    async def get_schedule_by_date(
        self,
        tenant_id: str,
        schedule_date: date
    ) -> Optional[ProductionSchedule]:
        """Get production schedule for a specific date"""
        try:
            schedules = await self.get_multi(
                filters={
                    "tenant_id": tenant_id,
                    "schedule_date": schedule_date
                },
                limit=1
            )

            schedule = schedules[0] if schedules else None

            if schedule:
                logger.info("Retrieved production schedule by date",
                            schedule_id=str(schedule.id),
                            schedule_date=schedule_date.isoformat(),
                            tenant_id=tenant_id)

            return schedule

        except Exception as e:
            logger.error("Error fetching schedule by date", error=str(e))
            raise DatabaseError(f"Failed to fetch schedule by date: {str(e)}")

    async def get_schedules_by_date_range(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date
    ) -> List[ProductionSchedule]:
        """Get production schedules within a date range"""
        try:
            schedules = await self.get_multi(
                filters={
                    "tenant_id": tenant_id,
                    "schedule_date__gte": start_date,
                    "schedule_date__lte": end_date
                },
                order_by="schedule_date"
            )

            logger.info("Retrieved schedules by date range",
                        count=len(schedules),
                        start_date=start_date.isoformat(),
                        end_date=end_date.isoformat(),
                        tenant_id=tenant_id)

            return schedules

        except Exception as e:
            logger.error("Error fetching schedules by date range", error=str(e))
            raise DatabaseError(f"Failed to fetch schedules by date range: {str(e)}")

    async def get_active_schedules(self, tenant_id: str) -> List[ProductionSchedule]:
        """Get active production schedules for a tenant"""
        try:
            schedules = await self.get_multi(
                filters={
                    "tenant_id": tenant_id,
                    "is_active": True
                },
                order_by="schedule_date"
            )

            logger.info("Retrieved active production schedules",
                        count=len(schedules),
                        tenant_id=tenant_id)

            return schedules

        except Exception as e:
            logger.error("Error fetching active schedules", error=str(e))
            raise DatabaseError(f"Failed to fetch active schedules: {str(e)}")

    async def finalize_schedule(
        self,
        schedule_id: UUID,
        finalized_by: str
    ) -> ProductionSchedule:
        """Finalize a production schedule"""
        try:
            schedule = await self.get(schedule_id)
            if not schedule:
                raise ValidationError(f"Schedule {schedule_id} not found")

            if schedule.is_finalized:
                raise ValidationError("Schedule is already finalized")

            update_data = {
                "is_finalized": True,
                "finalized_at": datetime.utcnow(),
                "updated_at": datetime.utcnow()
            }

            schedule = await self.update(schedule_id, update_data)

            logger.info("Production schedule finalized",
                        schedule_id=str(schedule_id),
                        finalized_by=finalized_by)

            return schedule

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error finalizing schedule", error=str(e))
            raise DatabaseError(f"Failed to finalize schedule: {str(e)}")

    async def update_schedule_metrics(
        self,
        schedule_id: UUID,
        metrics: Dict[str, Any]
    ) -> ProductionSchedule:
        """Update production schedule metrics"""
        try:
            schedule = await self.get(schedule_id)
            if not schedule:
                raise ValidationError(f"Schedule {schedule_id} not found")

            # Only whitelisted metrics may be updated
            valid_metrics = [
                "actual_capacity_hours", "total_batches_completed",
                "total_quantity_produced", "efficiency_percentage",
                "utilization_percentage", "on_time_completion_rate"
            ]

            update_data = {"updated_at": datetime.utcnow()}

            for metric, value in metrics.items():
                if metric in valid_metrics:
                    update_data[metric] = value

            schedule = await self.update(schedule_id, update_data)

            logger.info("Updated schedule metrics",
                        schedule_id=str(schedule_id),
                        metrics=list(metrics.keys()))

            return schedule

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error updating schedule metrics", error=str(e))
            raise DatabaseError(f"Failed to update schedule metrics: {str(e)}")

    async def get_schedule_performance_summary(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date
    ) -> Dict[str, Any]:
        """Get schedule performance summary for a date range"""
        try:
            schedules = await self.get_schedules_by_date_range(tenant_id, start_date, end_date)

            total_schedules = len(schedules)
            finalized_schedules = len([s for s in schedules if s.is_finalized])

            # Calculate totals
            total_planned_hours = sum(s.planned_capacity_hours for s in schedules)
            total_actual_hours = sum(s.actual_capacity_hours or 0 for s in schedules)
            total_overtime = sum(s.overtime_hours or 0 for s in schedules)

            # Calculate efficiency metrics
            schedules_with_efficiency = [s for s in schedules if s.efficiency_percentage is not None]
            avg_efficiency = (
                sum(s.efficiency_percentage for s in schedules_with_efficiency) / len(schedules_with_efficiency)
                if schedules_with_efficiency else 0
            )

            schedules_with_utilization = [s for s in schedules if s.utilization_percentage is not None]
            avg_utilization = (
                sum(s.utilization_percentage for s in schedules_with_utilization) / len(schedules_with_utilization)
                if schedules_with_utilization else 0
            )

            return {
                "period_start": start_date.isoformat(),
                "period_end": end_date.isoformat(),
                "total_schedules": total_schedules,
                "finalized_schedules": finalized_schedules,
                "finalization_rate": (finalized_schedules / total_schedules * 100) if total_schedules > 0 else 0,
                "total_planned_hours": total_planned_hours,
                "total_actual_hours": total_actual_hours,
                "total_overtime_hours": total_overtime,
                "capacity_utilization": (total_actual_hours / total_planned_hours * 100) if total_planned_hours > 0 else 0,
                "average_efficiency_percentage": round(avg_efficiency, 2),
                "average_utilization_percentage": round(avg_utilization, 2),
                "tenant_id": tenant_id
            }

        except Exception as e:
            logger.error("Error calculating schedule performance summary", error=str(e))
            raise DatabaseError(f"Failed to calculate schedule performance summary: {str(e)}")

    async def get_schedules_with_filters(
        self,
        tenant_id: str,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None,
        is_finalized: Optional[bool] = None,
        page: int = 1,
        page_size: int = 50
    ) -> tuple[List[ProductionSchedule], int]:
        """Get production schedules with filters and pagination"""
        try:
            filters = {"tenant_id": tenant_id}

            if start_date:
                filters["schedule_date__gte"] = start_date
            if end_date:
                filters["schedule_date__lte"] = end_date
            if is_finalized is not None:
                filters["is_finalized"] = is_finalized

            # Get total count
            total_count = await self.count(filters)

            # Get paginated results (get_multi takes skip=, matching its other call sites)
            skip = (page - 1) * page_size
            schedules = await self.get_multi(
                filters=filters,
                order_by="schedule_date",
                order_desc=True,
                limit=page_size,
                skip=skip
            )

            logger.info("Retrieved schedules with filters",
                        count=len(schedules),
                        total_count=total_count,
                        page=page,
                        page_size=page_size,
                        tenant_id=tenant_id)

            return schedules, total_count

        except Exception as e:
            logger.error("Error fetching schedules with filters", error=str(e))
            raise DatabaseError(f"Failed to fetch schedules with filters: {str(e)}")

    async def update_schedule(self, schedule_id: UUID, update_data: Dict[str, Any]) -> ProductionSchedule:
        """Update a production schedule"""
        try:
            schedule = await self.get(schedule_id)
            if not schedule:
                raise ValidationError(f"Schedule {schedule_id} not found")

            # Add updated timestamp
            update_data["updated_at"] = datetime.utcnow()

            # Update the schedule
            schedule = await self.update(schedule_id, update_data)

            logger.info("Updated production schedule",
                        schedule_id=str(schedule_id),
                        update_fields=list(update_data.keys()))

            return schedule

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error updating production schedule", error=str(e))
            raise DatabaseError(f"Failed to update production schedule: {str(e)}")

    async def delete_schedule(self, schedule_id: UUID) -> bool:
        """Delete a production schedule"""
        try:
            schedule = await self.get(schedule_id)
            if not schedule:
                raise ValidationError(f"Schedule {schedule_id} not found")

            # Finalized schedules cannot be deleted
            if schedule.is_finalized:
                raise ValidationError("Cannot delete finalized schedule")

            success = await self.delete(schedule_id)

            logger.info("Deleted production schedule",
                        schedule_id=str(schedule_id),
                        success=success)

            return success

        except ValidationError:
            raise
        except Exception as e:
            logger.error("Error deleting production schedule", error=str(e))
            raise DatabaseError(f"Failed to delete production schedule: {str(e)}")

    async def get_todays_schedule(self, tenant_id: str) -> Optional[ProductionSchedule]:
        """Get today's production schedule for a tenant"""
        try:
            today = datetime.utcnow().date()
            return await self.get_schedule_by_date(tenant_id, today)

        except Exception as e:
            logger.error("Error fetching today's schedule", error=str(e))
            raise DatabaseError(f"Failed to fetch today's schedule: {str(e)}")

    async def get_all_schedules_for_tenant(self, tenant_id: UUID) -> List[ProductionSchedule]:
        """Get all production schedules for a specific tenant"""
        try:
            result = await self.session.execute(
                select(ProductionSchedule).where(
                    ProductionSchedule.tenant_id == tenant_id
                )
            )
            schedules = result.scalars().all()

            logger.info("Retrieved all schedules for tenant",
                        tenant_id=str(tenant_id),
                        count=len(schedules))

            return list(schedules)

        except Exception as e:
            logger.error("Error fetching all tenant schedules", error=str(e), tenant_id=str(tenant_id))
            raise DatabaseError(f"Failed to fetch all tenant schedules: {str(e)}")

    async def archive_schedule(self, schedule: ProductionSchedule) -> None:
        """Archive a production schedule"""
        try:
            schedule.archived = True
            await self.session.commit()
            logger.info("Archived schedule", schedule_id=str(schedule.id))

        except Exception as e:
            logger.error("Error archiving schedule", error=str(e), schedule_id=str(schedule.id))
            raise DatabaseError(f"Failed to archive schedule: {str(e)}")

    async def cancel_schedule(self, schedule: ProductionSchedule, reason: Optional[str] = None) -> None:
        """Cancel a production schedule"""
        try:
            schedule.status = "cancelled"
            if reason:
                schedule.notes = (schedule.notes or "") + f"\n{reason}"
            await self.session.commit()
            logger.info("Cancelled schedule", schedule_id=str(schedule.id))

        except Exception as e:
            logger.error("Error cancelling schedule", error=str(e), schedule_id=str(schedule.id))
            raise DatabaseError(f"Failed to cancel schedule: {str(e)}")
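A usage sketch for the create/finalize workflow above (illustrative: the field values are placeholders following the required-field list in create_schedule, and session setup is assumed):

# Hypothetical usage -- session setup is assumed, not part of this diff.
from datetime import date, time

async def plan_and_lock_day(session, tenant_id: str):
    repo = ProductionScheduleRepository(session)

    schedule = await repo.create_schedule({
        "tenant_id": tenant_id,
        "schedule_date": date(2024, 6, 1),
        "shift_start": time(6, 0),
        "shift_end": time(14, 0),
        "total_capacity_hours": 64.0,
        "planned_capacity_hours": 56.0,
        "staff_count": 8,
    })

    # A second create_schedule for the same date raises ValidationError,
    # and delete_schedule refuses once the schedule is finalized.
    return await repo.finalize_schedule(schedule.id, finalized_by="shift-manager")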
services/production/app/repositories/quality_check_repository.py (new file, 441 lines)
@@ -0,0 +1,441 @@
|
||||
"""
|
||||
Quality Check Repository
|
||||
Repository for quality check operations
|
||||
"""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, and_, text, desc, func
|
||||
from datetime import datetime, timedelta, date
|
||||
from uuid import UUID
|
||||
import structlog
|
||||
|
||||
from .base import ProductionBaseRepository
|
||||
from app.models.production import QualityCheck
|
||||
from shared.database.exceptions import DatabaseError, ValidationError
|
||||
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class QualityCheckRepository(ProductionBaseRepository):
|
||||
"""Repository for quality check operations"""
|
||||
|
||||
def __init__(self, session: AsyncSession, cache_ttl: Optional[int] = 300):
|
||||
# Quality checks are dynamic, short cache time (5 minutes)
|
||||
super().__init__(QualityCheck, session, cache_ttl)
|
||||
|
||||
async def create_quality_check(self, check_data: Dict[str, Any]) -> QualityCheck:
|
||||
"""Create a new quality check with validation"""
|
||||
try:
|
||||
# Validate check data
|
||||
validation_result = self._validate_production_data(
|
||||
check_data,
|
||||
["tenant_id", "batch_id", "check_type", "check_time",
|
||||
"quality_score", "pass_fail"]
|
||||
)
|
||||
|
||||
if not validation_result["is_valid"]:
|
||||
raise ValidationError(f"Invalid quality check data: {validation_result['errors']}")
|
||||
|
||||
# Validate quality score range (1-10)
|
||||
if check_data.get("quality_score"):
|
||||
score = float(check_data["quality_score"])
|
||||
if score < 1 or score > 10:
|
||||
raise ValidationError("Quality score must be between 1 and 10")
|
||||
|
||||
# Set default values
|
||||
if "defect_count" not in check_data:
|
||||
check_data["defect_count"] = 0
|
||||
if "corrective_action_needed" not in check_data:
|
||||
check_data["corrective_action_needed"] = False
|
||||
|
||||
# Create quality check
|
||||
quality_check = await self.create(check_data)
|
||||
|
||||
logger.info("Quality check created successfully",
|
||||
check_id=str(quality_check.id),
|
||||
batch_id=str(quality_check.batch_id),
|
||||
check_type=quality_check.check_type,
|
||||
quality_score=quality_check.quality_score,
|
||||
tenant_id=str(quality_check.tenant_id))
|
||||
|
||||
return quality_check
|
||||
|
||||
except ValidationError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Error creating quality check", error=str(e))
|
||||
raise DatabaseError(f"Failed to create quality check: {str(e)}")
|
||||
|
||||
async def get_checks_by_batch(
|
||||
self,
|
||||
tenant_id: str,
|
||||
batch_id: str
|
||||
) -> List[QualityCheck]:
|
||||
"""Get all quality checks for a specific batch"""
|
||||
try:
|
||||
checks = await self.get_multi(
|
||||
filters={
|
||||
"tenant_id": tenant_id,
|
||||
"batch_id": batch_id
|
||||
},
|
||||
order_by="check_time"
|
||||
)
|
||||
|
||||
logger.info("Retrieved quality checks by batch",
|
||||
count=len(checks),
|
||||
batch_id=batch_id,
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return checks
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching quality checks by batch", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch quality checks by batch: {str(e)}")
|
||||
|
||||
async def get_checks_by_date_range(
|
||||
self,
|
||||
tenant_id: str,
|
||||
start_date: date,
|
||||
end_date: date,
|
||||
check_type: Optional[str] = None
|
||||
) -> List[QualityCheck]:
|
||||
"""Get quality checks within a date range"""
|
||||
try:
|
||||
start_datetime = datetime.combine(start_date, datetime.min.time())
|
||||
end_datetime = datetime.combine(end_date, datetime.max.time())
|
||||
|
||||
filters = {
|
||||
"tenant_id": tenant_id,
|
||||
"check_time__gte": start_datetime,
|
||||
"check_time__lte": end_datetime
|
||||
}
|
||||
|
||||
if check_type:
|
||||
filters["check_type"] = check_type
|
||||
|
||||
checks = await self.get_multi(
|
||||
filters=filters,
|
||||
order_by="check_time",
|
||||
order_desc=True
|
||||
)
|
||||
|
||||
logger.info("Retrieved quality checks by date range",
|
||||
count=len(checks),
|
||||
start_date=start_date.isoformat(),
|
||||
end_date=end_date.isoformat(),
|
||||
tenant_id=tenant_id)
|
||||
|
||||
return checks
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error fetching quality checks by date range", error=str(e))
|
||||
raise DatabaseError(f"Failed to fetch quality checks by date range: {str(e)}")
|
||||
|
||||
    async def get_failed_checks(
        self,
        tenant_id: str,
        days_back: int = 7
    ) -> List[QualityCheck]:
        """Get failed quality checks from the last N days"""
        try:
            cutoff_date = datetime.utcnow() - timedelta(days=days_back)

            checks = await self.get_multi(
                filters={
                    "tenant_id": tenant_id,
                    "pass_fail": False,
                    "check_time__gte": cutoff_date
                },
                order_by="check_time",
                order_desc=True
            )

            logger.info("Retrieved failed quality checks",
                        count=len(checks),
                        days_back=days_back,
                        tenant_id=tenant_id)

            return checks

        except Exception as e:
            logger.error("Error fetching failed quality checks", error=str(e))
            raise DatabaseError(f"Failed to fetch failed quality checks: {str(e)}")

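    # Example call (illustrative): pull the last two weeks of failures for a
    # tenant, newest first.
    #
    #   failed = await repo.get_failed_checks(tenant_id, days_back=14)
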
    async def get_quality_metrics(
        self,
        tenant_id: str,
        start_date: date,
        end_date: date
    ) -> Dict[str, Any]:
        """Get quality metrics for a tenant and date range"""
        try:
            checks = await self.get_checks_by_date_range(tenant_id, start_date, end_date)

            total_checks = len(checks)
            passed_checks = len([c for c in checks if c.pass_fail])
            failed_checks = total_checks - passed_checks

            # Calculate average quality score
            quality_scores = [c.quality_score for c in checks if c.quality_score is not None]
            avg_quality_score = sum(quality_scores) / len(quality_scores) if quality_scores else 0

            # Calculate defect rate
            total_defects = sum(c.defect_count for c in checks)
            avg_defects_per_check = total_defects / total_checks if total_checks > 0 else 0

            # Group by check type
            by_check_type = {}
            for check in checks:
                check_type = check.check_type
                if check_type not in by_check_type:
                    by_check_type[check_type] = {
                        "total_checks": 0,
                        "passed_checks": 0,
                        "failed_checks": 0,
                        "avg_quality_score": 0,
                        "total_defects": 0
                    }

                by_check_type[check_type]["total_checks"] += 1
                if check.pass_fail:
                    by_check_type[check_type]["passed_checks"] += 1
                else:
                    by_check_type[check_type]["failed_checks"] += 1
                by_check_type[check_type]["total_defects"] += check.defect_count

            # Calculate pass rates and average scores by check type; iterate
            # over items() so each type's scores are matched to its own key
            # (iterating values() alone would reuse the stale check_type left
            # over from the grouping loop above)
            for check_type, type_data in by_check_type.items():
                if type_data["total_checks"] > 0:
                    type_data["pass_rate"] = round(
                        (type_data["passed_checks"] / type_data["total_checks"]) * 100, 2
                    )
                else:
                    type_data["pass_rate"] = 0

                type_scores = [c.quality_score for c in checks
                               if c.check_type == check_type and c.quality_score is not None]
                type_data["avg_quality_score"] = round(
                    sum(type_scores) / len(type_scores) if type_scores else 0, 2
                )

            # Identify trends
            checks_needing_action = len([c for c in checks if c.corrective_action_needed])

            return {
                "period_start": start_date.isoformat(),
                "period_end": end_date.isoformat(),
                "total_checks": total_checks,
                "passed_checks": passed_checks,
                "failed_checks": failed_checks,
                "pass_rate_percentage": round((passed_checks / total_checks * 100) if total_checks > 0 else 0, 2),
                "average_quality_score": round(avg_quality_score, 2),
                "total_defects": total_defects,
                "average_defects_per_check": round(avg_defects_per_check, 2),
                "checks_needing_corrective_action": checks_needing_action,
                "by_check_type": by_check_type,
                "tenant_id": tenant_id
            }

        except Exception as e:
            logger.error("Error calculating quality metrics", error=str(e))
            raise DatabaseError(f"Failed to calculate quality metrics: {str(e)}")

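    # Shape of the metrics payload returned above (values illustrative):
    #
    #   {
    #       "period_start": "2024-01-01", "period_end": "2024-01-31",
    #       "total_checks": 120, "passed_checks": 111, "failed_checks": 9,
    #       "pass_rate_percentage": 92.5, "average_quality_score": 8.1,
    #       "total_defects": 14, "average_defects_per_check": 0.12,
    #       "checks_needing_corrective_action": 6,
    #       "by_check_type": {"visual_inspection": {"pass_rate": 95.0, ...}},
    #       "tenant_id": "...",
    #   }
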
    async def get_quality_trends(
        self,
        tenant_id: str,
        check_type: str,
        days_back: int = 30
    ) -> Dict[str, Any]:
        """Get quality trends for a specific check type"""
        try:
            end_date = datetime.utcnow().date()
            start_date = end_date - timedelta(days=days_back)

            checks = await self.get_checks_by_date_range(
                tenant_id, start_date, end_date, check_type
            )

            # Group by date
            daily_metrics = {}
            for check in checks:
                check_date = check.check_time.date()
                if check_date not in daily_metrics:
                    daily_metrics[check_date] = {
                        "total_checks": 0,
                        "passed_checks": 0,
                        "quality_scores": [],
                        "defect_count": 0
                    }

                daily_metrics[check_date]["total_checks"] += 1
                if check.pass_fail:
                    daily_metrics[check_date]["passed_checks"] += 1
                if check.quality_score is not None:
                    daily_metrics[check_date]["quality_scores"].append(check.quality_score)
                daily_metrics[check_date]["defect_count"] += check.defect_count

            # Calculate daily pass rates and averages
            trend_data = []
            for date_key, metrics in sorted(daily_metrics.items()):
                pass_rate = (metrics["passed_checks"] / metrics["total_checks"] * 100) if metrics["total_checks"] > 0 else 0
                avg_score = sum(metrics["quality_scores"]) / len(metrics["quality_scores"]) if metrics["quality_scores"] else 0

                trend_data.append({
                    "date": date_key.isoformat(),
                    "total_checks": metrics["total_checks"],
                    "pass_rate": round(pass_rate, 2),
                    "average_quality_score": round(avg_score, 2),
                    "total_defects": metrics["defect_count"]
                })

            # Calculate overall trend direction; the comparison needs more
            # than seven days of data, otherwise the "earlier" window is
            # empty and every trend would spuriously read as improving
            if len(trend_data) > 7:
                recent_avg = sum(d["pass_rate"] for d in trend_data[-7:]) / 7
                earlier_avg = sum(d["pass_rate"] for d in trend_data[:-7]) / (len(trend_data) - 7)
                trend_direction = "improving" if recent_avg > earlier_avg else "declining" if recent_avg < earlier_avg else "stable"
            else:
                trend_direction = "insufficient_data"

            return {
                "check_type": check_type,
                "period_start": start_date.isoformat(),
                "period_end": end_date.isoformat(),
                "trend_direction": trend_direction,
                "daily_data": trend_data,
                "total_checks": len(checks),
                "tenant_id": tenant_id
            }

        except Exception as e:
            logger.error("Error calculating quality trends", error=str(e))
            raise DatabaseError(f"Failed to calculate quality trends: {str(e)}")

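    # Each entry in "daily_data" above looks like (values illustrative):
    #
    #   {"date": "2024-01-15", "total_checks": 6, "pass_rate": 83.33,
    #    "average_quality_score": 7.9, "total_defects": 2}
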
    async def get_quality_checks_with_filters(
        self,
        tenant_id: str,
        batch_id: Optional[UUID] = None,
        product_id: Optional[UUID] = None,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None,
        pass_fail: Optional[bool] = None,
        page: int = 1,
        page_size: int = 50
    ) -> tuple[List[QualityCheck], int]:
        """Get quality checks with filters and pagination"""
        try:
            filters = {"tenant_id": tenant_id}

            if batch_id:
                filters["batch_id"] = batch_id
            if product_id:
                # Note: filtering by product_id would require a join with
                # production batches; the parameter is accepted but not
                # applied yet
                pass
            if start_date:
                start_datetime = datetime.combine(start_date, datetime.min.time())
                filters["check_time__gte"] = start_datetime
            if end_date:
                end_datetime = datetime.combine(end_date, datetime.max.time())
                filters["check_time__lte"] = end_datetime
            if pass_fail is not None:
                filters["pass_fail"] = pass_fail

            # Get total count
            total_count = await self.count(filters)

            # Get paginated results; the shared get_multi expresses the page
            # offset through its skip parameter
            offset = (page - 1) * page_size
            checks = await self.get_multi(
                filters=filters,
                order_by="check_time",
                order_desc=True,
                limit=page_size,
                skip=offset
            )

            logger.info("Retrieved quality checks with filters",
                        count=len(checks),
                        total_count=total_count,
                        page=page,
                        page_size=page_size,
                        tenant_id=tenant_id)

            return checks, total_count

        except Exception as e:
            logger.error("Error fetching quality checks with filters", error=str(e))
            raise DatabaseError(f"Failed to fetch quality checks with filters: {str(e)}")

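    # Example call (illustrative): second page of failed checks for January.
    #
    #   checks, total = await repo.get_quality_checks_with_filters(
    #       tenant_id,
    #       start_date=date(2024, 1, 1),
    #       end_date=date(2024, 1, 31),
    #       pass_fail=False,
    #       page=2,
    #       page_size=25,
    #   )
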
    # ================================================================
    # ALERT-RELATED METHODS (migrated from production_alert_repository)
    # ================================================================

    async def get_quality_issues(self, tenant_id: Optional[UUID] = None) -> List[Dict[str, Any]]:
        """
        Get quality control failures.

        Returns quality checks that failed within the last four hours and
        were flagged as needing corrective action.

        Args:
            tenant_id: Optional tenant ID to filter by
        """
        try:
            # Raw SQL keeps the join and ordering explicit; the interval
            # syntax below is PostgreSQL-specific
            query_str = """
                SELECT
                    qc.id, qc.tenant_id, qc.batch_id, qc.check_type,
                    qc.quality_score, qc.within_tolerance,
                    qc.pass_fail, qc.defect_count,
                    qc.check_notes as qc_severity,
                    1 as total_failures,
                    pb.product_name, pb.batch_number,
                    qc.created_at,
                    qc.process_stage
                FROM quality_checks qc
                JOIN production_batches pb ON pb.id = qc.batch_id
                WHERE qc.pass_fail = false
                AND qc.created_at > NOW() - INTERVAL '4 hours'
                AND qc.corrective_action_needed = true
            """

            params = {}
            if tenant_id:
                query_str += " AND qc.tenant_id = :tenant_id"
                params["tenant_id"] = tenant_id

            query_str += """
                ORDER BY
                    CASE
                        WHEN qc.pass_fail = false AND qc.defect_count > 5 THEN 1
                        WHEN qc.pass_fail = false THEN 2
                        ELSE 3
                    END,
                    qc.created_at DESC
            """

            result = await self.session.execute(text(query_str), params)
            return [dict(row._mapping) for row in result.fetchall()]

        except Exception as e:
            logger.error("Failed to get quality issues", error=str(e))
            raise DatabaseError(f"Failed to get quality issues: {str(e)}")

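    # Each returned row is a plain dict keyed by the SELECT aliases, e.g.
    # (values illustrative):
    #
    #   {"id": UUID("3a1b..."), "check_type": "visual_inspection",
    #    "defect_count": 7, "product_name": "Widget A",
    #    "batch_number": "B-2024-0117", "qc_severity": "...", ...}
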
    async def mark_quality_check_acknowledged(self, quality_check_id: UUID) -> None:
        """
        Mark a quality check as acknowledged to avoid duplicate alerts.
        """
        try:
            query = text("""
                UPDATE quality_checks
                SET acknowledged = true
                WHERE id = :id
            """)

            await self.session.execute(query, {"id": quality_check_id})
            await self.session.commit()

        except Exception as e:
            logger.error("Failed to mark quality check acknowledged", error=str(e), qc_id=str(quality_check_id))
            raise DatabaseError(f"Failed to mark quality check acknowledged: {str(e)}")

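
# ----------------------------------------------------------------
# Usage sketch for the alert flow above (illustrative; assumes an
# AsyncSession from the service's session factory, and dispatch_alert
# is a hypothetical notifier):
#
#   async with session_factory() as session:
#       repo = QualityCheckRepository(session)
#       issues = await repo.get_quality_issues(tenant_id)
#       for issue in issues:
#           await dispatch_alert(issue)
#           await repo.mark_quality_check_acknowledged(issue["id"])
# ----------------------------------------------------------------
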
@@ -0,0 +1,162 @@
"""
Quality Template Repository for Production Service
"""

from typing import List, Optional, Tuple
from sqlalchemy import and_, or_, func, select
from sqlalchemy.ext.asyncio import AsyncSession
from uuid import UUID
import structlog

from .base import ProductionBaseRepository
from ..models.production import QualityCheckTemplate, ProcessStage


logger = structlog.get_logger()


class QualityTemplateRepository(ProductionBaseRepository):
    """Repository for quality check template operations"""

    def __init__(self, session: AsyncSession):
        super().__init__(QualityCheckTemplate, session)

    async def get_templates_by_tenant(
        self,
        tenant_id: str,
        stage: Optional[ProcessStage] = None,
        check_type: Optional[str] = None,
        is_active: Optional[bool] = True,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[QualityCheckTemplate], int]:
        """Get quality check templates with filtering and pagination"""

        filters = [QualityCheckTemplate.tenant_id == tenant_id]

        if is_active is not None:
            filters.append(QualityCheckTemplate.is_active == is_active)

        if check_type:
            filters.append(QualityCheckTemplate.check_type == check_type)

        if stage:
            # Templates with no applicable_stages apply everywhere. Note that
            # func.json_contains renders MySQL's JSON_CONTAINS; on PostgreSQL
            # the equivalent is JSONB containment
            # (applicable_stages.contains([stage.value]))
            filters.append(
                or_(
                    func.json_contains(
                        QualityCheckTemplate.applicable_stages,
                        f'"{stage.value}"'
                    ),
                    QualityCheckTemplate.applicable_stages.is_(None)
                )
            )

        # Get total count with SQLAlchemy conditions
        count_query = select(func.count(QualityCheckTemplate.id)).where(and_(*filters))
        count_result = await self.session.execute(count_query)
        total = count_result.scalar()

        # Get templates with ordering: critical first, then required, then name
        query = select(QualityCheckTemplate).where(and_(*filters)).order_by(
            QualityCheckTemplate.is_critical.desc(),
            QualityCheckTemplate.is_required.desc(),
            QualityCheckTemplate.name
        ).offset(skip).limit(limit)

        result = await self.session.execute(query)
        templates = result.scalars().all()

        return templates, total

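    # Example call (illustrative): first page of active templates for one
    # stage, assuming ProcessStage defines a MIXING member.
    #
    #   templates, total = await repo.get_templates_by_tenant(
    #       tenant_id, stage=ProcessStage.MIXING, limit=20
    #   )
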
    async def get_by_tenant_and_id(
        self,
        tenant_id: str,
        template_id: UUID
    ) -> Optional[QualityCheckTemplate]:
        """Get a specific quality check template by tenant and ID"""

        return await self.get_by_filters(
            and_(
                QualityCheckTemplate.tenant_id == tenant_id,
                QualityCheckTemplate.id == template_id
            )
        )

    async def get_templates_for_stage(
        self,
        tenant_id: str,
        stage: ProcessStage,
        is_active: Optional[bool] = True
    ) -> List[QualityCheckTemplate]:
        """Get all quality check templates applicable to a specific process stage"""

        filters = [
            QualityCheckTemplate.tenant_id == tenant_id,
            or_(
                func.json_contains(
                    QualityCheckTemplate.applicable_stages,
                    f'"{stage.value}"'
                ),
                QualityCheckTemplate.applicable_stages.is_(None)
            )
        ]

        if is_active is not None:
            filters.append(QualityCheckTemplate.is_active == is_active)

        # NOTE: this passes SQLAlchemy conditions and a column list, which
        # assumes the shared get_multi accepts them in addition to the
        # dict/string form used elsewhere in this service
        return await self.get_multi(
            filters=and_(*filters),
            order_by=[
                QualityCheckTemplate.is_critical.desc(),
                QualityCheckTemplate.is_required.desc(),
                QualityCheckTemplate.weight.desc(),
                QualityCheckTemplate.name
            ]
        )

    async def check_template_code_exists(
        self,
        tenant_id: str,
        template_code: str,
        exclude_id: Optional[UUID] = None
    ) -> bool:
        """Check if a template code already exists for the tenant"""

        filters = [
            QualityCheckTemplate.tenant_id == tenant_id,
            QualityCheckTemplate.template_code == template_code
        ]

        if exclude_id:
            filters.append(QualityCheckTemplate.id != exclude_id)

        existing = await self.get_by_filters(and_(*filters))
        return existing is not None

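    # Example call (illustrative): guard a rename so the code stays unique
    # within the tenant, excluding the template being edited.
    #
    #   if await repo.check_template_code_exists(
    #       tenant_id, "QC-VISUAL-01", exclude_id=template.id
    #   ):
    #       raise ValueError("Template code already in use")
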
    async def get_by_filters(self, *filters):
        """Get a single record by filters"""
        try:
            query = select(self.model).where(and_(*filters))
            result = await self.session.execute(query)
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error("Error getting record by filters", error=str(e), filters=str(filters))
            raise

    async def get_templates_by_ids(
        self,
        tenant_id: str,
        template_ids: List[UUID]
    ) -> List[QualityCheckTemplate]:
        """Get quality check templates by list of IDs"""

        # Same get_multi calling convention as get_templates_for_stage above
        return await self.get_multi(
            filters=and_(
                QualityCheckTemplate.tenant_id == tenant_id,
                QualityCheckTemplate.id.in_(template_ids)
            ),
            order_by=[
                QualityCheckTemplate.is_critical.desc(),
                QualityCheckTemplate.is_required.desc(),
                QualityCheckTemplate.weight.desc()
            ]
        )
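
# ----------------------------------------------------------------
# Usage sketch (illustrative; assumes an AsyncSession `session` and
# that ProcessStage defines a MIXING member):
#
#   repo = QualityTemplateRepository(session)
#   stage_templates = await repo.get_templates_for_stage(
#       tenant_id, ProcessStage.MIXING
#   )
#   by_id = await repo.get_templates_by_ids(
#       tenant_id, [t.id for t in stage_templates]
#   )
# ----------------------------------------------------------------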