Improve the frontend and fix TODOs

This commit is contained in:
Urtzi Alfaro
2025-10-24 13:05:04 +02:00
parent 07c33fa578
commit 61376b7a9f
100 changed files with 8284 additions and 3419 deletions

View File

@@ -5,7 +5,7 @@ Service-to-service endpoint for cloning production data
from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from sqlalchemy import select, delete, func
import structlog
import uuid
from datetime import datetime, timezone, timedelta
@@ -493,3 +493,53 @@ async def clone_health_check(_: bool = Depends(verify_internal_api_key)):
"clone_endpoint": "available",
"version": "2.0.0"
}
@router.delete("/tenant/{virtual_tenant_id}")
async def delete_demo_data(
virtual_tenant_id: str,
db: AsyncSession = Depends(get_db),
_: bool = Depends(verify_internal_api_key)
):
"""Delete all production data for a virtual demo tenant"""
logger.info("Deleting production data for virtual tenant", virtual_tenant_id=virtual_tenant_id)
start_time = datetime.now(timezone.utc)
try:
virtual_uuid = uuid.UUID(virtual_tenant_id)
# Count records
batch_count = await db.scalar(select(func.count(ProductionBatch.id)).where(ProductionBatch.tenant_id == virtual_uuid))
schedule_count = await db.scalar(select(func.count(ProductionSchedule.id)).where(ProductionSchedule.tenant_id == virtual_uuid))
quality_count = await db.scalar(select(func.count(QualityCheck.id)).where(QualityCheck.tenant_id == virtual_uuid))
equipment_count = await db.scalar(select(func.count(Equipment.id)).where(Equipment.tenant_id == virtual_uuid))
# Delete in order
await db.execute(delete(QualityCheck).where(QualityCheck.tenant_id == virtual_uuid))
await db.execute(delete(ProductionBatch).where(ProductionBatch.tenant_id == virtual_uuid))
await db.execute(delete(ProductionSchedule).where(ProductionSchedule.tenant_id == virtual_uuid))
await db.execute(delete(QualityCheckTemplate).where(QualityCheckTemplate.tenant_id == virtual_uuid))
await db.execute(delete(Equipment).where(Equipment.tenant_id == virtual_uuid))
await db.execute(delete(ProductionCapacity).where(ProductionCapacity.tenant_id == virtual_uuid))
await db.commit()
duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
logger.info("Production data deleted successfully", virtual_tenant_id=virtual_tenant_id, duration_ms=duration_ms)
return {
"service": "production",
"status": "deleted",
"virtual_tenant_id": virtual_tenant_id,
"records_deleted": {
"batches": batch_count,
"schedules": schedule_count,
"quality_checks": quality_count,
"equipment": equipment_count,
"total": batch_count + schedule_count + quality_count + equipment_count
},
"duration_ms": duration_ms
}
except Exception as e:
logger.error("Failed to delete production data", error=str(e), exc_info=True)
await db.rollback()
raise HTTPException(status_code=500, detail=str(e))
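A rough sketch of how another service might call the new deletion route; the base URL, any router prefix, and the internal API key header name are illustrative assumptions and do not appear in this diff:

import asyncio
import httpx

async def delete_demo_tenant(virtual_tenant_id: str) -> dict:
    # Base URL and header name are hypothetical; the real router may also add a prefix.
    async with httpx.AsyncClient(base_url="http://production-service:8000") as client:
        resp = await client.delete(
            f"/tenant/{virtual_tenant_id}",
            headers={"X-Internal-API-Key": "dev-internal-key"},  # assumed header name
        )
        resp.raise_for_status()
        return resp.json()  # {"status": "deleted", "records_deleted": {...}, ...}

if __name__ == "__main__":
    print(asyncio.run(delete_demo_tenant("00000000-0000-0000-0000-000000000001")))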

View File

@@ -4,7 +4,7 @@ Production Operations API - Business operations for production management
Includes: batch start/complete, schedule finalize/optimize, capacity management, transformations, stats
"""
from fastapi import APIRouter, Depends, HTTPException, Path, Query
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request, status
from typing import Optional
from datetime import date, datetime, timedelta
from uuid import UUID
@@ -12,6 +12,7 @@ import structlog
from shared.auth.decorators import get_current_user_dep
from shared.routing import RouteBuilder
from shared.monitoring.decorators import monitor_performance
from app.services.production_service import ProductionService
from app.schemas.production import (
ProductionBatchResponse,
@@ -394,3 +395,50 @@ async def transform_par_baked_products(
logger.error("Error transforming products",
error=str(e), tenant_id=str(tenant_id))
raise HTTPException(status_code=500, detail="Failed to transform products")
# ===== SCHEDULER OPERATIONS =====
@router.post(
route_builder.build_operations_route("scheduler/trigger")
)
@monitor_performance("trigger_production_scheduler")
async def trigger_production_scheduler(
tenant_id: UUID = Path(...),
request: Request = None
):
"""
Manually trigger the production scheduler for the current tenant.
Intended primarily for testing and development; runs the production
schedule generation process on demand.
"""
try:
# Get the scheduler service from app state
if hasattr(request.app.state, 'scheduler_service'):
scheduler_service = request.app.state.scheduler_service
await scheduler_service.test_production_schedule_generation()
logger.info("Production scheduler triggered manually",
tenant_id=str(tenant_id))
return {
"success": True,
"message": "Production scheduler executed successfully",
"tenant_id": str(tenant_id)
}
else:
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Scheduler service is not available"
)
except HTTPException:
raise
except Exception as e:
logger.error("Error triggering production scheduler",
error=str(e), tenant_id=str(tenant_id))
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Error triggering production scheduler: {str(e)}"
)
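The trigger endpoint above looks the scheduler up on request.app.state, so the application must attach it at startup. A minimal sketch of one way to wire that, assuming a SchedulerService class; only test_production_schedule_generation comes from this commit, the other names are hypothetical:

from contextlib import asynccontextmanager
from fastapi import FastAPI

class SchedulerService:  # placeholder standing in for the real service
    async def test_production_schedule_generation(self) -> None: ...
    async def shutdown(self) -> None: ...  # hypothetical cleanup hook

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Make the scheduler reachable via request.app.state.scheduler_service
    app.state.scheduler_service = SchedulerService()
    try:
        yield
    finally:
        await app.state.scheduler_service.shutdown()

app = FastAPI(lifespan=lifespan)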

View File

@@ -112,6 +112,7 @@ class ProductionBatch(Base):
quality_score = Column(Float, nullable=True)
waste_quantity = Column(Float, nullable=True)
defect_quantity = Column(Float, nullable=True)
waste_defect_type = Column(String(100), nullable=True) # Type of defect causing waste (burnt, misshapen, underproofed, temperature_issues, expired)
# Equipment and resources
equipment_used = Column(JSON, nullable=True) # List of equipment IDs
@@ -123,6 +124,7 @@ class ProductionBatch(Base):
forecast_id = Column(UUID(as_uuid=True), nullable=True) # Associated demand forecast
is_rush_order = Column(Boolean, default=False)
is_special_recipe = Column(Boolean, default=False)
is_ai_assisted = Column(Boolean, default=False) # Whether batch used AI forecasting/optimization
# Notes and tracking
production_notes = Column(Text, nullable=True)
@@ -163,6 +165,7 @@ class ProductionBatch(Base):
"quality_score": self.quality_score,
"waste_quantity": self.waste_quantity,
"defect_quantity": self.defect_quantity,
"waste_defect_type": self.waste_defect_type,
"equipment_used": self.equipment_used,
"staff_assigned": self.staff_assigned,
"station_id": self.station_id,
@@ -170,6 +173,7 @@ class ProductionBatch(Base):
"forecast_id": str(self.forecast_id) if self.forecast_id else None,
"is_rush_order": self.is_rush_order,
"is_special_recipe": self.is_special_recipe,
"is_ai_assisted": self.is_ai_assisted,
"production_notes": self.production_notes,
"quality_notes": self.quality_notes,
"delay_reason": self.delay_reason,

View File

@@ -716,11 +716,16 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):
COALESCE(SUM(planned_quantity), 0) as total_planned,
COALESCE(SUM(actual_quantity), 0) as total_actual,
COUNT(*) as total_batches,
COUNT(CASE WHEN forecast_id IS NOT NULL THEN 1 END) as ai_assisted_batches
COUNT(CASE WHEN is_ai_assisted = true THEN 1 END) as ai_assisted_batches,
COALESCE(SUM(CASE WHEN waste_defect_type = 'burnt' THEN waste_quantity ELSE 0 END), 0) as burnt_waste,
COALESCE(SUM(CASE WHEN waste_defect_type = 'misshapen' THEN waste_quantity ELSE 0 END), 0) as misshapen_waste,
COALESCE(SUM(CASE WHEN waste_defect_type = 'underproofed' THEN waste_quantity ELSE 0 END), 0) as underproofed_waste,
COALESCE(SUM(CASE WHEN waste_defect_type = 'temperature_issues' THEN waste_quantity ELSE 0 END), 0) as temperature_waste,
COALESCE(SUM(CASE WHEN waste_defect_type = 'expired' THEN waste_quantity ELSE 0 END), 0) as expired_waste
FROM production_batches
WHERE tenant_id = :tenant_id
AND created_at BETWEEN :start_date AND :end_date
AND status IN ('COMPLETED', 'QUALITY_CHECK', 'FINISHED')
AND status IN ('COMPLETED', 'QUALITY_CHECK')
""")
result = await self.session.execute(
@@ -739,7 +744,14 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):
'total_planned': float(row.total_planned or 0),
'total_actual': float(row.total_actual or 0),
'total_batches': int(row.total_batches or 0),
'ai_assisted_batches': int(row.ai_assisted_batches or 0)
'ai_assisted_batches': int(row.ai_assisted_batches or 0),
'waste_by_defect_type': {
'burnt': float(row.burnt_waste or 0),
'misshapen': float(row.misshapen_waste or 0),
'underproofed': float(row.underproofed_waste or 0),
'temperature_issues': float(row.temperature_waste or 0),
'expired': float(row.expired_waste or 0)
}
}
logger.info(
@@ -783,7 +795,7 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):
WHERE tenant_id = :tenant_id
AND created_at BETWEEN first_batch.start_date
AND first_batch.start_date + INTERVAL '90 days'
AND status IN ('COMPLETED', 'QUALITY_CHECK', 'FINISHED')
AND status IN ('COMPLETED', 'QUALITY_CHECK')
)
SELECT
total_waste,
@@ -833,4 +845,4 @@ class ProductionBatchRepository(ProductionBaseRepository, BatchCountProvider):
except Exception as e:
logger.error("Error calculating baseline metrics", error=str(e), tenant_id=str(tenant_id))
raise DatabaseError(f"Failed to calculate baseline metrics: {str(e)}")
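To illustrate the new breakdown, a short example of consuming the metrics dict built above; the numbers are invented:

metrics = {
    "total_batches": 42,
    "ai_assisted_batches": 18,
    "waste_by_defect_type": {
        "burnt": 12.0,
        "misshapen": 4.5,
        "underproofed": 0.0,
        "temperature_issues": 2.0,
        "expired": 7.5,
    },
}

waste = metrics["waste_by_defect_type"]
total_waste = sum(waste.values())
top_defect = max(waste, key=waste.get) if total_waste > 0 else None
print(total_waste, top_defect)  # 26.0 burnt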

View File

@@ -4,7 +4,7 @@ Main business logic for production operations
"""
from typing import Optional, List, Dict, Any
from datetime import datetime, date, timedelta
from datetime import datetime, date, timedelta, timezone
from uuid import UUID
import structlog
@@ -369,12 +369,46 @@ class ProductionService:
str(tenant_id), week_ago, today
)
# Calculate capacity utilization from actual data
from app.models.production import QualityCheck
from sqlalchemy import select, func, and_
# Calculate capacity utilization: (Total planned quantity / Total capacity) * 100
# Assuming 8-hour workday with standard capacity per hour
STANDARD_HOURLY_CAPACITY = 100 # units per hour (configurable)
WORKING_HOURS_PER_DAY = 8
total_daily_capacity = STANDARD_HOURLY_CAPACITY * WORKING_HOURS_PER_DAY
total_planned_today = sum(b.planned_quantity or 0 for b in todays_batches)
capacity_utilization = min((total_planned_today / total_daily_capacity * 100) if total_daily_capacity > 0 else 0, 100)
# Calculate average quality score from quality checks
quality_query = select(func.avg(QualityCheck.quality_score)).where(
and_(
QualityCheck.tenant_id == tenant_id,
QualityCheck.check_time >= datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
)
)
quality_result = await batch_repo.session.execute(quality_query)
average_quality_score = quality_result.scalar() or 0.0
# If no checks today, use recent average (last 7 days)
if average_quality_score == 0:
recent_quality_query = select(func.avg(QualityCheck.quality_score)).where(
and_(
QualityCheck.tenant_id == tenant_id,
QualityCheck.check_time >= datetime.now(timezone.utc) - timedelta(days=7)
)
)
recent_quality_result = await batch_repo.session.execute(recent_quality_query)
average_quality_score = recent_quality_result.scalar() or 8.5 # Default fallback
return ProductionDashboardSummary(
active_batches=len(active_batches),
todays_production_plan=todays_plan,
capacity_utilization=85.0, # TODO: Calculate from actual capacity data
capacity_utilization=round(capacity_utilization, 1),
on_time_completion_rate=weekly_metrics.get("on_time_completion_rate", 0),
average_quality_score=8.5, # TODO: Get from quality checks
average_quality_score=round(average_quality_score, 1),
total_output_today=sum(b.actual_quantity or 0 for b in todays_batches),
efficiency_percentage=weekly_metrics.get("average_yield_percentage", 0)
)
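The same capacity-utilization calculation, restated as a standalone helper using the constants from the service code above (the 100 units/hour figure is the commit's own configurable placeholder, not a measured value):

STANDARD_HOURLY_CAPACITY = 100  # units per hour, assumed default
WORKING_HOURS_PER_DAY = 8

def capacity_utilization(total_planned_today: float) -> float:
    # (total planned quantity / total daily capacity) * 100, capped at 100%
    total_daily_capacity = STANDARD_HOURLY_CAPACITY * WORKING_HOURS_PER_DAY
    if total_daily_capacity <= 0:
        return 0.0
    return round(min(total_planned_today / total_daily_capacity * 100, 100), 1)

print(capacity_utilization(620))  # 77.5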

View File

@@ -241,12 +241,32 @@ class QualityTemplateService:
tenant_id=tenant_id)
return False
# TODO: Business Rule - Check if template is in use before deletion
# For now, allow deletion. In production you might want to:
# 1. Soft delete by setting is_active = False
# 2. Check for dependent quality checks
# 3. Prevent deletion if actively used
# Business Rule: Check if template is in use before deletion
# Check for quality checks using this template
from app.models.production import QualityCheck
from sqlalchemy import select, func
usage_query = select(func.count(QualityCheck.id)).where(
QualityCheck.template_id == template_id
)
usage_result = await self.repository.session.execute(usage_query)
usage_count = usage_result.scalar() or 0
if usage_count > 0:
logger.warning("Cannot delete template in use",
template_id=str(template_id),
tenant_id=tenant_id,
usage_count=usage_count)
# Instead of deleting, soft delete by setting is_active = False
template.is_active = False
await self.repository.session.commit()
logger.info("Quality template soft deleted (set to inactive)",
template_id=str(template_id),
tenant_id=tenant_id,
usage_count=usage_count)
return True
# Template is not in use, safe to delete
success = await self.repository.delete(template_id)
if success: