Improve the frontend and repository layer

Urtzi Alfaro
2025-10-23 07:44:54 +02:00
parent 8d30172483
commit 07c33fa578
112 changed files with 14726 additions and 2733 deletions


@@ -20,8 +20,6 @@ from app.models.production import (
     EquipmentStatus, EquipmentType
 )
 from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
-from shared.utils.alert_generator import generate_equipment_alerts
-from shared.messaging.rabbitmq import RabbitMQClient
 
 logger = structlog.get_logger()
 router = APIRouter(prefix="/internal/demo", tags=["internal"])
@@ -430,44 +428,18 @@ async def clone_demo_data(
         db.add(new_capacity)
         stats["production_capacity"] += 1
 
-    # Commit cloned data first
+    # Commit cloned data
     await db.commit()
 
-    # Generate equipment maintenance and status alerts with RabbitMQ publishing
-    rabbitmq_client = None
-    try:
-        # Initialize RabbitMQ client for alert publishing
-        rabbitmq_host = os.getenv("RABBITMQ_HOST", "rabbitmq-service")
-        rabbitmq_user = os.getenv("RABBITMQ_USER", "bakery")
-        rabbitmq_password = os.getenv("RABBITMQ_PASSWORD", "forecast123")
-        rabbitmq_port = os.getenv("RABBITMQ_PORT", "5672")
-        rabbitmq_vhost = os.getenv("RABBITMQ_VHOST", "/")
-        rabbitmq_url = f"amqp://{rabbitmq_user}:{rabbitmq_password}@{rabbitmq_host}:{rabbitmq_port}{rabbitmq_vhost}"
-
-        rabbitmq_client = RabbitMQClient(rabbitmq_url, service_name="production")
-        await rabbitmq_client.connect()
-
-        # Generate alerts and publish to RabbitMQ
-        alerts_count = await generate_equipment_alerts(
-            db,
-            virtual_uuid,
-            session_time,
-            rabbitmq_client=rabbitmq_client
-        )
-        stats["alerts_generated"] += alerts_count
-        await db.commit()
-        logger.info(f"Generated {alerts_count} equipment alerts")
-    except Exception as alert_error:
-        logger.warning(f"Alert generation failed: {alert_error}", exc_info=True)
-    finally:
-        # Clean up RabbitMQ connection
-        if rabbitmq_client:
-            try:
-                await rabbitmq_client.disconnect()
-            except Exception as cleanup_error:
-                logger.warning(f"Error disconnecting RabbitMQ: {cleanup_error}")
+    # NOTE: Alert generation removed - alerts are now generated automatically by the
+    # production alert service which runs scheduled checks at appropriate intervals.
+    # This eliminates duplicate alerts and provides a more realistic demo experience.
+    stats["alerts_generated"] = 0
 
-    total_records = sum(stats.values())
+    # Calculate total from non-alert stats
+    total_records = (stats["equipment"] + stats["batches"] + stats["schedules"] +
+                     stats["quality_templates"] + stats["quality_checks"] +
+                     stats["production_capacity"])
 
     duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
     logger.info(
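
The NOTE added above carries the real behaviour change: the demo-clone endpoint no longer opens a RabbitMQ connection and pushes equipment alerts itself; a separate production alert service raises them on a schedule. That service is not part of this diff, so the following is only a rough sketch of what such a periodic check can look like; every name in it (check_equipment_alerts, alert_scheduler, CHECK_INTERVAL_SECONDS) is a hypothetical illustration, not an identifier from this repository.

# Hypothetical sketch of a scheduled alert check, illustrating the architecture the
# NOTE describes. None of these names come from the repository.
import asyncio
import logging
from datetime import datetime, timezone

logger = logging.getLogger("production.alerts")

CHECK_INTERVAL_SECONDS = 300  # assumed interval, purely for illustration


async def check_equipment_alerts() -> int:
    """Stand-in for the periodic equipment check; returns how many alerts were raised."""
    # A real implementation would query equipment status here and publish any alerts.
    return 0


async def alert_scheduler(stop_event: asyncio.Event) -> None:
    """Run the check on a fixed interval until asked to stop."""
    while not stop_event.is_set():
        started = datetime.now(timezone.utc)
        try:
            raised = await check_equipment_alerts()
            logger.info("alert check done: %d alerts (started %s)", raised, started.isoformat())
        except Exception:
            # One failed check should not kill the scheduler.
            logger.exception("alert check failed")
        try:
            # Sleep for the interval, but wake up immediately on shutdown.
            await asyncio.wait_for(stop_event.wait(), timeout=CHECK_INTERVAL_SECONDS)
        except asyncio.TimeoutError:
            pass

With alert generation living in that separate service, the clone endpoint only seeds data, which is why the hunk pins stats["alerts_generated"] to 0 and sums the cloned-record counters explicitly rather than relying on sum(stats.values()).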