Support subscription payments

This commit is contained in:
Urtzi Alfaro
2025-09-25 14:30:47 +02:00
parent f02a980c87
commit 89b75bd7af
22 changed files with 2119 additions and 364 deletions

View File

@@ -21,59 +21,67 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
def setup_scheduled_checks(self):
"""Production-specific scheduled checks for alerts and recommendations"""
# Production capacity checks - every 10 minutes during business hours (alerts)
# Reduced frequency to prevent deadlocks and resource contention
# Production capacity checks - every 15 minutes during business hours (reduced from 10)
self.scheduler.add_job(
self.check_production_capacity,
CronTrigger(minute='*/10', hour='6-20'),
CronTrigger(minute='*/15', hour='6-20'),
id='capacity_check',
misfire_grace_time=60,
max_instances=1
misfire_grace_time=120, # Increased grace time
max_instances=1,
coalesce=True # Combine missed runs
)
# Production delays - every 5 minutes during production hours (alerts)
# Production delays - every 10 minutes during production hours (reduced from 5)
self.scheduler.add_job(
self.check_production_delays,
CronTrigger(minute='*/5', hour='4-22'),
CronTrigger(minute='*/10', hour='4-22'),
id='delay_check',
misfire_grace_time=30,
max_instances=1
misfire_grace_time=60,
max_instances=1,
coalesce=True
)
# Quality issues check - every 15 minutes (alerts)
# Quality issues check - every 20 minutes (reduced from 15)
self.scheduler.add_job(
self.check_quality_issues,
CronTrigger(minute='*/15'),
CronTrigger(minute='*/20'),
id='quality_check',
misfire_grace_time=60,
max_instances=1
misfire_grace_time=120,
max_instances=1,
coalesce=True
)
# Equipment monitoring - check equipment status for maintenance alerts
# Equipment monitoring - check equipment status every 45 minutes (reduced from 30)
self.scheduler.add_job(
self.check_equipment_status,
CronTrigger(minute='*/30'), # Check every 30 minutes
CronTrigger(minute='*/45'),
id='equipment_check',
misfire_grace_time=30,
max_instances=1
misfire_grace_time=180,
max_instances=1,
coalesce=True
)
# Efficiency recommendations - every 30 minutes (recommendations)
# Efficiency recommendations - every hour (reduced from 30 minutes)
self.scheduler.add_job(
self.generate_efficiency_recommendations,
CronTrigger(minute='*/30'),
CronTrigger(minute='0'),
id='efficiency_recs',
misfire_grace_time=120,
max_instances=1
misfire_grace_time=300,
max_instances=1,
coalesce=True
)
# Energy optimization - every hour (recommendations)
# Energy optimization - every 2 hours (reduced from 1 hour)
self.scheduler.add_job(
self.generate_energy_recommendations,
CronTrigger(minute='0'),
CronTrigger(minute='0', hour='*/2'),
id='energy_recs',
misfire_grace_time=300,
max_instances=1
misfire_grace_time=600, # 10 minutes grace
max_instances=1,
coalesce=True
)
logger.info("Production alert schedules configured",
@@ -83,69 +91,47 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
"""Check if production plan exceeds capacity (alerts)"""
try:
self._checks_performed += 1
query = """
WITH capacity_analysis AS (
SELECT
p.tenant_id,
p.planned_date,
SUM(p.planned_quantity) as total_planned,
MAX(pc.daily_capacity) as max_daily_capacity,
COUNT(DISTINCT p.equipment_id) as equipment_count,
AVG(pc.efficiency_percent) as avg_efficiency,
CASE
WHEN SUM(p.planned_quantity) > MAX(pc.daily_capacity) * 1.2 THEN 'severe_overload'
WHEN SUM(p.planned_quantity) > MAX(pc.daily_capacity) THEN 'overload'
WHEN SUM(p.planned_quantity) > MAX(pc.daily_capacity) * 0.9 THEN 'near_capacity'
ELSE 'normal'
END as capacity_status,
(SUM(p.planned_quantity) / MAX(pc.daily_capacity)) * 100 as capacity_percentage
FROM production_schedule p
JOIN production_capacity pc ON pc.equipment_id = p.equipment_id
WHERE p.planned_date >= CURRENT_DATE
AND p.planned_date <= CURRENT_DATE + INTERVAL '3 days'
AND p.status IN ('PENDING', 'IN_PROGRESS')
AND p.tenant_id = $1
GROUP BY p.tenant_id, p.planned_date
)
SELECT * FROM capacity_analysis
WHERE capacity_status != 'normal'
ORDER BY capacity_percentage DESC
"""
# Check production capacity without tenant dependencies
# Use a simpler query with timeout and connection management
from sqlalchemy import text
simplified_query = text("""
SELECT
pb.tenant_id,
DATE(pb.planned_start_time) as planned_date,
COUNT(*) as batch_count,
SUM(pb.planned_quantity) as total_planned,
'capacity_check' as capacity_status,
100.0 as capacity_percentage -- Default value for processing
FROM production_batches pb
WHERE pb.planned_start_time >= CURRENT_DATE
AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
AND pb.status IN ('PLANNED', 'PENDING', 'IN_PROGRESS')
GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
HAVING COUNT(*) > 10 -- Alert if more than 10 batches per day
ORDER BY total_planned DESC
LIMIT 20 -- Limit results to prevent excessive processing
""")
# Use timeout and proper session handling
try:
from sqlalchemy import text
# Simplified query using only existing production tables
simplified_query = text("""
SELECT
pb.tenant_id,
DATE(pb.planned_start_time) as planned_date,
COUNT(*) as batch_count,
SUM(pb.planned_quantity) as total_planned,
'capacity_check' as capacity_status
FROM production_batches pb
WHERE pb.planned_start_time >= CURRENT_DATE
AND pb.planned_start_time <= CURRENT_DATE + INTERVAL '3 days'
AND pb.status IN ('PLANNED', 'PENDING', 'IN_PROGRESS')
GROUP BY pb.tenant_id, DATE(pb.planned_start_time)
HAVING COUNT(*) > 10 -- Alert if more than 10 batches per day
ORDER BY total_planned DESC
""")
async with self.db_manager.get_session() as session:
# Set statement timeout to prevent long-running queries
await session.execute(text("SET statement_timeout = '30s'"))
result = await session.execute(simplified_query)
capacity_issues = result.fetchall()
for issue in capacity_issues:
await self._process_capacity_issue(issue.tenant_id, issue)
except asyncio.TimeoutError:
logger.warning("Capacity check timed out", service=self.config.SERVICE_NAME)
self._errors_count += 1
except Exception as e:
logger.debug("Simplified capacity check failed", error=str(e))
logger.debug("Capacity check failed", error=str(e), service=self.config.SERVICE_NAME)
except Exception as e:
# Skip capacity checks if tables don't exist (graceful degradation)
if "does not exist" in str(e):
if "does not exist" in str(e).lower() or "relation" in str(e).lower():
logger.debug("Capacity check skipped - missing tables", error=str(e))
else:
logger.error("Capacity check failed", error=str(e))
@@ -215,10 +201,10 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
"""Check for production delays (alerts)"""
try:
self._checks_performed += 1
# Simplified query without customer_orders dependency
query = """
SELECT
# Simplified query with timeout and proper error handling
query = text("""
SELECT
pb.id, pb.tenant_id, pb.product_name, pb.batch_number,
pb.planned_end_time as planned_completion_time, pb.actual_start_time,
pb.actual_end_time as estimated_completion_time, pb.status,
@@ -232,24 +218,34 @@ class ProductionAlertService(BaseAlertService, AlertServiceMixin):
OR pb.status IN ('ON_HOLD', 'QUALITY_CHECK')
)
AND pb.planned_end_time > NOW() - INTERVAL '24 hours'
ORDER BY
ORDER BY
CASE COALESCE(pb.priority::text, 'MEDIUM')
WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
WHEN 'URGENT' THEN 1 WHEN 'HIGH' THEN 2 ELSE 3
END,
delay_minutes DESC
"""
from sqlalchemy import text
async with self.db_manager.get_session() as session:
result = await session.execute(text(query))
delays = result.fetchall()
for delay in delays:
await self._process_production_delay(delay)
LIMIT 50 -- Limit results to prevent excessive processing
""")
try:
from sqlalchemy import text
async with self.db_manager.get_session() as session:
# Set statement timeout
await session.execute(text("SET statement_timeout = '30s'"))
result = await session.execute(query)
delays = result.fetchall()
for delay in delays:
await self._process_production_delay(delay)
except asyncio.TimeoutError:
logger.warning("Production delay check timed out", service=self.config.SERVICE_NAME)
self._errors_count += 1
except Exception as e:
logger.debug("Production delay check failed", error=str(e), service=self.config.SERVICE_NAME)
except Exception as e:
# Skip delay checks if tables don't exist (graceful degradation)
if "does not exist" in str(e):
if "does not exist" in str(e).lower() or "relation" in str(e).lower():
logger.debug("Production delay check skipped - missing tables", error=str(e))
else:
logger.error("Production delay check failed", error=str(e))