Add fixes to procurement logic and fix real-time connections
@@ -50,6 +50,17 @@ class ProcurementSchedulerService(BaseAlertService, AlertServiceMixin):
                 max_instances=1
             )
 
+            # Stale plan cleanup at 6:30 AM (Bug #3 FIX, Edge Cases #1 & #2)
+            self.scheduler.add_job(
+                func=self.run_stale_plan_cleanup,
+                trigger=CronTrigger(hour=6, minute=30),
+                id="stale_plan_cleanup",
+                name="Stale Plan Cleanup & Reminders",
+                misfire_grace_time=300,
+                coalesce=True,
+                max_instances=1
+            )
+
             # Also add a test job that runs every 30 minutes for development/testing
             # This will be disabled in production via environment variable
             if getattr(self.config, 'DEBUG', False) or getattr(self.config, 'PROCUREMENT_TEST_MODE', False):
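For context, the new 6:30 AM job is registered through APScheduler's CronTrigger. Below is a minimal, self-contained sketch of the same registration pattern under the assumption that an AsyncIOScheduler is used; the cleanup coroutine and job id here are stand-ins, not the service's real ones.

# Minimal sketch of the cron-job registration pattern shown above (APScheduler).
# `cleanup_job` is a placeholder, not the real run_stale_plan_cleanup method.
import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger

async def cleanup_job():
    print("running stale plan cleanup")

async def main():
    scheduler = AsyncIOScheduler()
    scheduler.add_job(
        func=cleanup_job,
        trigger=CronTrigger(hour=6, minute=30),  # every day at 06:30
        id="stale_plan_cleanup",
        misfire_grace_time=300,  # still run if fired up to 5 minutes late
        coalesce=True,           # collapse a backlog of missed runs into one
        max_instances=1,         # never run two overlapping instances
    )
    scheduler.start()
    await asyncio.Event().wait()  # keep the event loop alive

if __name__ == "__main__":
    asyncio.run(main())

The misfire_grace_time/coalesce/max_instances combination is what makes the job safe to run on a scheduler that may briefly stall: a late or missed firing is executed once, and concurrent executions are prevented.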
@@ -79,7 +90,10 @@ class ProcurementSchedulerService(BaseAlertService, AlertServiceMixin):
                     jobs_count=len(self.scheduler.get_jobs()))
 
     async def run_daily_procurement_planning(self):
-        """Execute daily procurement planning for all active tenants"""
+        """
+        Execute daily procurement planning for all active tenants
+        Edge Case #6: Uses parallel processing with per-tenant timeouts
+        """
         if not self.is_leader:
             logger.debug("Skipping procurement planning - not leader")
             return
@@ -95,20 +109,21 @@ class ProcurementSchedulerService(BaseAlertService, AlertServiceMixin):
                 logger.info("No active tenants found for procurement planning")
                 return
 
-            # Process each tenant
-            processed_tenants = 0
-            failed_tenants = 0
-            for tenant_id in active_tenants:
-                try:
-                    logger.info("Processing tenant procurement", tenant_id=str(tenant_id))
-                    await self.process_tenant_procurement(tenant_id)
-                    processed_tenants += 1
-                    logger.info("✅ Successfully processed tenant", tenant_id=str(tenant_id))
-                except Exception as e:
-                    failed_tenants += 1
-                    logger.error("❌ Error processing tenant procurement",
-                                 tenant_id=str(tenant_id),
-                                 error=str(e))
+            # Edge Case #6: Process tenants in parallel with individual error handling
+            logger.info(f"Processing {len(active_tenants)} tenants in parallel")
+
+            # Create tasks with timeout for each tenant
+            tasks = [
+                self._process_tenant_with_timeout(tenant_id, timeout_seconds=120)
+                for tenant_id in active_tenants
+            ]
+
+            # Execute all tasks in parallel
+            results = await asyncio.gather(*tasks, return_exceptions=True)
+
+            # Count successes and failures
+            processed_tenants = sum(1 for r in results if r is True)
+            failed_tenants = sum(1 for r in results if isinstance(r, Exception) or r is False)
 
             logger.info("🎯 Daily procurement planning completed",
                         total_tenants=len(active_tenants),
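The sequential per-tenant loop is replaced here by a fan-out: each tenant gets its own task with an individual timeout, and asyncio.gather(return_exceptions=True) lets True/False return values and raised exceptions coexist in one result list. A standalone sketch of that counting pattern follows; the worker coroutine and tenant ids are dummies, not the real process_tenant_procurement.

# Standalone sketch of the gather/timeout pattern introduced above.
# `fake_tenant_work` stands in for the real per-tenant processing.
import asyncio

async def fake_tenant_work(tenant_id: int) -> None:
    if tenant_id == 2:
        raise RuntimeError("boom")        # simulated hard failure
    await asyncio.sleep(0.5 * tenant_id)  # simulated work

async def process_with_timeout(tenant_id: int, timeout_seconds: float = 1.0) -> bool:
    try:
        await asyncio.wait_for(fake_tenant_work(tenant_id), timeout=timeout_seconds)
        return True
    except asyncio.TimeoutError:
        return False  # a timeout is reported as a soft failure
    except Exception:
        raise         # other errors surface via return_exceptions=True

async def main():
    tenants = [0, 1, 2, 3]
    results = await asyncio.gather(
        *(process_with_timeout(t) for t in tenants),
        return_exceptions=True,
    )
    processed = sum(1 for r in results if r is True)
    failed = sum(1 for r in results if isinstance(r, Exception) or r is False)
    print(f"processed={processed} failed={failed}")  # processed=2 failed=2

asyncio.run(main())

With return_exceptions=True a single failing tenant can no longer abort the whole planning run; it simply shows up as an Exception entry in the results and is counted as a failure.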
@@ -118,6 +133,75 @@ class ProcurementSchedulerService(BaseAlertService, AlertServiceMixin):
         except Exception as e:
             self._errors_count += 1
             logger.error("💥 Daily procurement planning failed completely", error=str(e))
 
+    async def _process_tenant_with_timeout(self, tenant_id: UUID, timeout_seconds: int = 120) -> bool:
+        """
+        Process tenant procurement with timeout (Edge Case #6)
+        Returns True on success, False or raises exception on failure
+        """
+        try:
+            await asyncio.wait_for(
+                self.process_tenant_procurement(tenant_id),
+                timeout=timeout_seconds
+            )
+            logger.info("✅ Successfully processed tenant", tenant_id=str(tenant_id))
+            return True
+        except asyncio.TimeoutError:
+            logger.error("⏱️ Tenant processing timed out",
+                         tenant_id=str(tenant_id),
+                         timeout=timeout_seconds)
+            return False
+        except Exception as e:
+            logger.error("❌ Error processing tenant procurement",
+                         tenant_id=str(tenant_id),
+                         error=str(e))
+            raise
+
+    async def run_stale_plan_cleanup(self):
+        """
+        Clean up stale plans, send reminders and escalations
+        Bug #3 FIX, Edge Cases #1 & #2
+        """
+        if not self.is_leader:
+            logger.debug("Skipping stale plan cleanup - not leader")
+            return
+
+        try:
+            logger.info("🧹 Starting stale plan cleanup")
+
+            active_tenants = await self.get_active_tenants()
+            if not active_tenants:
+                logger.info("No active tenants found for cleanup")
+                return
+
+            total_archived = 0
+            total_cancelled = 0
+            total_escalated = 0
+
+            # Process each tenant's stale plans
+            for tenant_id in active_tenants:
+                try:
+                    async with self.db_session_factory() as session:
+                        procurement_service = ProcurementService(session, self.config)
+                        stats = await procurement_service.cleanup_stale_plans(tenant_id)
+
+                        total_archived += stats.get('archived', 0)
+                        total_cancelled += stats.get('cancelled', 0)
+                        total_escalated += stats.get('escalated', 0)
+
+                except Exception as e:
+                    logger.error("Error cleaning up tenant plans",
+                                 tenant_id=str(tenant_id),
+                                 error=str(e))
+
+            logger.info("✅ Stale plan cleanup completed",
+                        archived=total_archived,
+                        cancelled=total_cancelled,
+                        escalated=total_escalated)
+
+        except Exception as e:
+            self._errors_count += 1
+            logger.error("💥 Stale plan cleanup failed", error=str(e))
+
     async def get_active_tenants(self) -> List[UUID]:
         """Get active tenants from tenant service or base implementation"""
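The cleanup job above rolls per-tenant results into run-level totals using stats.get with a default, so a tenant whose cleanup returns a partial dict does not break the tally. A small sketch of that aggregation, assuming cleanup_stale_plans returns a dict shaped like {'archived': n, 'cancelled': n, 'escalated': n}; the sample dicts below are invented.

# Sketch of how per-tenant stats are rolled up in run_stale_plan_cleanup.
# The dicts below are invented sample data, not real cleanup_stale_plans output.
per_tenant_stats = [
    {"archived": 3, "cancelled": 1, "escalated": 0},
    {"archived": 0, "cancelled": 2},   # a missing key counts as 0 via dict.get
    {"archived": 1, "escalated": 2},
]

total_archived = total_cancelled = total_escalated = 0
for stats in per_tenant_stats:
    total_archived += stats.get("archived", 0)
    total_cancelled += stats.get("cancelled", 0)
    total_escalated += stats.get("escalated", 0)

print(total_archived, total_cancelled, total_escalated)  # 4 3 2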