Fixes and UI improvements

This commit is contained in:
Urtzi Alfaro
2025-12-09 10:21:41 +01:00
parent 667e6e0404
commit 508f4569b9
22 changed files with 833 additions and 953 deletions

View File

@@ -130,50 +130,85 @@ class ProfessionalCloningStrategy(CloningStrategy):
tasks.append(task)
service_map[task] = service_def.name
# Wait for all tasks to complete
results = await asyncio.gather(*tasks, return_exceptions=True)
# Process results
# Process tasks as they complete for real-time progress updates
service_results = {}
total_records = 0
failed_services = []
required_service_failed = False
completed_count = 0
total_count = len(tasks)
for task, result in zip(tasks, results):
service_name = service_map[task]
service_def = next(s for s in services_to_clone if s.name == service_name)
# Create a mapping from futures to service names to properly identify completed tasks
# We'll use asyncio.wait approach instead of as_completed to access the original tasks
pending = set(tasks)
completed_tasks_info = {task: service_map[task] for task in tasks} # Map tasks to service names
if isinstance(result, Exception):
logger.error(
f"Service {service_name} cloning failed with exception",
session_id=context.session_id,
error=str(result)
)
service_results[service_name] = {
"status": "failed",
"error": str(result),
"records_cloned": 0
}
failed_services.append(service_name)
if service_def.required:
required_service_failed = True
else:
service_results[service_name] = result
if result.get("status") == "failed":
while pending:
# Wait for at least one task to complete
done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
# Process each completed task
for completed_task in done:
try:
# Get the result from the completed task
result = await completed_task
# Get the service name from our mapping
service_name = completed_tasks_info[completed_task]
service_def = next(s for s in services_to_clone if s.name == service_name)
service_results[service_name] = result
completed_count += 1
if result.get("status") == "failed":
failed_services.append(service_name)
if service_def.required:
required_service_failed = True
else:
total_records += result.get("records_cloned", 0)
# Track successful services for rollback
if result.get("status") == "completed":
rollback_stack.append({
"type": "service",
"service_name": service_name,
"tenant_id": context.virtual_tenant_id,
"session_id": context.session_id
})
# Update Redis with granular progress after each service completes
await context.orchestrator._update_progress_in_redis(context.session_id, {
"completed_services": completed_count,
"total_services": total_count,
"progress_percentage": int((completed_count / total_count) * 100),
"services": service_results,
"total_records_cloned": total_records
})
logger.info(
f"Service {service_name} completed ({completed_count}/{total_count})",
session_id=context.session_id,
records_cloned=result.get("records_cloned", 0)
)
except Exception as e:
# Handle exceptions from the task itself
service_name = completed_tasks_info[completed_task]
service_def = next(s for s in services_to_clone if s.name == service_name)
logger.error(
f"Service {service_name} cloning failed with exception",
session_id=context.session_id,
error=str(e)
)
service_results[service_name] = {
"status": "failed",
"error": str(e),
"records_cloned": 0
}
failed_services.append(service_name)
completed_count += 1
if service_def.required:
required_service_failed = True
else:
total_records += result.get("records_cloned", 0)
# Track successful services for rollback
if result.get("status") == "completed":
rollback_stack.append({
"type": "service",
"service_name": service_name,
"tenant_id": context.virtual_tenant_id,
"session_id": context.session_id
})
# Determine overall status
if required_service_failed:
@@ -475,7 +510,7 @@ class EnterpriseCloningStrategy(CloningStrategy):
elif failed_children > 0:
overall_status = "partial"
else:
overall_status = "ready"
overall_status = "completed" # Changed from "ready" to match professional strategy
# Calculate total records cloned (parent + all children)
total_records_cloned = parent_result.get("total_records", 0)

View File

@@ -464,6 +464,14 @@ class DemoSessionManager:
"""Cache session status in Redis for fast status checks"""
status_key = f"session:{session.session_id}:status"
# Calculate estimated remaining time based on demo tier
estimated_remaining_seconds = None
if session.cloning_started_at and not session.cloning_completed_at:
elapsed = (datetime.now(timezone.utc) - session.cloning_started_at).total_seconds()
# Professional: ~40s average, Enterprise: ~75s average
avg_duration = 75 if session.demo_account_type == 'enterprise' else 40
estimated_remaining_seconds = max(0, int(avg_duration - elapsed))
status_data = {
"session_id": session.session_id,
"status": session.status.value,
@@ -471,7 +479,9 @@ class DemoSessionManager:
"total_records_cloned": session.total_records_cloned,
"cloning_started_at": session.cloning_started_at.isoformat() if session.cloning_started_at else None,
"cloning_completed_at": session.cloning_completed_at.isoformat() if session.cloning_completed_at else None,
"expires_at": session.expires_at.isoformat()
"expires_at": session.expires_at.isoformat(),
"estimated_remaining_seconds": estimated_remaining_seconds,
"demo_account_type": session.demo_account_type
}
import json as json_module
@@ -508,6 +518,14 @@ class DemoSessionManager:
await self._cache_session_status(session)
# Calculate estimated remaining time for database fallback
estimated_remaining_seconds = None
if session.cloning_started_at and not session.cloning_completed_at:
elapsed = (datetime.now(timezone.utc) - session.cloning_started_at).total_seconds()
# Professional: ~40s average, Enterprise: ~75s average
avg_duration = 75 if session.demo_account_type == 'enterprise' else 40
estimated_remaining_seconds = max(0, int(avg_duration - elapsed))
return {
"session_id": session.session_id,
"status": session.status.value,
@@ -515,7 +533,9 @@ class DemoSessionManager:
"total_records_cloned": session.total_records_cloned,
"cloning_started_at": session.cloning_started_at.isoformat() if session.cloning_started_at else None,
"cloning_completed_at": session.cloning_completed_at.isoformat() if session.cloning_completed_at else None,
"expires_at": session.expires_at.isoformat()
"expires_at": session.expires_at.isoformat(),
"estimated_remaining_seconds": estimated_remaining_seconds,
"demo_account_type": session.demo_account_type
}
async def retry_failed_cloning(

View File

@@ -39,8 +39,7 @@ from typing import List, Dict, Any
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.messaging import RabbitMQClient
from shared.schemas.alert_types import AlertTypeConstants
from shared.messaging import RabbitMQClient, AlertTypeConstants
import structlog
logger = structlog.get_logger()