fix demo session 1
@@ -16,7 +16,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
 from app.core.database import DatabaseManager
 from app.core.redis_wrapper import DemoRedisWrapper
-from app.services.data_cloner import DemoDataCloner
+from app.services.cleanup_service import DemoCleanupService
 from app.models.demo_session import DemoSession, DemoSessionStatus

 logger = structlog.get_logger()
@@ -122,93 +122,63 @@ class CleanupWorker:
         """Execute cleanup for list of sessions with parallelization"""
         async with get_db_session() as db:
             redis = DemoRedisWrapper()
-            data_cloner = DemoDataCloner(db, redis)
-
-            try:
-                # Get sessions to cleanup
-                result = await db.execute(
-                    select(DemoSession).where(
-                        DemoSession.session_id.in_(session_ids)
-                    )
-                )
-                sessions = result.scalars().all()
-
-                stats = {
-                    "cleaned_up": 0,
-                    "failed": 0,
-                    "errors": []
-                }
-
-                # Process each session
-                for session in sessions:
-                    try:
-                        # Mark session as expired
-                        session.status = DemoSessionStatus.EXPIRED
-                        await db.commit()
-
-                        # Check if this is an enterprise demo with children
-                        child_tenant_ids = []
-                        if session.demo_account_type == "enterprise" and session.session_metadata:
-                            child_tenant_ids = session.session_metadata.get("child_tenant_ids", [])
-
-                        # Delete child tenants in parallel (for enterprise demos)
-                        if child_tenant_ids:
-                            logger.info(
-                                "Cleaning up enterprise demo children",
-                                session_id=session.session_id,
-                                child_count=len(child_tenant_ids)
-                            )
-                            child_tasks = [
-                                data_cloner.delete_session_data(
-                                    str(child_id),
-                                    session.session_id
-                                )
-                                for child_id in child_tenant_ids
-                            ]
-                            child_results = await asyncio.gather(*child_tasks, return_exceptions=True)
-
-                            # Log any child deletion failures
-                            for child_id, result in zip(child_tenant_ids, child_results):
-                                if isinstance(result, Exception):
-                                    logger.error(
-                                        "Failed to delete child tenant",
-                                        child_id=child_id,
-                                        error=str(result)
-                                    )
-
-                        # Delete parent/main session data
-                        await data_cloner.delete_session_data(
-                            str(session.virtual_tenant_id),
-                            session.session_id
-                        )
-
-                        stats["cleaned_up"] += 1
-                        logger.info(
-                            "Session cleaned up",
-                            session_id=session.session_id,
-                            is_enterprise=(session.demo_account_type == "enterprise"),
-                            children_deleted=len(child_tenant_ids)
-                        )
-                    except Exception as e:
-                        stats["failed"] += 1
-                        stats["errors"].append({
-                            "session_id": session.session_id,
-                            "error": str(e)
-                        })
-                        logger.error(
-                            "Failed to cleanup session",
-                            session_id=session.session_id,
-                            error=str(e),
-                            exc_info=True
-                        )
-
-                return stats
-            finally:
-                # Always close HTTP client
-                await data_cloner.close()
+            cleanup_service = DemoCleanupService(db, redis)
+
+            # Get sessions to cleanup
+            result = await db.execute(
+                select(DemoSession).where(
+                    DemoSession.session_id.in_(session_ids)
+                )
+            )
+            sessions = result.scalars().all()
+
+            stats = {
+                "cleaned_up": 0,
+                "failed": 0,
+                "errors": []
+            }
+
+            # Process each session
+            for session in sessions:
+                try:
+                    # Mark session as expired
+                    session.status = DemoSessionStatus.EXPIRED
+                    await db.commit()
+
+                    # Use cleanup service to delete all session data
+                    cleanup_result = await cleanup_service.cleanup_session(session)
+
+                    if cleanup_result["success"]:
+                        stats["cleaned_up"] += 1
+                        logger.info(
+                            "Session cleaned up",
+                            session_id=session.session_id,
+                            total_deleted=cleanup_result["total_deleted"],
+                            duration_ms=cleanup_result["duration_ms"]
+                        )
+                    else:
+                        stats["failed"] += 1
+                        stats["errors"].append({
+                            "session_id": session.session_id,
+                            "error": "Cleanup completed with errors",
+                            "details": cleanup_result["errors"]
+                        })
+                except Exception as e:
+                    stats["failed"] += 1
+                    stats["errors"].append({
+                        "session_id": session.session_id,
+                        "error": str(e)
+                    })
+                    logger.error(
+                        "Failed to cleanup session",
+                        session_id=session.session_id,
+                        error=str(e),
+                        exc_info=True
+                    )
+
+            return stats

     async def _mark_job_complete(self, job_id: str, stats: Dict[str, Any]):
         """Mark job as complete in Redis"""
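Note on the new contract: the worker above only reads four keys from cleanup_result ("success", "total_deleted", "duration_ms", "errors"). A minimal sketch of a conforming return value follows; it assumes nothing about DemoCleanupService internals beyond what this hunk consumes, and is illustrative rather than the repository's actual implementation.

from datetime import datetime, timezone
from typing import Any, Dict, List


async def cleanup_session_sketch(session) -> Dict[str, Any]:
    """Illustrative stand-in for the dict shape DemoCleanupService.cleanup_session() returns to CleanupWorker."""
    start_time = datetime.now(timezone.utc)
    errors: List[Dict[str, Any]] = []  # assumed: one entry per service/step that failed
    total_deleted = 0
    # ... the real service would delete the session's data (and any enterprise children) here ...
    duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
    return {
        "success": not errors,           # checked via `if cleanup_result["success"]`
        "total_deleted": total_deleted,  # logged on success
        "duration_ms": duration_ms,      # logged on success
        "errors": errors,                # attached as "details" in stats["errors"] on partial failure
    }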
@@ -98,8 +98,37 @@ class DemoCleanupService:
         # Delete child tenants if enterprise
         if session.demo_account_type == "enterprise" and session.session_metadata:
             child_tenant_ids = session.session_metadata.get("child_tenant_ids", [])
             logger.info(
                 "Deleting child tenant data",
                 session_id=session_id,
                 child_count=len(child_tenant_ids)
             )

             for child_tenant_id in child_tenant_ids:
-                await self._delete_from_all_services(child_tenant_id)
+                child_results = await self._delete_from_all_services(str(child_tenant_id))
+
+                # Aggregate child deletion results
+                for (service_name, _), child_result in zip(self.services, child_results):
+                    if isinstance(child_result, Exception):
+                        logger.warning(
+                            "Failed to delete child tenant data from service",
+                            service=service_name,
+                            child_tenant_id=child_tenant_id,
+                            error=str(child_result)
+                        )
+                    else:
+                        child_deleted = child_result.get("records_deleted", {}).get("total", 0)
+                        total_deleted += child_deleted
+
+                        # Update details to track child deletions
+                        if service_name not in details:
+                            details[service_name] = {"child_deletions": []}
+                        if "child_deletions" not in details[service_name]:
+                            details[service_name]["child_deletions"] = []
+                        details[service_name]["child_deletions"].append({
+                            "child_tenant_id": str(child_tenant_id),
+                            "records_deleted": child_deleted
+                        })

         duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
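The aggregation loop above zips self.services against the value returned by _delete_from_all_services() and isinstance-checks each entry, which implies a fan-out that returns one result (or captured exception) per configured service, in order. A sketch of that assumed shape follows; the (service_name, client) pairing and the per-service delete_tenant_data() call are hypothetical, since neither is shown in this diff.

import asyncio
from typing import Any, Dict, List, Tuple, Union


class DeleteFanOutSketch:
    """Illustrative only: the return shape implied by the caller's zip()/isinstance() handling."""

    def __init__(self, services: List[Tuple[str, Any]]):
        # Assumed shape of self.services: (service_name, client) pairs, matching the
        # `for (service_name, _), child_result in zip(self.services, child_results)` unpacking.
        self.services = services

    async def _delete_from_all_services(self, tenant_id: str) -> List[Union[Dict[str, Any], Exception]]:
        tasks = [self._delete_one(client, tenant_id) for _, client in self.services]
        # return_exceptions=True keeps the result list positionally aligned with self.services,
        # so one failing service shows up as an Exception entry instead of aborting the gather.
        return await asyncio.gather(*tasks, return_exceptions=True)

    async def _delete_one(self, client: Any, tenant_id: str) -> Dict[str, Any]:
        # Hypothetical per-service call; the real client API is not visible in this commit.
        deleted = await client.delete_tenant_data(tenant_id)
        return {"records_deleted": {"total": deleted}}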