Initial commit - production deployment

commit c23d00dd92
2026-01-21 17:17:16 +01:00
2289 changed files with 638440 additions and 0 deletions


@@ -0,0 +1,3 @@
"""Demo Session Service"""
__version__ = "1.0.0"


@@ -0,0 +1,8 @@
"""Demo Session API"""
from .demo_sessions import router as demo_sessions_router
from .demo_accounts import router as demo_accounts_router
from .demo_operations import router as demo_operations_router
from .internal import router as internal_router
__all__ = ["demo_sessions_router", "demo_accounts_router", "demo_operations_router", "internal_router"]


@@ -0,0 +1,48 @@
"""
Demo Accounts API - Public demo account information (ATOMIC READ)
"""
from fastapi import APIRouter
from typing import List
import structlog
from app.api.schemas import DemoAccountInfo
from app.core import settings
from shared.routing import RouteBuilder
router = APIRouter(tags=["demo-accounts"])
logger = structlog.get_logger()
route_builder = RouteBuilder('demo')
@router.get(
route_builder.build_base_route("accounts", include_tenant_prefix=False),
response_model=List[DemoAccountInfo]
)
async def get_demo_accounts():
"""Get public demo account information (ATOMIC READ)"""
accounts = []
for account_type, config in settings.DEMO_ACCOUNTS.items():
accounts.append({
"account_type": account_type,
"name": config["name"],
"email": config["email"],
"password": "DemoSanPablo2024!" if "sanpablo" in config["email"] else "DemoLaEspiga2024!",
"description": (
"Panadería individual que produce todo localmente"
if account_type == "professional"
else "Punto de venta con obrador central"
),
"features": (
["Gestión de Producción", "Recetas", "Inventario", "Ventas", "Previsión de Demanda"]
if account_type == "professional"
else ["Gestión de Proveedores", "Pedidos", "Inventario", "Ventas", "Previsión de Demanda"]
),
"business_model": (
"Producción Local" if account_type == "professional" else "Obrador Central + Punto de Venta"
)
})
return accounts
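# Example element of the response (values taken from settings.DEMO_ACCOUNTS,
# abbreviated here for illustration):
#   {"account_type": "professional", "name": "Panadería Artesana Madrid - Demo",
#    "email": "demo.professional@panaderiaartesana.com", ...}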


@@ -0,0 +1,253 @@
"""
Demo Operations API - Business operations for demo session management
"""
from fastapi import APIRouter, Depends, HTTPException, Path
import structlog
import jwt
from datetime import datetime, timezone
from app.api.schemas import DemoSessionResponse, DemoSessionStats
from app.services import DemoSessionManager
from app.core import get_db, get_redis, DemoRedisWrapper, settings
from sqlalchemy.ext.asyncio import AsyncSession
from shared.routing import RouteBuilder
router = APIRouter(tags=["demo-operations"])
logger = structlog.get_logger()
route_builder = RouteBuilder('demo')
@router.post(
route_builder.build_resource_action_route("sessions", "session_id", "extend", include_tenant_prefix=False),
response_model=DemoSessionResponse
)
async def extend_demo_session(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Extend demo session expiration (BUSINESS OPERATION)"""
try:
session_manager = DemoSessionManager(db, redis)
session = await session_manager.extend_session(session_id)
session_token = jwt.encode(
{
"session_id": session.session_id,
"virtual_tenant_id": str(session.virtual_tenant_id),
"demo_account_type": session.demo_account_type,
"exp": session.expires_at.timestamp()
},
"demo-secret-key",
algorithm="HS256"
)
return {
"session_id": session.session_id,
"virtual_tenant_id": str(session.virtual_tenant_id),
"demo_account_type": session.demo_account_type,
"status": session.status.value,
"created_at": session.created_at,
"expires_at": session.expires_at,
"demo_config": session.session_metadata.get("demo_config", {}),
"session_token": session_token
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error("Failed to extend session", error=str(e))
raise HTTPException(status_code=500, detail=str(e))
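# A minimal verification sketch (hypothetical helper, not used by the
# service): decoding the token minted above with the same shared settings.
def _decode_session_token_example(token: str) -> dict:
    from app.core import settings  # local import keeps the sketch self-contained
    return jwt.decode(token, settings.JWT_SECRET_KEY, algorithms=[settings.JWT_ALGORITHM])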
@router.get(
route_builder.build_base_route("stats", include_tenant_prefix=False),
response_model=DemoSessionStats
)
async def get_demo_stats(
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Get demo session statistics (BUSINESS OPERATION)"""
session_manager = DemoSessionManager(db, redis)
stats = await session_manager.get_session_stats()
return stats
@router.post(
route_builder.build_operations_route("cleanup", include_tenant_prefix=False),
response_model=dict
)
async def run_cleanup(
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""
Trigger session cleanup via background worker (async via Redis queue)
Returns immediately after enqueuing work - does not block
"""
from datetime import timedelta
from sqlalchemy import select
from app.models.demo_session import DemoSession, DemoSessionStatus
import uuid
import json
logger.info("Starting demo session cleanup enqueue")
now = datetime.now(timezone.utc)
stuck_threshold = now - timedelta(minutes=5)
# Find expired sessions
result = await db.execute(
select(DemoSession).where(
DemoSession.status.in_([
DemoSessionStatus.PENDING,
DemoSessionStatus.READY,
DemoSessionStatus.PARTIAL,
DemoSessionStatus.FAILED,
DemoSessionStatus.ACTIVE
]),
DemoSession.expires_at < now
)
)
expired_sessions = result.scalars().all()
# Find stuck sessions
stuck_result = await db.execute(
select(DemoSession).where(
DemoSession.status == DemoSessionStatus.PENDING,
DemoSession.created_at < stuck_threshold
)
)
stuck_sessions = stuck_result.scalars().all()
all_sessions = list(expired_sessions) + list(stuck_sessions)
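# Note: a PENDING session past its expiry can appear in both lists; the
# worker's session_id IN (...) lookup makes such duplicates harmless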
if not all_sessions:
return {
"status": "no_sessions",
"message": "No sessions to cleanup",
"total_expired": 0,
"total_stuck": 0
}
# Create cleanup job
job_id = str(uuid.uuid4())
session_ids = [s.session_id for s in all_sessions]
job_data = {
"job_id": job_id,
"session_ids": session_ids,
"created_at": now.isoformat(),
"retry_count": 0
}
# Enqueue job
client = await redis.get_client()
await client.lpush("cleanup:queue", json.dumps(job_data))
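# Consumed by CleanupWorker (app/jobs/cleanup_worker.py) via BRPOPLPUSH;
# job status is written to cleanup:job:{job_id}:status with a 1-hour TTL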
logger.info(
"Cleanup job enqueued",
job_id=job_id,
session_count=len(session_ids),
expired_count=len(expired_sessions),
stuck_count=len(stuck_sessions)
)
return {
"status": "enqueued",
"job_id": job_id,
"session_count": len(session_ids),
"total_expired": len(expired_sessions),
"total_stuck": len(stuck_sessions),
"message": f"Cleanup job enqueued for {len(session_ids)} sessions"
}
@router.get(
route_builder.build_operations_route("cleanup/{job_id}", include_tenant_prefix=False),
response_model=dict
)
async def get_cleanup_status(
job_id: str,
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Get status of cleanup job"""
import json
client = await redis.get_client()
status_key = f"cleanup:job:{job_id}:status"
status_data = await client.get(status_key)
if not status_data:
return {
"status": "not_found",
"message": "Job not found or expired (jobs expire after 1 hour)"
}
return json.loads(status_data)
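# Example payload written by CleanupWorker._mark_job_complete (shape only):
#   {"status": "completed", "stats": {...}, "completed_at": "<ISO-8601>"}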
@router.post(
"/demo/sessions/{session_id}/seed-alerts",
response_model=dict
)
async def seed_demo_alerts(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Seed enriched demo alerts for a demo session (DEMO OPERATION)"""
try:
import subprocess
import os
# Get session to validate and get tenant_id
session_manager = DemoSessionManager(db, redis)
session = await session_manager.get_session(session_id)
if not session:
raise HTTPException(status_code=404, detail="Demo session not found")
# Set environment variables for seeding script
env = os.environ.copy()
env['DEMO_TENANT_ID'] = str(session.virtual_tenant_id)
# Determine script path based on environment
# In container: /app/scripts/seed_enriched_alert_demo.py
# In development: services/demo_session/scripts/seed_enriched_alert_demo.py
script_path = '/app/scripts/seed_enriched_alert_demo.py' if os.path.exists('/app/scripts') else 'services/demo_session/scripts/seed_enriched_alert_demo.py'
# Run the seeding script
result = subprocess.run(
['python3', script_path],
env=env,
capture_output=True,
text=True,
timeout=30
)
if result.returncode != 0:
logger.error("Alert seeding failed",
stdout=result.stdout,
stderr=result.stderr)
raise HTTPException(status_code=500, detail=f"Alert seeding failed: {result.stderr}")
logger.info("Demo alerts seeded successfully", session_id=session_id)
return {
"status": "success",
"session_id": session_id,
"tenant_id": str(session.virtual_tenant_id),
"alerts_seeded": 5,
"message": "Demo alerts published and will be enriched automatically"
}
except subprocess.TimeoutExpired:
raise HTTPException(status_code=504, detail="Alert seeding timeout")
except Exception as e:
logger.error("Failed to seed alerts", error=str(e), session_id=session_id)
raise HTTPException(status_code=500, detail=str(e))


@@ -0,0 +1,511 @@
"""
Demo Sessions API - Atomic CRUD operations on DemoSession model
"""
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
from typing import Optional
from uuid import UUID
from datetime import datetime, timezone
import structlog
import jwt
from app.api.schemas import DemoSessionCreate, DemoSessionResponse
from app.services import DemoSessionManager
from app.core import get_db
from app.models import DemoSessionStatus  # referenced in get_session_errors below
from app.core.redis_wrapper import get_redis, DemoRedisWrapper
from sqlalchemy.ext.asyncio import AsyncSession
from shared.routing import RouteBuilder
router = APIRouter(tags=["demo-sessions"])
logger = structlog.get_logger()
route_builder = RouteBuilder('demo')
async def _background_cloning_task(session_id: str, session_obj_id: UUID, base_tenant_id: str):
"""Background task for orchestrated cloning - creates its own DB session"""
from app.core.database import db_manager
from app.models import DemoSession, DemoSessionStatus
from sqlalchemy import select, update
from app.core.redis_wrapper import get_redis
logger.info(
"Starting background cloning task",
session_id=session_id,
session_obj_id=str(session_obj_id),
base_tenant_id=base_tenant_id
)
# Create new database session for background task
async with db_manager.session_factory() as db:
try:
# Get Redis client
redis = await get_redis()
# Fetch the session from the database
result = await db.execute(
select(DemoSession).where(DemoSession.id == session_obj_id)
)
session = result.scalar_one_or_none()
if not session:
logger.error("Session not found for cloning", session_id=session_id)
# Mark session as failed in Redis for frontend polling
try:
client = await redis.get_client()
status_key = f"session:{session_id}:status"
import json
status_data = {
"session_id": session_id,
"status": "failed",
"error": "Session not found in database",
"progress": {},
"total_records_cloned": 0
}
await client.setex(status_key, 7200, json.dumps(status_data))
except Exception as redis_error:
logger.error("Failed to update Redis status for missing session", error=str(redis_error))
return
logger.info(
"Found session for cloning",
session_id=session_id,
current_status=session.status.value,
demo_account_type=session.demo_account_type
)
# Create session manager with new DB session
session_manager = DemoSessionManager(db, redis)
await session_manager.trigger_orchestrated_cloning(session, base_tenant_id)
except Exception as e:
logger.error(
"Background cloning failed",
session_id=session_id,
error=str(e),
exc_info=True
)
# Attempt to update session status to failed if possible
try:
# Try to update the session directly in DB to mark it as failed
async with db_manager.session_factory() as update_db:
update_result = await update_db.execute(
update(DemoSession)
.where(DemoSession.id == session_obj_id)
.values(status=DemoSessionStatus.FAILED, cloning_completed_at=datetime.now(timezone.utc))
)
await update_db.commit()
logger.info("Successfully updated session status to FAILED in database")
except Exception as update_error:
logger.error(
"Failed to update session status to FAILED after background task error",
session_id=session_id,
error=str(update_error)
)
# Also update Redis status for frontend polling
try:
redis = await get_redis()  # re-acquire: the try block may fail before redis is bound
client = await redis.get_client()
status_key = f"session:{session_id}:status"
import json
status_data = {
"session_id": session_id,
"status": "failed",
"error": str(e),
"progress": {},
"total_records_cloned": 0,
"cloning_completed_at": datetime.now(timezone.utc).isoformat()
}
await client.setex(status_key, 7200, json.dumps(status_data))
logger.info("Successfully updated Redis status to FAILED")
except Exception as redis_error:
logger.error("Failed to update Redis status after background task error", error=str(redis_error))
def _handle_task_result(task, session_id: str):
"""Handle the result of the background cloning task"""
try:
# This will raise the exception if the task failed
task.result()
except Exception as e:
logger.error(
"Background cloning task failed with exception",
session_id=session_id,
error=str(e),
exc_info=True
)
# Try to update Redis status to reflect the failure
try:
from app.core.redis_wrapper import get_redis
import json
async def update_redis_status():
redis = await get_redis()
client = await redis.get_client()
status_key = f"session:{session_id}:status"
status_data = {
"session_id": session_id,
"status": "failed",
"error": f"Task exception: {str(e)}",
"progress": {},
"total_records_cloned": 0,
"cloning_completed_at": datetime.now(timezone.utc).isoformat()
}
await client.setex(status_key, 7200, json.dumps(status_data))
# Schedule the coroutine on the running loop; done callbacks execute on the
# event loop thread, where asyncio.run() would raise RuntimeError
import asyncio
asyncio.get_running_loop().create_task(update_redis_status())
except Exception as redis_error:
logger.error(
"Failed to update Redis status in task result handler",
session_id=session_id,
error=str(redis_error)
)
@router.post(
route_builder.build_base_route("sessions", include_tenant_prefix=False),
response_model=DemoSessionResponse,
status_code=201
)
async def create_demo_session(
request: DemoSessionCreate,
http_request: Request,
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Create a new isolated demo session (ATOMIC)"""
logger.info("Creating demo session", demo_account_type=request.demo_account_type)
try:
ip_address = request.ip_address or http_request.client.host
user_agent = request.user_agent or http_request.headers.get("user-agent", "")
session_manager = DemoSessionManager(db, redis)
session = await session_manager.create_session(
demo_account_type=request.demo_account_type,
subscription_tier=request.subscription_tier,
user_id=request.user_id,
ip_address=ip_address,
user_agent=user_agent
)
# Trigger async orchestrated cloning in background
import asyncio
from app.core.config import settings
from app.models import DemoSession
# Get base tenant ID from config
demo_config = settings.DEMO_ACCOUNTS.get(request.demo_account_type, {})
base_tenant_id = demo_config.get("base_tenant_id", str(session.base_demo_tenant_id))
# Start cloning in background task with session ID (not session object)
# Store task reference in case we need to track it
task = asyncio.create_task(
_background_cloning_task(session.session_id, session.id, base_tenant_id)
)
# Add error handling for the task to prevent silent failures
task.add_done_callback(lambda t: _handle_task_result(t, session.session_id))
# Get complete demo account data from config (includes user, tenant, subscription info)
subscription_tier = demo_config.get("subscription_tier", "professional")
user_data = demo_config.get("user", {})
tenant_data = demo_config.get("tenant", {})
# Generate session token with subscription data
session_token = jwt.encode(
{
"session_id": session.session_id,
"virtual_tenant_id": str(session.virtual_tenant_id),
"demo_account_type": request.demo_account_type,
"exp": session.expires_at.timestamp(),
"tenant_id": str(session.virtual_tenant_id),
"subscription": {
"tier": subscription_tier,
"status": "active",
"valid_until": session.expires_at.isoformat()
},
"is_demo": True
},
settings.JWT_SECRET_KEY,
algorithm=settings.JWT_ALGORITHM
)
# Build complete response like a real login would return
return {
"session_id": session.session_id,
"virtual_tenant_id": str(session.virtual_tenant_id),
"demo_account_type": session.demo_account_type,
"status": session.status.value,
"created_at": session.created_at,
"expires_at": session.expires_at,
"demo_config": session.session_metadata.get("demo_config", {}),
"session_token": session_token,
"subscription_tier": subscription_tier,
"is_enterprise": session.demo_account_type == "enterprise",
# Complete user data (like a real login response)
"user": {
"id": user_data.get("id"),
"email": user_data.get("email"),
"full_name": user_data.get("full_name"),
"role": user_data.get("role", "owner"),
"is_active": user_data.get("is_active", True),
"is_verified": user_data.get("is_verified", True),
"tenant_id": str(session.virtual_tenant_id),
"created_at": session.created_at.isoformat()
},
# Complete tenant data
"tenant": {
"id": str(session.virtual_tenant_id),
"name": demo_config.get("name"),
"subdomain": demo_config.get("subdomain"),
"subscription_tier": subscription_tier,
"tenant_type": demo_config.get("tenant_type", "standalone"),
"business_type": tenant_data.get("business_type"),
"business_model": tenant_data.get("business_model"),
"description": tenant_data.get("description"),
"is_active": True
}
}
except Exception as e:
logger.error("Failed to create demo session", error=str(e))
raise HTTPException(status_code=500, detail=f"Failed to create demo session: {str(e)}")
@router.get(
route_builder.build_resource_detail_route("sessions", "session_id", include_tenant_prefix=False),
response_model=dict
)
async def get_session_info(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Get demo session information (ATOMIC READ)"""
session_manager = DemoSessionManager(db, redis)
session = await session_manager.get_session(session_id)
if not session:
raise HTTPException(status_code=404, detail="Session not found")
return session.to_dict()
@router.get(
route_builder.build_resource_detail_route("sessions", "session_id", include_tenant_prefix=False) + "/status",
response_model=dict
)
async def get_session_status(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""
Get demo session provisioning status
Returns current status of data cloning and readiness.
Use this endpoint for polling (recommended interval: 1-2 seconds).
"""
session_manager = DemoSessionManager(db, redis)
status = await session_manager.get_session_status(session_id)
if not status:
raise HTTPException(status_code=404, detail="Session not found")
return status
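# A minimal client-side polling sketch, assuming the route resolves to
# /api/v1/demo/sessions/{session_id}/status behind the gateway and that the
# payload carries a "status" field (hypothetical helper, unused by the service):
async def _poll_session_status_example(base_url: str, session_id: str) -> dict:
    import asyncio
    import httpx  # already a service dependency (used by the cleanup service)
    async with httpx.AsyncClient(base_url=base_url) as client:
        while True:
            resp = await client.get(f"/api/v1/demo/sessions/{session_id}/status")
            resp.raise_for_status()
            payload = resp.json()
            if payload.get("status") in ("ready", "failed", "partial"):
                return payload
            await asyncio.sleep(2)  # recommended polling interval: 1-2 seconds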
@router.get(
route_builder.build_resource_detail_route("sessions", "session_id", include_tenant_prefix=False) + "/errors",
response_model=dict
)
async def get_session_errors(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""
Get detailed error information for a failed demo session
Returns comprehensive error details including:
- Failed services and their specific errors
- Network connectivity issues
- Timeout problems
- Service-specific error messages
"""
try:
# Try to get the session first
session_manager = DemoSessionManager(db, redis)
session = await session_manager.get_session(session_id)
if not session:
raise HTTPException(status_code=404, detail="Session not found")
# Check if session has failed status
if session.status != DemoSessionStatus.FAILED:
return {
"session_id": session_id,
"status": session.status.value,
"has_errors": False,
"message": "Session has not failed - no error details available"
}
# Get detailed error information from cloning progress
error_details = []
failed_services = []
if session.cloning_progress:
for service_name, service_data in session.cloning_progress.items():
if isinstance(service_data, dict) and service_data.get("status") == "failed":
failed_services.append(service_name)
error_details.append({
"service": service_name,
"error": service_data.get("error", "Unknown error"),
"response_status": service_data.get("response_status"),
"response_text": service_data.get("response_text", ""),
"duration_ms": service_data.get("duration_ms", 0)
})
# Check Redis for additional error information
client = await redis.get_client()
error_key = f"session:{session_id}:errors"
redis_errors = await client.get(error_key)
if redis_errors:
import json
try:
additional_errors = json.loads(redis_errors)
if isinstance(additional_errors, list):
error_details.extend(additional_errors)
elif isinstance(additional_errors, dict):
error_details.append(additional_errors)
except json.JSONDecodeError:
logger.warning("Failed to parse Redis error data", session_id=session_id)
# Create comprehensive error report
error_report = {
"session_id": session_id,
"status": session.status.value,
"has_errors": True,
"failed_services": failed_services,
"error_count": len(error_details),
"errors": error_details,
"cloning_started_at": session.cloning_started_at.isoformat() if session.cloning_started_at else None,
"cloning_completed_at": session.cloning_completed_at.isoformat() if session.cloning_completed_at else None,
"total_records_cloned": session.total_records_cloned,
"demo_account_type": session.demo_account_type
}
# Add troubleshooting suggestions
suggestions = []
if "tenant" in failed_services:
suggestions.append("Check if tenant service is running and accessible")
suggestions.append("Verify base tenant ID configuration")
if "auth" in failed_services:
suggestions.append("Check if auth service is running and accessible")
suggestions.append("Verify seed data files for auth service")
if any(svc in failed_services for svc in ["inventory", "recipes", "suppliers", "production"]):
suggestions.append("Check if the specific service is running and accessible")
suggestions.append("Verify seed data files exist and are valid")
if any("timeout" in error.get("error", "").lower() for error in error_details):
suggestions.append("Check service response times and consider increasing timeouts")
suggestions.append("Verify network connectivity between services")
if any("network" in error.get("error", "").lower() for error in error_details):
suggestions.append("Check network connectivity between demo-session and other services")
suggestions.append("Verify DNS resolution and service discovery")
if suggestions:
error_report["troubleshooting_suggestions"] = suggestions
return error_report
except Exception as e:
logger.error(
"Failed to retrieve session errors",
session_id=session_id,
error=str(e),
exc_info=True
)
raise HTTPException(
status_code=500,
detail=f"Failed to retrieve error details: {str(e)}"
)
@router.post(
route_builder.build_resource_detail_route("sessions", "session_id", include_tenant_prefix=False) + "/retry",
response_model=dict
)
async def retry_session_cloning(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""
Retry failed cloning operations
Only available for sessions in "failed" or "partial" status.
"""
try:
session_manager = DemoSessionManager(db, redis)
result = await session_manager.retry_failed_cloning(session_id)
return {
"message": "Cloning retry initiated",
"session_id": session_id,
"result": result
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error("Failed to retry cloning", error=str(e))
raise HTTPException(status_code=500, detail=str(e))
@router.delete(
route_builder.build_resource_detail_route("sessions", "session_id", include_tenant_prefix=False),
response_model=dict
)
async def destroy_demo_session(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Destroy demo session and cleanup resources (ATOMIC DELETE)"""
try:
session_manager = DemoSessionManager(db, redis)
await session_manager.destroy_session(session_id)
return {"message": "Session destroyed successfully", "session_id": session_id}
except Exception as e:
logger.error("Failed to destroy session", error=str(e))
raise HTTPException(status_code=500, detail=str(e))
@router.post(
route_builder.build_resource_detail_route("sessions", "session_id", include_tenant_prefix=False) + "/destroy",
response_model=dict
)
async def destroy_demo_session_post(
session_id: str = Path(...),
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""Destroy demo session via POST (for frontend compatibility)"""
try:
session_manager = DemoSessionManager(db, redis)
await session_manager.destroy_session(session_id)
return {"message": "Session destroyed successfully", "session_id": session_id}
except Exception as e:
logger.error("Failed to destroy session", error=str(e))
raise HTTPException(status_code=500, detail=str(e))


@@ -0,0 +1,81 @@
"""
Internal API for Demo Session Service
Handles internal service-to-service operations
"""
from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
import structlog
from app.core import get_db, settings
from app.core.redis_wrapper import get_redis, DemoRedisWrapper
from app.services.cleanup_service import DemoCleanupService
logger = structlog.get_logger()
router = APIRouter()
# ✅ Security: Internal API key system removed
# All authentication now handled via JWT service tokens at gateway level
@router.post("/internal/demo/cleanup")
async def cleanup_demo_session_internal(
cleanup_request: dict,
db: AsyncSession = Depends(get_db),
redis: DemoRedisWrapper = Depends(get_redis)
):
"""
Internal endpoint to cleanup demo session data for a specific tenant
Used by rollback mechanisms
"""
try:
tenant_id = cleanup_request.get('tenant_id')
session_id = cleanup_request.get('session_id')
if not all([tenant_id, session_id]):
raise HTTPException(
status_code=400,
detail="Missing required parameters: tenant_id, session_id"
)
logger.info(
"Internal cleanup requested",
tenant_id=tenant_id,
session_id=session_id
)
cleanup_service = DemoCleanupService(db, redis)
# Delete session data for this tenant
await cleanup_service._delete_tenant_data(
tenant_id=str(tenant_id),
session_id=str(session_id)
)
# Delete Redis data
await redis.delete_session_data(str(session_id))
logger.info(
"Internal cleanup completed",
tenant_id=tenant_id,
session_id=session_id
)
return {
"status": "completed",
"tenant_id": tenant_id,
"session_id": session_id
}
except Exception as e:
logger.error(
"Internal cleanup failed",
error=str(e),
tenant_id=cleanup_request.get('tenant_id'),
session_id=cleanup_request.get('session_id'),
exc_info=True
)
raise HTTPException(status_code=500, detail=f"Failed to cleanup demo session: {str(e)}")


@@ -0,0 +1,107 @@
"""
API Schemas for Demo Session Service
"""
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any
from datetime import datetime
class DemoSessionCreate(BaseModel):
"""Create demo session request"""
demo_account_type: str = Field(..., description="professional or enterprise")
subscription_tier: Optional[str] = Field(None, description="Force specific subscription tier (professional/enterprise)")
user_id: Optional[str] = Field(None, description="Optional authenticated user ID")
ip_address: Optional[str] = None
user_agent: Optional[str] = None
class DemoUser(BaseModel):
"""Demo user data returned in session response"""
id: str
email: str
full_name: str
role: str
is_active: bool
is_verified: bool
tenant_id: str
created_at: str
class DemoTenant(BaseModel):
"""Demo tenant data returned in session response"""
id: str
name: str
subdomain: str
subscription_tier: str
tenant_type: str
business_type: Optional[str] = None
business_model: Optional[str] = None
description: Optional[str] = None
is_active: bool
class DemoSessionResponse(BaseModel):
"""Demo session response - mirrors a real login response with user and tenant data"""
session_id: str
virtual_tenant_id: str
demo_account_type: str
status: str
created_at: datetime
expires_at: datetime
demo_config: Dict[str, Any]
session_token: str
subscription_tier: str
is_enterprise: bool
# Complete user and tenant data (like a real login response)
user: DemoUser
tenant: DemoTenant
class Config:
from_attributes = True
class DemoSessionExtend(BaseModel):
"""Extend session request"""
session_id: str
class DemoSessionDestroy(BaseModel):
"""Destroy session request"""
session_id: str
class DemoSessionStats(BaseModel):
"""Demo session statistics"""
total_sessions: int
active_sessions: int
expired_sessions: int
destroyed_sessions: int
avg_duration_minutes: float
total_requests: int
class DemoAccountInfo(BaseModel):
"""Public demo account information"""
account_type: str
name: str
email: str
password: str
description: str
features: list[str]
business_model: str
class CloneDataRequest(BaseModel):
"""Request to clone tenant data"""
base_tenant_id: str
virtual_tenant_id: str
session_id: str
class CloneDataResponse(BaseModel):
"""Response from data cloning"""
session_id: str
services_cloned: list[str]
total_records: int
redis_keys: int


@@ -0,0 +1,7 @@
"""Demo Session Service Core"""
from .config import settings
from .database import DatabaseManager, get_db
from .redis_wrapper import DemoRedisWrapper, get_redis
__all__ = ["settings", "DatabaseManager", "get_db", "DemoRedisWrapper", "get_redis"]


@@ -0,0 +1,132 @@
"""
Demo Session Service Configuration
"""
import os
from typing import Optional
from shared.config.base import BaseServiceSettings
class Settings(BaseServiceSettings):
"""Demo Session Service Settings"""
# Service info (override base settings)
APP_NAME: str = "Demo Session Service"
SERVICE_NAME: str = "demo-session"
VERSION: str = "1.0.0"
DESCRIPTION: str = "Demo session management and orchestration service"
# Database (override base property)
@property
def DATABASE_URL(self) -> str:
"""Build database URL from environment"""
return os.getenv(
"DEMO_SESSION_DATABASE_URL",
"postgresql+asyncpg://postgres:postgres@localhost:5432/demo_session_db"
)
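# e.g. export DEMO_SESSION_DATABASE_URL="postgresql+asyncpg://user:pass@db:5432/demo_session_db"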
# Redis configuration (demo-specific)
REDIS_KEY_PREFIX: str = "demo:session"
REDIS_SESSION_TTL: int = 1800 # 30 minutes
# Demo session configuration
DEMO_SESSION_DURATION_MINUTES: int = 30
DEMO_SESSION_MAX_EXTENSIONS: int = 3
DEMO_SESSION_CLEANUP_INTERVAL_MINUTES: int = 60
# Demo account credentials (public)
# Contains complete user, tenant, and subscription data matching fixture files
DEMO_ACCOUNTS: dict = {
"professional": {
"email": "demo.professional@panaderiaartesana.com",
"name": "Panadería Artesana Madrid - Demo",
"subdomain": "demo-artesana",
"base_tenant_id": "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6",
"subscription_tier": "professional",
"tenant_type": "standalone",
# User data from fixtures/professional/01-tenant.json
"user": {
"id": "c1a2b3c4-d5e6-47a8-b9c0-d1e2f3a4b5c6",
"email": "maria.garcia@panaderiaartesana.com",
"full_name": "María García López",
"role": "owner",
"is_active": True,
"is_verified": True
},
# Tenant data
"tenant": {
"business_type": "bakery",
"business_model": "production_retail",
"description": "Professional tier demo tenant for bakery operations"
}
},
"enterprise": {
"email": "central@panaderiaartesana.es",
"name": "Panadería Artesana España - Central",
"subdomain": "artesana-central",
"base_tenant_id": "80000000-0000-4000-a000-000000000001",
"subscription_tier": "enterprise",
"tenant_type": "parent",
# User data from fixtures/enterprise/parent/01-tenant.json
"user": {
"id": "d2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7",
"email": "director@panaderiaartesana.es",
"full_name": "Director",
"role": "owner",
"is_active": True,
"is_verified": True
},
# Tenant data
"tenant": {
"business_type": "bakery_chain",
"business_model": "multi_location",
"description": "Central production facility and parent tenant for multi-location bakery chain"
},
"children": [
{
"name": "Madrid - Salamanca",
"base_tenant_id": "A0000000-0000-4000-a000-000000000001",
"location": {"city": "Madrid", "zone": "Salamanca", "latitude": 40.4284, "longitude": -3.6847},
"description": "Premium location in upscale Salamanca district"
},
{
"name": "Barcelona - Eixample",
"base_tenant_id": "B0000000-0000-4000-a000-000000000001",
"location": {"city": "Barcelona", "zone": "Eixample", "latitude": 41.3947, "longitude": 2.1616},
"description": "High-volume tourist and local area in central Barcelona"
},
{
"name": "Valencia - Ruzafa",
"base_tenant_id": "C0000000-0000-4000-a000-000000000001",
"location": {"city": "Valencia", "zone": "Ruzafa", "latitude": 39.4623, "longitude": -0.3645},
"description": "Trendy artisan neighborhood with focus on quality"
},
{
"name": "Seville - Triana",
"base_tenant_id": "D0000000-0000-4000-a000-000000000001",
"location": {"city": "Seville", "zone": "Triana", "latitude": 37.3828, "longitude": -6.0026},
"description": "Traditional Andalusian location with local specialties"
},
{
"name": "Bilbao - Casco Viejo",
"base_tenant_id": "E0000000-0000-4000-a000-000000000001",
"location": {"city": "Bilbao", "zone": "Casco Viejo", "latitude": 43.2567, "longitude": -2.9272},
"description": "Basque region location with focus on quality and local culture"
}
]
}
}
# Service URLs - these are inherited from BaseServiceSettings
# but we can override defaults if needed:
# - GATEWAY_URL (inherited)
# - AUTH_SERVICE_URL, TENANT_SERVICE_URL, etc. (inherited)
# - JWT_SECRET_KEY, JWT_ALGORITHM (inherited)
# - LOG_LEVEL (inherited)
class Config:
env_file = ".env"
case_sensitive = True
settings = Settings()


@@ -0,0 +1,61 @@
"""
Database connection management for Demo Session Service
"""
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy.pool import NullPool
import structlog
from typing import AsyncGenerator
from .config import settings
logger = structlog.get_logger()
class DatabaseManager:
"""Database connection manager"""
def __init__(self, database_url: str = None):
self.database_url = database_url or settings.DATABASE_URL
self.engine = None
self.session_factory = None
def initialize(self):
"""Initialize database engine and session factory"""
self.engine = create_async_engine(
self.database_url,
echo=settings.DEBUG,
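# NullPool opens a fresh connection per checkout; presumably chosen so pooled
# connections never cross asyncio event loops (API process vs. cleanup worker)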
poolclass=NullPool,
pool_pre_ping=True
)
self.session_factory = async_sessionmaker(
self.engine,
class_=AsyncSession,
expire_on_commit=False,
autocommit=False,
autoflush=False
)
logger.info("Database manager initialized", database_url=self.database_url.split("@")[-1])
async def close(self):
"""Close database connections"""
if self.engine:
await self.engine.dispose()
logger.info("Database connections closed")
async def get_session(self) -> AsyncGenerator[AsyncSession, None]:
"""Get database session"""
if not self.session_factory:
self.initialize()
async with self.session_factory() as session:
yield session
db_manager = DatabaseManager()
async def get_db() -> AsyncGenerator[AsyncSession, None]:
"""Dependency for FastAPI"""
async for session in db_manager.get_session():
yield session


@@ -0,0 +1,131 @@
"""
Redis wrapper for demo session service using shared Redis implementation
Provides a compatibility layer for session-specific operations
"""
import json
import structlog
from typing import Optional, Any
from shared.redis_utils import get_redis_client
logger = structlog.get_logger()
class DemoRedisWrapper:
"""Wrapper around shared Redis client for demo session operations"""
def __init__(self, key_prefix: str = "demo_session"):
self.key_prefix = key_prefix
async def get_client(self):
"""Get the underlying Redis client"""
return await get_redis_client()
def _make_key(self, *parts: str) -> str:
"""Create Redis key with prefix"""
return f"{self.key_prefix}:{':'.join(parts)}"
async def set_session_data(self, session_id: str, key: str, data: Any, ttl: int = None):
"""Store session data in Redis"""
client = await get_redis_client()
redis_key = self._make_key(session_id, key)
serialized = json.dumps(data) if not isinstance(data, str) else data
if ttl:
await client.setex(redis_key, ttl, serialized)
else:
await client.set(redis_key, serialized)
logger.debug("Session data stored", session_id=session_id, key=key)
async def get_session_data(self, session_id: str, key: str) -> Optional[Any]:
"""Retrieve session data from Redis"""
client = await get_redis_client()
redis_key = self._make_key(session_id, key)
data = await client.get(redis_key)
if data:
try:
return json.loads(data)
except json.JSONDecodeError:
return data
return None
async def delete_session_data(self, session_id: str, key: str = None):
"""Delete session data"""
client = await get_redis_client()
if key:
redis_key = self._make_key(session_id, key)
await client.delete(redis_key)
else:
pattern = self._make_key(session_id, "*")
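# NOTE: KEYS is O(N) over the whole keyspace; per-session key counts are
# expected to be small here, but SCAN would be safer at scale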
keys = await client.keys(pattern)
if keys:
await client.delete(*keys)
logger.debug("Session data deleted", session_id=session_id, key=key)
async def extend_session_ttl(self, session_id: str, ttl: int):
"""Extend TTL for all session keys"""
client = await get_redis_client()
pattern = self._make_key(session_id, "*")
keys = await client.keys(pattern)
for key in keys:
await client.expire(key, ttl)
logger.debug("Session TTL extended", session_id=session_id, ttl=ttl)
async def set_hash(self, session_id: str, hash_key: str, field: str, value: Any):
"""Store hash field in Redis"""
client = await get_redis_client()
redis_key = self._make_key(session_id, hash_key)
serialized = json.dumps(value) if not isinstance(value, str) else value
await client.hset(redis_key, field, serialized)
async def get_hash(self, session_id: str, hash_key: str, field: str) -> Optional[Any]:
"""Get hash field from Redis"""
client = await get_redis_client()
redis_key = self._make_key(session_id, hash_key)
data = await client.hget(redis_key, field)
if data:
try:
return json.loads(data)
except json.JSONDecodeError:
return data
return None
async def get_all_hash(self, session_id: str, hash_key: str) -> dict:
"""Get all hash fields"""
client = await get_redis_client()
redis_key = self._make_key(session_id, hash_key)
data = await client.hgetall(redis_key)
result = {}
for field, value in data.items():
try:
result[field] = json.loads(value)
except json.JSONDecodeError:
result[field] = value
return result
# Cached instance
_redis_wrapper = None
async def get_redis() -> DemoRedisWrapper:
"""Dependency for FastAPI - returns wrapper around shared Redis"""
global _redis_wrapper
if _redis_wrapper is None:
_redis_wrapper = DemoRedisWrapper()
return _redis_wrapper


@@ -0,0 +1,7 @@
"""
Background Jobs Package
"""
from .cleanup_worker import CleanupWorker, run_cleanup_worker
__all__ = ["CleanupWorker", "run_cleanup_worker"]


@@ -0,0 +1,244 @@
"""
Background Cleanup Worker
Processes demo session cleanup jobs from Redis queue
"""
import asyncio
import structlog
from datetime import datetime, timezone
from typing import Dict, Any
import json
from contextlib import asynccontextmanager
from sqlalchemy import select
from app.core.database import DatabaseManager
from app.core.redis_wrapper import DemoRedisWrapper
from app.services.cleanup_service import DemoCleanupService
from app.models.demo_session import DemoSession, DemoSessionStatus
logger = structlog.get_logger()
@asynccontextmanager
async def get_db_session():
"""Get database session context manager"""
db_manager = DatabaseManager()
db_manager.initialize()
async with db_manager.session_factory() as session:
try:
yield session
await session.commit()
except Exception:
await session.rollback()
raise
finally:
await session.close()
class CleanupWorker:
"""Background worker for processing cleanup jobs"""
def __init__(self, redis: DemoRedisWrapper):
self.redis = redis
self.queue_key = "cleanup:queue"
self.processing_key = "cleanup:processing"
self.running = False
async def start(self):
"""Start the worker (runs indefinitely)"""
self.running = True
logger.info("Cleanup worker started")
while self.running:
try:
await self._process_next_job()
except Exception as e:
logger.error("Worker error", error=str(e), exc_info=True)
await asyncio.sleep(5) # Back off on error
async def stop(self):
"""Stop the worker gracefully"""
self.running = False
logger.info("Cleanup worker stopped")
async def _process_next_job(self):
"""Process next job from queue"""
client = await self.redis.get_client()
# Blocking pop from queue (5 second timeout)
result = await client.brpoplpush(
self.queue_key,
self.processing_key,
timeout=5
)
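# Reliable-queue pattern: BRPOPLPUSH atomically moves the job into
# cleanup:processing so a crashed worker leaves it visible there; the job is
# only LREM'd after completion or max-retry failure below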
if not result:
return # No job available
job_data = json.loads(result)
job_id = job_data["job_id"]
session_ids = job_data["session_ids"]
logger.info(
"Processing cleanup job",
job_id=job_id,
session_count=len(session_ids)
)
try:
# Process cleanup
stats = await self._cleanup_sessions(session_ids)
# Mark job as complete
await self._mark_job_complete(job_id, stats)
# Remove from processing queue
await client.lrem(self.processing_key, 1, result)
logger.info("Job completed", job_id=job_id, stats=stats)
except Exception as e:
logger.error("Job failed", job_id=job_id, error=str(e), exc_info=True)
# Check retry count
retry_count = job_data.get("retry_count", 0)
if retry_count < 3:
# Retry - put back in queue
job_data["retry_count"] = retry_count + 1
await client.lpush(self.queue_key, json.dumps(job_data))
logger.info("Job requeued for retry", job_id=job_id, retry_count=retry_count + 1)
else:
# Max retries reached - mark as failed
await self._mark_job_failed(job_id, str(e))
logger.error("Job failed after max retries", job_id=job_id)
# Remove from processing queue
await client.lrem(self.processing_key, 1, result)
async def _cleanup_sessions(self, session_ids: list) -> Dict[str, Any]:
"""Execute cleanup for list of sessions with parallelization"""
async with get_db_session() as db:
redis = DemoRedisWrapper()
cleanup_service = DemoCleanupService(db, redis)
# Get sessions to cleanup
result = await db.execute(
select(DemoSession).where(
DemoSession.session_id.in_(session_ids)
)
)
sessions = result.scalars().all()
stats = {
"cleaned_up": 0,
"failed": 0,
"errors": []
}
# Process each session
for session in sessions:
try:
# Mark session as expired
session.status = DemoSessionStatus.EXPIRED
await db.commit()
# Use cleanup service to delete all session data
cleanup_result = await cleanup_service.cleanup_session(session)
if cleanup_result["success"]:
stats["cleaned_up"] += 1
logger.info(
"Session cleaned up",
session_id=session.session_id,
is_enterprise=(session.demo_account_type == "enterprise"),
total_deleted=cleanup_result["total_deleted"],
duration_ms=cleanup_result["duration_ms"]
)
else:
stats["failed"] += 1
stats["errors"].append({
"session_id": session.session_id,
"error": "Cleanup completed with errors",
"details": cleanup_result["errors"]
})
except Exception as e:
stats["failed"] += 1
stats["errors"].append({
"session_id": session.session_id,
"error": str(e)
})
logger.error(
"Failed to cleanup session",
session_id=session.session_id,
error=str(e),
exc_info=True
)
return stats
async def _mark_job_complete(self, job_id: str, stats: Dict[str, Any]):
"""Mark job as complete in Redis"""
client = await self.redis.get_client()
status_key = f"cleanup:job:{job_id}:status"
await client.setex(
status_key,
3600, # Keep status for 1 hour
json.dumps({
"status": "completed",
"stats": stats,
"completed_at": datetime.now(timezone.utc).isoformat()
})
)
async def _mark_job_failed(self, job_id: str, error: str):
"""Mark job as failed in Redis"""
client = await self.redis.get_client()
status_key = f"cleanup:job:{job_id}:status"
await client.setex(
status_key,
3600,
json.dumps({
"status": "failed",
"error": error,
"failed_at": datetime.now(timezone.utc).isoformat()
})
)
async def run_cleanup_worker():
"""Entry point for worker process"""
# Initialize Redis client
from shared.redis_utils import initialize_redis
from app.core.config import Settings
settings = Settings()
redis_url = settings.REDIS_URL # Use proper configuration with TLS and auth
try:
# Initialize Redis with connection pool settings
await initialize_redis(redis_url, db=settings.REDIS_DB, max_connections=settings.REDIS_MAX_CONNECTIONS)
logger.info("Redis initialized successfully", redis_url=redis_url.split('@')[-1], db=settings.REDIS_DB)
except Exception as e:
logger.error("Failed to initialize Redis", error=str(e), redis_url=redis_url.split('@')[-1])
raise
redis = DemoRedisWrapper()
worker = CleanupWorker(redis)
try:
await worker.start()
except KeyboardInterrupt:
logger.info("Received interrupt signal")
await worker.stop()
except Exception as e:
logger.error("Worker crashed", error=str(e), exc_info=True)
raise
if __name__ == "__main__":
asyncio.run(run_cleanup_worker())
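# Typically launched as a separate process alongside the API, e.g.
#   python -m app.jobs.cleanup_worker
# (deployment detail assumed here, not specified by this commit)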


@@ -0,0 +1,82 @@
"""
Demo Session Service - Main Application
Manages isolated demo sessions with ephemeral data
"""
import structlog
from app.core import settings, DatabaseManager
from app.api import demo_sessions, demo_accounts, demo_operations, internal
from shared.redis_utils import initialize_redis, close_redis
from shared.service_base import StandardFastAPIService
# Initialize logger
logger = structlog.get_logger()
# Initialize database manager
db_manager = DatabaseManager()
class DemoSessionService(StandardFastAPIService):
"""Demo Session Service with standardized monitoring setup"""
async def on_startup(self, app):
"""Custom startup logic for Demo Session"""
# Initialize database
db_manager.initialize()
logger.info("Database initialized")
# Initialize Redis
await initialize_redis(
redis_url=settings.REDIS_URL,
db=0,
max_connections=50
)
logger.info("Redis initialized")
await super().on_startup(app)
async def on_shutdown(self, app):
"""Custom shutdown logic for Demo Session"""
await super().on_shutdown(app)
# Cleanup
await db_manager.close()
await close_redis()
logger.info("Database and Redis connections closed")
# Create service instance
service = DemoSessionService(
service_name="demo-session",
app_name="Demo Session Service",
description="Manages isolated demo sessions for prospect users",
version=settings.VERSION,
log_level=getattr(settings, 'LOG_LEVEL', 'INFO'),
cors_origins=["*"], # Configure appropriately for production
api_prefix="/api/v1",
enable_metrics=True,
enable_health_checks=True,
enable_tracing=True,
enable_cors=True
)
# Create FastAPI app
app = service.create_app(debug=settings.DEBUG)
# Add service-specific routers
app.include_router(demo_sessions.router)
app.include_router(demo_accounts.router)
app.include_router(demo_operations.router)
app.include_router(internal.router)
if __name__ == "__main__":
import uvicorn
uvicorn.run(
"app.main:app",
host="0.0.0.0",
port=8000,
reload=settings.DEBUG,
log_level=settings.LOG_LEVEL.lower()
)


@@ -0,0 +1,12 @@
"""Demo Session Service Models"""
from shared.security import create_audit_log_model
from shared.database.base import Base
from .demo_session import DemoSession, DemoSessionStatus, CloningStatus

# Create audit log model for this service
AuditLog = create_audit_log_model(Base)

__all__ = ["DemoSession", "DemoSessionStatus", "CloningStatus", "AuditLog"]


@@ -0,0 +1,96 @@
"""
Demo Session Models
Tracks ephemeral demo sessions for prospect users
"""
from sqlalchemy import Column, String, Boolean, DateTime, Integer, Enum as SQLEnum
from sqlalchemy.dialects.postgresql import UUID, JSONB
from datetime import datetime, timezone
import uuid
import enum
from shared.database.base import Base
class DemoSessionStatus(enum.Enum):
"""Demo session status"""
PENDING = "pending" # Data cloning in progress
READY = "ready" # All data loaded, safe to use
FAILED = "failed" # One or more services failed completely
PARTIAL = "partial" # Some services failed, others succeeded
ACTIVE = "active" # User is actively using the session (deprecated, use READY)
EXPIRED = "expired" # Session TTL exceeded
DESTROYING = "destroying" # Session in the process of being destroyed
DESTROYED = "destroyed" # Session terminated
class CloningStatus(enum.Enum):
"""Individual service cloning status"""
NOT_STARTED = "not_started"
IN_PROGRESS = "in_progress"
COMPLETED = "completed"
FAILED = "failed"
class DemoSession(Base):
"""Demo Session tracking model"""
__tablename__ = "demo_sessions"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
session_id = Column(String(100), unique=True, nullable=False, index=True)
# Session ownership
user_id = Column(UUID(as_uuid=True), nullable=True)
ip_address = Column(String(45), nullable=True)
user_agent = Column(String(500), nullable=True)
# Demo tenant linking
base_demo_tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
virtual_tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
demo_account_type = Column(String(50), nullable=False) # 'professional', 'enterprise'
# Session lifecycle
status = Column(SQLEnum(DemoSessionStatus, values_callable=lambda obj: [e.value for e in obj]), default=DemoSessionStatus.PENDING, index=True)
created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc), index=True)
expires_at = Column(DateTime(timezone=True), nullable=False, index=True)
last_activity_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
destroyed_at = Column(DateTime(timezone=True), nullable=True)
# Cloning progress tracking
cloning_started_at = Column(DateTime(timezone=True), nullable=True)
cloning_completed_at = Column(DateTime(timezone=True), nullable=True)
total_records_cloned = Column(Integer, default=0)
# Per-service cloning status
cloning_progress = Column(JSONB, default=dict) # {service_name: {status, records, started_at, completed_at, error}}
# Session metrics
request_count = Column(Integer, default=0)
data_cloned = Column(Boolean, default=False) # Deprecated: use status instead
redis_populated = Column(Boolean, default=False) # Deprecated: use status instead
# Session metadata
session_metadata = Column(JSONB, default=dict)
# Error tracking
error_details = Column(JSONB, default=list) # List of error objects for failed sessions
def __repr__(self):
return f"<DemoSession(session_id={self.session_id}, status={self.status.value})>"
def to_dict(self):
"""Convert to dictionary"""
return {
"id": str(self.id),
"session_id": self.session_id,
"user_id": str(self.user_id) if self.user_id else None,
"virtual_tenant_id": str(self.virtual_tenant_id),
"base_demo_tenant_id": str(self.base_demo_tenant_id),
"demo_account_type": self.demo_account_type,
"status": self.status.value,
"created_at": self.created_at.isoformat() if self.created_at else None,
"expires_at": self.expires_at.isoformat() if self.expires_at else None,
"last_activity_at": self.last_activity_at.isoformat() if self.last_activity_at else None,
"request_count": self.request_count,
"metadata": self.session_metadata
}


@@ -0,0 +1,7 @@
"""
Demo Session Repositories
"""
from .demo_session_repository import DemoSessionRepository
__all__ = ["DemoSessionRepository"]


@@ -0,0 +1,204 @@
"""
Demo Session Repository
Data access layer for demo sessions
"""
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, update
from datetime import datetime, timezone
from typing import Optional, List, Dict, Any
from uuid import UUID
import structlog
from app.models import DemoSession, DemoSessionStatus
logger = structlog.get_logger()
class DemoSessionRepository:
"""Repository for DemoSession data access"""
def __init__(self, db: AsyncSession):
self.db = db
async def create(self, session_data: Dict[str, Any]) -> DemoSession:
"""
Create a new demo session
Args:
session_data: Dictionary with session attributes
Returns:
Created DemoSession instance
"""
session = DemoSession(**session_data)
self.db.add(session)
await self.db.commit()
await self.db.refresh(session)
return session
async def get_by_session_id(self, session_id: str) -> Optional[DemoSession]:
"""
Get session by session_id
Args:
session_id: Session ID string
Returns:
DemoSession or None if not found
"""
result = await self.db.execute(
select(DemoSession).where(DemoSession.session_id == session_id)
)
return result.scalar_one_or_none()
async def get_by_virtual_tenant_id(self, virtual_tenant_id: UUID) -> Optional[DemoSession]:
"""
Get session by virtual tenant ID
Args:
virtual_tenant_id: Virtual tenant UUID
Returns:
DemoSession or None if not found
"""
result = await self.db.execute(
select(DemoSession).where(DemoSession.virtual_tenant_id == virtual_tenant_id)
)
return result.scalar_one_or_none()
async def update(self, session: DemoSession) -> DemoSession:
"""
Update an existing session
Args:
session: DemoSession instance with updates
Returns:
Updated DemoSession instance
"""
await self.db.commit()
await self.db.refresh(session)
return session
async def update_fields(self, session_id: str, **fields) -> None:
"""
Update specific fields of a session
Args:
session_id: Session ID to update
**fields: Field names and values to update
"""
await self.db.execute(
update(DemoSession)
.where(DemoSession.session_id == session_id)
.values(**fields)
)
await self.db.commit()
async def update_activity(self, session_id: str) -> None:
"""
Update last activity timestamp and increment request count
Args:
session_id: Session ID to update
"""
await self.db.execute(
update(DemoSession)
.where(DemoSession.session_id == session_id)
.values(
last_activity_at=datetime.now(timezone.utc),
request_count=DemoSession.request_count + 1
)
)
await self.db.commit()
async def mark_data_cloned(self, session_id: str) -> None:
"""
Mark session as having data cloned
Args:
session_id: Session ID to update
"""
await self.update_fields(session_id, data_cloned=True)
async def mark_redis_populated(self, session_id: str) -> None:
"""
Mark session as having Redis data populated
Args:
session_id: Session ID to update
"""
await self.update_fields(session_id, redis_populated=True)
async def destroy(self, session_id: str) -> None:
"""
Mark session as destroyed
Args:
session_id: Session ID to destroy
"""
await self.update_fields(
session_id,
status=DemoSessionStatus.DESTROYED,
destroyed_at=datetime.now(timezone.utc)
)
async def get_active_sessions_count(self) -> int:
"""
Get count of active sessions
Returns:
Number of active sessions
"""
result = await self.db.execute(
select(DemoSession).where(DemoSession.status == DemoSessionStatus.ACTIVE)
)
return len(result.scalars().all())
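# NOTE: this materializes every matching row just to count them; a
# SELECT COUNT(*) via sqlalchemy.func.count would be cheaper at scale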
async def get_all_sessions(self) -> List[DemoSession]:
"""
Get all demo sessions
Returns:
List of all DemoSession instances
"""
result = await self.db.execute(select(DemoSession))
return result.scalars().all()
async def get_sessions_by_status(self, status: DemoSessionStatus) -> List[DemoSession]:
"""
Get sessions by status
Args:
status: DemoSessionStatus to filter by
Returns:
List of DemoSession instances with the specified status
"""
result = await self.db.execute(
select(DemoSession).where(DemoSession.status == status)
)
return result.scalars().all()
async def get_session_stats(self) -> Dict[str, Any]:
"""
Get session statistics
Returns:
Dictionary with session statistics
"""
all_sessions = await self.get_all_sessions()
active_sessions = [s for s in all_sessions if s.status == DemoSessionStatus.ACTIVE]
return {
"total_sessions": len(all_sessions),
"active_sessions": len(active_sessions),
"expired_sessions": len([s for s in all_sessions if s.status == DemoSessionStatus.EXPIRED]),
"destroyed_sessions": len([s for s in all_sessions if s.status == DemoSessionStatus.DESTROYED]),
"avg_duration_minutes": sum(
(s.destroyed_at - s.created_at).total_seconds() / 60
for s in all_sessions if s.destroyed_at
) / max(len([s for s in all_sessions if s.destroyed_at]), 1),
"total_requests": sum(s.request_count for s in all_sessions)
}
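# Shape mirrors app.api.schemas.DemoSessionStats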


@@ -0,0 +1,9 @@
"""Demo Session Services"""
from .session_manager import DemoSessionManager
from .cleanup_service import DemoCleanupService
__all__ = [
"DemoSessionManager",
"DemoCleanupService",
]


@@ -0,0 +1,461 @@
"""
Demo Cleanup Service
Handles automatic cleanup of expired sessions
"""
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from datetime import datetime, timezone, timedelta
import structlog
import httpx
import asyncio
import os
from app.models import DemoSession, DemoSessionStatus
from app.core.redis_wrapper import DemoRedisWrapper
from shared.auth.jwt_handler import JWTHandler
logger = structlog.get_logger()
class DemoCleanupService:
"""Handles cleanup of expired demo sessions"""
def __init__(self, db: AsyncSession, redis: DemoRedisWrapper):
self.db = db
self.redis = redis
from app.core.config import settings
# ✅ Security: JWT service tokens used for all internal communication
# No longer using internal API keys
# JWT handler for creating service tokens
self.jwt_handler = JWTHandler(settings.JWT_SECRET_KEY, settings.JWT_ALGORITHM)
# Service URLs for cleanup
self.services = [
("tenant", os.getenv("TENANT_SERVICE_URL", "http://tenant-service:8000")),
("auth", os.getenv("AUTH_SERVICE_URL", "http://auth-service:8000")),
("inventory", os.getenv("INVENTORY_SERVICE_URL", "http://inventory-service:8000")),
("recipes", os.getenv("RECIPES_SERVICE_URL", "http://recipes-service:8000")),
("suppliers", os.getenv("SUPPLIERS_SERVICE_URL", "http://suppliers-service:8000")),
("production", os.getenv("PRODUCTION_SERVICE_URL", "http://production-service:8000")),
("procurement", os.getenv("PROCUREMENT_SERVICE_URL", "http://procurement-service:8000")),
("sales", os.getenv("SALES_SERVICE_URL", "http://sales-service:8000")),
("orders", os.getenv("ORDERS_SERVICE_URL", "http://orders-service:8000")),
("forecasting", os.getenv("FORECASTING_SERVICE_URL", "http://forecasting-service:8000")),
("orchestrator", os.getenv("ORCHESTRATOR_SERVICE_URL", "http://orchestrator-service:8000")),
]
async def cleanup_session(self, session: DemoSession) -> dict:
"""
Delete all data for a demo session across all services.
Returns:
{
"success": bool,
"total_deleted": int,
"duration_ms": int,
"details": {service: {records_deleted, duration_ms}},
"errors": []
}
"""
start_time = datetime.now(timezone.utc)
virtual_tenant_id = str(session.virtual_tenant_id)
session_id = session.session_id
logger.info(
"Starting demo session cleanup",
session_id=session_id,
virtual_tenant_id=virtual_tenant_id,
demo_account_type=session.demo_account_type
)
# Delete from all services in parallel
tasks = [
self._delete_from_service(name, url, virtual_tenant_id)
for name, url in self.services
]
service_results = await asyncio.gather(*tasks, return_exceptions=True)
# Aggregate results
total_deleted = 0
details = {}
errors = []
for (service_name, _), result in zip(self.services, service_results):
if isinstance(result, Exception):
errors.append(f"{service_name}: {str(result)}")
details[service_name] = {"status": "error", "error": str(result)}
else:
total_deleted += result.get("records_deleted", {}).get("total", 0)
details[service_name] = result
# Delete from Redis
await self._delete_redis_cache(virtual_tenant_id)
# Delete child tenants if enterprise
if session.demo_account_type == "enterprise" and session.session_metadata:
child_tenant_ids = session.session_metadata.get("child_tenant_ids", [])
logger.info(
"Deleting child tenant data",
session_id=session_id,
child_count=len(child_tenant_ids)
)
for child_tenant_id in child_tenant_ids:
child_results = await self._delete_from_all_services(str(child_tenant_id))
# Aggregate child deletion results
for (service_name, _), child_result in zip(self.services, child_results):
if isinstance(child_result, Exception):
logger.warning(
"Failed to delete child tenant data from service",
service=service_name,
child_tenant_id=child_tenant_id,
error=str(child_result)
)
else:
child_deleted = child_result.get("records_deleted", {}).get("total", 0)
total_deleted += child_deleted
# Update details to track child deletions
if service_name not in details:
details[service_name] = {"child_deletions": []}
if "child_deletions" not in details[service_name]:
details[service_name]["child_deletions"] = []
details[service_name]["child_deletions"].append({
"child_tenant_id": str(child_tenant_id),
"records_deleted": child_deleted
})
duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
success = len(errors) == 0
logger.info(
"Demo session cleanup completed",
session_id=session_id,
virtual_tenant_id=virtual_tenant_id,
success=success,
total_deleted=total_deleted,
duration_ms=duration_ms,
error_count=len(errors)
)
return {
"success": success,
"total_deleted": total_deleted,
"duration_ms": duration_ms,
"details": details,
"errors": errors
}
async def _delete_from_service(
self,
service_name: str,
service_url: str,
virtual_tenant_id: str
) -> dict:
"""Delete all data from a single service"""
try:
# Create JWT service token with tenant context
service_token = self.jwt_handler.create_service_token(
service_name="demo-session",
tenant_id=virtual_tenant_id
)
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.delete(
f"{service_url}/internal/demo/tenant/{virtual_tenant_id}",
headers={
"Authorization": f"Bearer {service_token}",
"X-Service": "demo-session-service"
}
)
if response.status_code == 200:
return response.json()
elif response.status_code == 404:
# Already deleted or never existed - idempotent
return {
"service": service_name,
"status": "not_found",
"records_deleted": {"total": 0}
}
else:
raise Exception(f"HTTP {response.status_code}: {response.text}")
except Exception as e:
logger.error(
"Failed to delete from service",
service=service_name,
virtual_tenant_id=virtual_tenant_id,
error=str(e)
)
raise
async def _delete_redis_cache(self, virtual_tenant_id: str):
"""Delete all Redis keys for a virtual tenant"""
try:
client = await self.redis.get_client()
pattern = f"*:{virtual_tenant_id}:*"
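# NOTE: KEYS is O(N) and blocks Redis while it scans the keyspace; fine at
# demo scale, but scan_iter() would be safer on large keyspaces.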
keys = await client.keys(pattern)
if keys:
await client.delete(*keys)
logger.debug("Deleted Redis cache", tenant_id=virtual_tenant_id, keys_deleted=len(keys))
except Exception as e:
logger.warning("Failed to delete Redis cache", error=str(e), tenant_id=virtual_tenant_id)
async def _delete_from_all_services(self, virtual_tenant_id: str):
"""Delete data from all services for a tenant"""
tasks = [
self._delete_from_service(name, url, virtual_tenant_id)
for name, url in self.services
]
return await asyncio.gather(*tasks, return_exceptions=True)
async def _delete_tenant_data(self, tenant_id: str, session_id: str) -> dict:
"""Delete demo data for a tenant across all services"""
logger.info("Deleting tenant data", tenant_id=tenant_id, session_id=session_id)
results = {}
async def delete_from_service(service_name: str, service_url: str):
try:
# Create JWT service token with tenant context
service_token = self.jwt_handler.create_service_token(
service_name="demo-session",
tenant_id=tenant_id
)
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.delete(
f"{service_url}/internal/demo/tenant/{tenant_id}",
headers={
"Authorization": f"Bearer {service_token}",
"X-Service": "demo-session-service"
}
)
if response.status_code == 200:
logger.debug(f"Deleted data from {service_name}", tenant_id=tenant_id)
return {"service": service_name, "status": "deleted"}
else:
logger.warning(
f"Failed to delete from {service_name}",
status_code=response.status_code,
tenant_id=tenant_id
)
return {"service": service_name, "status": "failed", "error": f"HTTP {response.status_code}"}
except Exception as e:
logger.warning(
f"Exception deleting from {service_name}",
error=str(e),
tenant_id=tenant_id
)
return {"service": service_name, "status": "failed", "error": str(e)}
# Delete from all services in parallel
tasks = [delete_from_service(name, url) for name, url in self.services]
service_results = await asyncio.gather(*tasks, return_exceptions=True)
for result in service_results:
if isinstance(result, Exception):
logger.error("Service deletion failed", error=str(result))
elif isinstance(result, dict):
results[result["service"]] = result
return results
async def cleanup_expired_sessions(self) -> dict:
"""
Find and clean up all expired sessions.
Also cleans up sessions stuck in PENDING for too long (>5 minutes).
Returns:
Cleanup statistics
"""
logger.info("Starting demo session cleanup")
start_time = datetime.now(timezone.utc)
now = datetime.now(timezone.utc)
stuck_threshold = now - timedelta(minutes=5) # Sessions pending > 5 min are stuck
# Find expired sessions in any non-terminal status (everything except EXPIRED, DESTROYING, and DESTROYED)
result = await self.db.execute(
select(DemoSession).where(
DemoSession.status.in_([
DemoSessionStatus.PENDING,
DemoSessionStatus.READY,
DemoSessionStatus.PARTIAL,
DemoSessionStatus.FAILED,
DemoSessionStatus.ACTIVE # Legacy status, kept for compatibility
]),
DemoSession.expires_at < now
)
)
expired_sessions = result.scalars().all()
# Also find sessions stuck in PENDING
stuck_result = await self.db.execute(
select(DemoSession).where(
DemoSession.status == DemoSessionStatus.PENDING,
DemoSession.created_at < stuck_threshold
)
)
stuck_sessions = stuck_result.scalars().all()
# Combine both lists, de-duplicating sessions that are both expired and stuck
seen_session_ids = set()
all_sessions_to_cleanup = []
for candidate in list(expired_sessions) + list(stuck_sessions):
    if candidate.session_id not in seen_session_ids:
        seen_session_ids.add(candidate.session_id)
        all_sessions_to_cleanup.append(candidate)
stats = {
"total_expired": len(expired_sessions),
"total_stuck": len(stuck_sessions),
"total_to_cleanup": len(all_sessions_to_cleanup),
"cleaned_up": 0,
"failed": 0,
"errors": []
}
for session in all_sessions_to_cleanup:
try:
# Mark as expired and commit first so a concurrent cleanup pass skips this session
session.status = DemoSessionStatus.EXPIRED
await self.db.commit()
# Check if this is an enterprise demo with children
is_enterprise = session.demo_account_type == "enterprise"
child_tenant_ids = []
if is_enterprise and session.session_metadata:
child_tenant_ids = session.session_metadata.get("child_tenant_ids", [])
# Delete child tenants first (for enterprise demos)
if child_tenant_ids:
logger.info(
"Cleaning up enterprise demo children",
session_id=session.session_id,
child_count=len(child_tenant_ids)
)
for child_id in child_tenant_ids:
try:
await self._delete_tenant_data(child_id, session.session_id)
except Exception as child_error:
logger.error(
"Failed to delete child tenant",
child_id=child_id,
error=str(child_error)
)
# Delete parent/main session data
await self._delete_tenant_data(
str(session.virtual_tenant_id),
session.session_id
)
# Delete Redis data
await self.redis.delete_session_data(session.session_id)
stats["cleaned_up"] += 1
logger.info(
"Session cleaned up",
session_id=session.session_id,
is_enterprise=is_enterprise,
children_deleted=len(child_tenant_ids),
age_minutes=(now - session.created_at).total_seconds() / 60
)
except Exception as e:
stats["failed"] += 1
stats["errors"].append({
"session_id": session.session_id,
"error": str(e)
})
logger.error(
"Failed to cleanup session",
session_id=session.session_id,
error=str(e)
)
logger.info("Demo session cleanup completed", stats=stats)
return stats
async def cleanup_old_destroyed_sessions(self, days: int = 7) -> int:
"""
Delete destroyed session records older than specified days
Args:
days: Number of days to keep destroyed sessions
Returns:
Number of deleted records
"""
cutoff_date = datetime.now(timezone.utc) - timedelta(days=days)
result = await self.db.execute(
select(DemoSession).where(
DemoSession.status == DemoSessionStatus.DESTROYED,
DemoSession.destroyed_at < cutoff_date
)
)
old_sessions = result.scalars().all()
for session in old_sessions:
await self.db.delete(session)
await self.db.commit()
logger.info(
"Old destroyed sessions deleted",
count=len(old_sessions),
older_than_days=days
)
return len(old_sessions)
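# A bulk-delete variant (sketch, not wired in): one DELETE statement instead
# of loading and deleting rows individually:
#
#     from sqlalchemy import delete
#     result = await self.db.execute(
#         delete(DemoSession).where(
#             DemoSession.status == DemoSessionStatus.DESTROYED,
#             DemoSession.destroyed_at < cutoff_date
#         )
#     )
#     await self.db.commit()
#     return result.rowcount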
async def get_cleanup_stats(self) -> dict:
"""Get cleanup statistics"""
result = await self.db.execute(select(DemoSession))
all_sessions = result.scalars().all()
now = datetime.now(timezone.utc)
# Count by status
pending_count = len([s for s in all_sessions if s.status == DemoSessionStatus.PENDING])
ready_count = len([s for s in all_sessions if s.status == DemoSessionStatus.READY])
partial_count = len([s for s in all_sessions if s.status == DemoSessionStatus.PARTIAL])
failed_count = len([s for s in all_sessions if s.status == DemoSessionStatus.FAILED])
active_count = len([s for s in all_sessions if s.status == DemoSessionStatus.ACTIVE])
expired_count = len([s for s in all_sessions if s.status == DemoSessionStatus.EXPIRED])
destroyed_count = len([s for s in all_sessions if s.status == DemoSessionStatus.DESTROYED])
# Find sessions that should be expired but aren't marked yet
should_be_expired = len([
s for s in all_sessions
if s.status in [
DemoSessionStatus.PENDING,
DemoSessionStatus.READY,
DemoSessionStatus.PARTIAL,
DemoSessionStatus.FAILED,
DemoSessionStatus.ACTIVE
] and s.expires_at < now
])
return {
"total_sessions": len(all_sessions),
"by_status": {
"pending": pending_count,
"ready": ready_count,
"partial": partial_count,
"failed": failed_count,
"active": active_count, # Legacy
"expired": expired_count,
"destroyed": destroyed_count
},
"pending_cleanup": should_be_expired
}
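# Illustrative sketch (not part of the service): a periodic background loop
# that drives the cleanup above. `session_factory` and `redis_factory` are
# assumed stand-ins for the app's own AsyncSession / DemoRedisWrapper wiring.
async def cleanup_loop(session_factory, redis_factory, interval_seconds: int = 60):
    while True:
        try:
            async with session_factory() as db:
                redis = await redis_factory()
                service = DemoCleanupService(db, redis)
                stats = await service.cleanup_expired_sessions()
                logger.info("Cleanup pass finished", stats=stats)
        except Exception as exc:
            logger.error("Cleanup pass failed", error=str(exc))
        await asyncio.sleep(interval_seconds)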

File diff suppressed because it is too large

View File

@@ -0,0 +1,533 @@
"""
Demo Session Manager
Handles creation, extension, and destruction of demo sessions
"""
from sqlalchemy.ext.asyncio import AsyncSession
from datetime import datetime, timedelta, timezone
from typing import Optional, Dict, Any
import uuid
import json
import secrets
import structlog
from sqlalchemy import select
from app.models import DemoSession, DemoSessionStatus, CloningStatus
from app.core.redis_wrapper import DemoRedisWrapper
from app.core import settings
from app.services.clone_orchestrator import CloneOrchestrator
from app.services.cleanup_service import DemoCleanupService
from app.repositories.demo_session_repository import DemoSessionRepository
logger = structlog.get_logger()
class DemoSessionManager:
"""Manages demo session lifecycle"""
def __init__(self, db: AsyncSession, redis: DemoRedisWrapper):
self.db = db
self.redis = redis
self.repository = DemoSessionRepository(db)
self.orchestrator = CloneOrchestrator(redis_manager=redis) # Pass Redis for real-time progress updates
async def create_session(
self,
demo_account_type: str,
subscription_tier: Optional[str] = None,
user_id: Optional[str] = None,
ip_address: Optional[str] = None,
user_agent: Optional[str] = None
) -> DemoSession:
"""
Create a new demo session
Args:
demo_account_type: 'professional' or 'enterprise'
subscription_tier: Force specific subscription tier (professional/enterprise)
user_id: Optional user ID if authenticated
ip_address: Client IP address
user_agent: Client user agent
Returns:
Created demo session
"""
logger.info("Creating demo session",
demo_account_type=demo_account_type,
subscription_tier=subscription_tier)
# Generate unique session ID
session_id = f"demo_{secrets.token_urlsafe(16)}"
# Generate virtual tenant ID
virtual_tenant_id = uuid.uuid4()
# Get base demo tenant ID from config
demo_config = settings.DEMO_ACCOUNTS.get(demo_account_type)
if not demo_config:
raise ValueError(f"Invalid demo account type: {demo_account_type}")
# Override subscription tier if specified
effective_subscription_tier = subscription_tier or demo_config.get("subscription_tier")
# Get base tenant ID for cloning
base_tenant_id_str = demo_config.get("base_tenant_id")
if not base_tenant_id_str:
raise ValueError(f"Base tenant ID not configured for demo account type: {demo_account_type}")
base_tenant_id = uuid.UUID(base_tenant_id_str)
# Handle enterprise chain setup
child_tenant_ids = []
if demo_account_type == 'enterprise':
# Generate child tenant IDs for enterprise demos
child_configs = demo_config.get('children', [])
child_tenant_ids = [uuid.uuid4() for _ in child_configs]
# Create session record using repository
session_data = {
"session_id": session_id,
"user_id": uuid.UUID(user_id) if user_id else None,
"ip_address": ip_address,
"user_agent": user_agent,
"base_demo_tenant_id": base_tenant_id,
"virtual_tenant_id": virtual_tenant_id,
"demo_account_type": demo_account_type,
"status": DemoSessionStatus.PENDING, # Start as pending until cloning completes
"created_at": datetime.now(timezone.utc),
"expires_at": datetime.now(timezone.utc) + timedelta(
minutes=settings.DEMO_SESSION_DURATION_MINUTES
),
"last_activity_at": datetime.now(timezone.utc),
"data_cloned": False,
"redis_populated": False,
"session_metadata": {
"demo_config": demo_config,
"subscription_tier": effective_subscription_tier,
"extension_count": 0,
"is_enterprise": demo_account_type == 'enterprise',
"child_tenant_ids": [str(tid) for tid in child_tenant_ids] if child_tenant_ids else [],
"child_configs": demo_config.get('children', []) if demo_account_type == 'enterprise' else []
}
}
session = await self.repository.create(session_data)
# Store session metadata in Redis
await self._store_session_metadata(session)
logger.info(
"Demo session created",
session_id=session_id,
virtual_tenant_id=str(virtual_tenant_id),
demo_account_type=demo_account_type,
is_enterprise=demo_account_type == 'enterprise',
child_tenant_count=len(child_tenant_ids),
expires_at=session.expires_at.isoformat()
)
return session
async def get_session(self, session_id: str) -> Optional[DemoSession]:
"""Get session by session_id"""
return await self.repository.get_by_session_id(session_id)
async def get_session_by_virtual_tenant(self, virtual_tenant_id: str) -> Optional[DemoSession]:
"""Get session by virtual tenant ID"""
return await self.repository.get_by_virtual_tenant_id(uuid.UUID(virtual_tenant_id))
async def extend_session(self, session_id: str) -> DemoSession:
"""
Extend session expiration time
Args:
session_id: Session ID to extend
Returns:
Updated session
Raises:
ValueError: If session cannot be extended
"""
session = await self.get_session(session_id)
if not session:
raise ValueError(f"Session not found: {session_id}")
# READY is the post-cloning live status; ACTIVE is its legacy equivalent
if session.status not in (DemoSessionStatus.ACTIVE, DemoSessionStatus.READY):
    raise ValueError(f"Cannot extend {session.status.value} session")
# Check extension limit
extension_count = session.session_metadata.get("extension_count", 0)
if extension_count >= settings.DEMO_SESSION_MAX_EXTENSIONS:
raise ValueError(f"Maximum extensions ({settings.DEMO_SESSION_MAX_EXTENSIONS}) reached")
# Extend expiration
new_expires_at = datetime.now(timezone.utc) + timedelta(
minutes=settings.DEMO_SESSION_DURATION_MINUTES
)
session.expires_at = new_expires_at
session.last_activity_at = datetime.now(timezone.utc)
session.session_metadata["extension_count"] = extension_count + 1
session = await self.repository.update(session)
# Extend Redis TTL
await self.redis.extend_session_ttl(
session_id,
settings.REDIS_SESSION_TTL
)
logger.info(
"Session extended",
session_id=session_id,
new_expires_at=new_expires_at.isoformat(),
extension_count=extension_count + 1
)
return session
async def update_activity(self, session_id: str):
"""Update last activity timestamp"""
await self.repository.update_activity(session_id)
async def mark_data_cloned(self, session_id: str):
"""Mark session as having data cloned"""
await self.repository.mark_data_cloned(session_id)
async def mark_redis_populated(self, session_id: str):
"""Mark session as having Redis data populated"""
await self.repository.mark_redis_populated(session_id)
async def destroy_session(self, session_id: str):
"""
Destroy a demo session and clean up its resources.
This triggers parallel deletion across all services.
"""
session = await self.get_session(session_id)
if not session:
logger.warning("Session not found for destruction", session_id=session_id)
return
try:
# Update status to DESTROYING
await self.repository.update_fields(
session_id,
status=DemoSessionStatus.DESTROYING
)
# Trigger cleanup across all services
cleanup_service = DemoCleanupService(self.db, self.redis)
result = await cleanup_service.cleanup_session(session)
if result["success"]:
# Update status to DESTROYED
await self.repository.update_fields(
session_id,
status=DemoSessionStatus.DESTROYED,
destroyed_at=datetime.now(timezone.utc)
)
else:
# Update status to FAILED with error details
await self.repository.update_fields(
session_id,
status=DemoSessionStatus.FAILED,
error_details=result["errors"]
)
# Delete Redis data
await self.redis.delete_session_data(session_id)
logger.info(
"Session destroyed",
session_id=session_id,
virtual_tenant_id=str(session.virtual_tenant_id),
total_records_deleted=result.get("total_deleted", 0),
duration_ms=result.get("duration_ms", 0)
)
except Exception as e:
logger.error(
"Failed to destroy session",
session_id=session_id,
error=str(e),
exc_info=True
)
# Update status to FAILED with error details
await self.repository.update_fields(
session_id,
status=DemoSessionStatus.FAILED,
error_details=[f"Cleanup failed: {str(e)}"]
)
raise
async def _check_database_disk_space(self):
"""Check if database has sufficient disk space for demo operations"""
try:
# Execute a simple query to check database health and disk space
# This is a basic check - in production you might want more comprehensive monitoring
from sqlalchemy import text
# Check if we can execute a simple query (indicates basic database health)
result = await self.db.execute(text("SELECT 1"))
# Get the scalar result properly
scalar_result = result.scalar_one_or_none()
# For more comprehensive checking, you could add:
# 1. Check table sizes
# 2. Check available disk space via system queries (if permissions allow)
# 3. Check for long-running transactions that might block operations
logger.debug("Database health check passed", result=scalar_result)
except Exception as e:
logger.error("Database health check failed", error=str(e), exc_info=True)
raise RuntimeError(f"Database health check failed: {str(e)}")
async def _store_session_metadata(self, session: DemoSession):
"""Store session metadata in Redis"""
await self.redis.set_session_data(
session.session_id,
"metadata",
{
"session_id": session.session_id,
"virtual_tenant_id": str(session.virtual_tenant_id),
"demo_account_type": session.demo_account_type,
"expires_at": session.expires_at.isoformat(),
"created_at": session.created_at.isoformat()
},
ttl=settings.REDIS_SESSION_TTL
)
async def get_active_sessions_count(self) -> int:
"""Get count of active sessions"""
return await self.repository.get_active_sessions_count()
async def get_session_stats(self) -> Dict[str, Any]:
"""Get session statistics"""
return await self.repository.get_session_stats()
async def trigger_orchestrated_cloning(
self,
session: DemoSession,
base_tenant_id: str
) -> Dict[str, Any]:
"""
Trigger orchestrated cloning across all services
Args:
session: Demo session
base_tenant_id: Template tenant ID to clone from
Returns:
Orchestration result
"""
logger.info(
"Triggering orchestrated cloning",
session_id=session.session_id,
virtual_tenant_id=str(session.virtual_tenant_id)
)
# Check database disk space before starting cloning
try:
await self._check_database_disk_space()
except Exception as e:
logger.error(
"Database disk space check failed",
session_id=session.session_id,
error=str(e)
)
# Mark session as failed due to infrastructure issue
session.status = DemoSessionStatus.FAILED
session.cloning_completed_at = datetime.now(timezone.utc)
session.total_records_cloned = 0
session.cloning_progress = {
"error": "Database disk space issue detected",
"details": str(e)
}
await self.repository.update(session)
await self._cache_session_status(session)
return {
"overall_status": "failed",
"services": {},
"total_records": 0,
"failed_services": ["database"],
"error": "Database disk space issue"
}
# Mark cloning as started and update both database and Redis cache
session.cloning_started_at = datetime.now(timezone.utc)
await self.repository.update(session)
# Update Redis cache to reflect that cloning has started
await self._cache_session_status(session)
# Run orchestration
result = await self.orchestrator.clone_all_services(
base_tenant_id=base_tenant_id,
virtual_tenant_id=str(session.virtual_tenant_id),
demo_account_type=session.demo_account_type,
session_id=session.session_id,
session_metadata=session.session_metadata
)
# Update session with results
await self._update_session_from_clone_result(session, result)
return result
async def _update_session_from_clone_result(
self,
session: DemoSession,
clone_result: Dict[str, Any]
):
"""Update session with cloning results"""
# Map overall status to session status
overall_status = clone_result.get("overall_status")
if overall_status in ["ready", "completed"]:
session.status = DemoSessionStatus.READY
elif overall_status == "failed":
session.status = DemoSessionStatus.FAILED
elif overall_status == "partial":
session.status = DemoSessionStatus.PARTIAL
# Update cloning metadata
session.cloning_completed_at = datetime.now(timezone.utc)
# The clone result might use 'total_records' or 'total_records_cloned'
session.total_records_cloned = clone_result.get("total_records_cloned",
clone_result.get("total_records", 0))
session.cloning_progress = clone_result.get("services", {})
# Mark legacy flags for backward compatibility
if overall_status in ["ready", "completed", "partial"]:
session.data_cloned = True
session.redis_populated = True
await self.repository.update(session)
# Cache status in Redis for fast polling
await self._cache_session_status(session)
logger.info(
"Session updated with clone results",
session_id=session.session_id,
status=session.status.value,
total_records=session.total_records_cloned
)
async def _cache_session_status(self, session: DemoSession):
"""Cache session status in Redis for fast status checks"""
status_key = f"session:{session.session_id}:status"
# Estimate remaining cloning time from a rough average duration
estimated_remaining_seconds = None
if session.cloning_started_at and not session.cloning_completed_at:
    elapsed = (datetime.now(timezone.utc) - session.cloning_started_at).total_seconds()
    avg_duration = 5  # rough average total cloning time, in seconds
    estimated_remaining_seconds = max(0, int(avg_duration - elapsed))
status_data = {
"session_id": session.session_id,
"status": session.status.value,
"progress": session.cloning_progress,
"total_records_cloned": session.total_records_cloned,
"cloning_started_at": session.cloning_started_at.isoformat() if session.cloning_started_at else None,
"cloning_completed_at": session.cloning_completed_at.isoformat() if session.cloning_completed_at else None,
"expires_at": session.expires_at.isoformat(),
"estimated_remaining_seconds": estimated_remaining_seconds,
"demo_account_type": session.demo_account_type
}
client = await self.redis.get_client()
await client.setex(
    status_key,
    7200,  # cache for 2 hours
    json.dumps(status_data)
)
async def get_session_status(self, session_id: str) -> Dict[str, Any]:
"""
Get current session status with cloning progress
Args:
session_id: Session ID
Returns:
Status information including per-service progress
"""
# Try Redis cache first
status_key = f"session:{session_id}:status"
client = await self.redis.get_client()
cached = await client.get(status_key)
if cached:
return json.loads(cached)
# Fall back to database
session = await self.get_session(session_id)
if not session:
return None
await self._cache_session_status(session)
# Calculate estimated remaining time for database fallback
estimated_remaining_seconds = None
if session.cloning_started_at and not session.cloning_completed_at:
elapsed = (datetime.now(timezone.utc) - session.cloning_started_at).total_seconds()
avg_duration = 5  # rough average total cloning time, in seconds
estimated_remaining_seconds = max(0, int(avg_duration - elapsed))
return {
"session_id": session.session_id,
"status": session.status.value,
"progress": session.cloning_progress,
"total_records_cloned": session.total_records_cloned,
"cloning_started_at": session.cloning_started_at.isoformat() if session.cloning_started_at else None,
"cloning_completed_at": session.cloning_completed_at.isoformat() if session.cloning_completed_at else None,
"expires_at": session.expires_at.isoformat(),
"estimated_remaining_seconds": estimated_remaining_seconds,
"demo_account_type": session.demo_account_type
}
async def retry_failed_cloning(
self,
session_id: str,
services: Optional[list] = None
) -> Dict[str, Any]:
"""
Retry failed cloning operations
Args:
session_id: Session ID
services: Specific services to retry (defaults to all failed)
Returns:
Retry result
"""
session = await self.get_session(session_id)
if not session:
raise ValueError(f"Session not found: {session_id}")
if session.status not in [DemoSessionStatus.FAILED, DemoSessionStatus.PARTIAL]:
raise ValueError(f"Cannot retry session in {session.status.value} state")
logger.info(
"Retrying failed cloning",
session_id=session_id,
services=services
)
# Get base tenant ID from config
demo_config = settings.DEMO_ACCOUNTS.get(session.demo_account_type)
base_tenant_id = demo_config.get("base_tenant_id", str(session.base_demo_tenant_id))
# Trigger new cloning attempt
result = await self.trigger_orchestrated_cloning(session, base_tenant_id)
return result
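# Illustrative sketch (not part of the service): end-to-end wiring of the
# manager above. `db` and `redis` stand in for the AsyncSession and
# DemoRedisWrapper normally supplied by the app's dependency injection.
async def run_demo_flow(db: AsyncSession, redis: DemoRedisWrapper) -> dict:
    manager = DemoSessionManager(db, redis)
    # 1. Create the session record (status starts as PENDING)
    session = await manager.create_session(
        demo_account_type="professional",
        ip_address="203.0.113.10",  # example metadata only
        user_agent="demo-smoke-test"
    )
    # 2. Clone template data into the virtual tenant
    base_tenant_id = settings.DEMO_ACCOUNTS["professional"]["base_tenant_id"]
    await manager.trigger_orchestrated_cloning(session, base_tenant_id)
    # 3. Poll the Redis-cached status (READY, PARTIAL, or FAILED once settled)
    return await manager.get_session_status(session.session_id)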