Add user role

This commit is contained in:
Urtzi Alfaro
2025-08-02 09:41:50 +02:00
parent d4687e6375
commit 277e8bec73
13 changed files with 1051 additions and 28 deletions

View File

@@ -32,7 +32,7 @@ async def register(
# ✅ DEBUG: Log incoming registration data (without password)
logger.info(f"Registration attempt for email: {user_data.email}")
logger.debug(f"Registration data - email: {user_data.email}, full_name: {user_data.full_name}")
logger.debug(f"Registration data - email: {user_data.email}, full_name: {user_data.full_name}, role: {user_data.role}")
try:
# ✅ DEBUG: Validate input data

View File

@@ -21,8 +21,7 @@ from app.services.admin_delete import AdminUserDeleteService
# Import unified authentication from shared library
from shared.auth.decorators import (
get_current_user_dep,
get_current_tenant_id_dep,
require_role # For admin-only endpoints
require_admin_role
)
logger = structlog.get_logger()
@@ -126,7 +125,7 @@ async def delete_admin_user(
user_id: str,
background_tasks: BackgroundTasks,
current_user = Depends(get_current_user_dep),
#_admin_check = Depends(require_admin_role),
_admin_check = Depends(require_admin_role),
db: AsyncSession = Depends(get_db)
):
"""
@@ -191,7 +190,7 @@ async def delete_admin_user(
async def preview_user_deletion(
user_id: str,
current_user = Depends(get_current_user_dep),
#_admin_check = Depends(require_admin_role),
_admin_check = Depends(require_admin_role),
db: AsyncSession = Depends(get_db)
):
"""

View File

@@ -31,6 +31,7 @@ class User(Base):
phone = Column(String(20))
language = Column(String(10), default="es")
timezone = Column(String(50), default="Europe/Madrid")
role = Column(String(20), default="user")
# REMOVED: All tenant relationships - these are handled by tenant service
# No tenant_memberships, tenants relationships

View File

@@ -18,6 +18,7 @@ class UserRegistration(BaseModel):
password: str = Field(..., min_length=8, max_length=128)
full_name: str = Field(..., min_length=1, max_length=255)
tenant_name: Optional[str] = Field(None, max_length=255)
role: Optional[str] = Field("user", pattern=r'^(user|admin|manager)$')
class UserLogin(BaseModel):
"""User login request"""

View File

@@ -48,7 +48,8 @@ class AuthService:
is_active=True,
is_verified=False,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc)
updated_at=datetime.now(timezone.utc),
role=user_data.role
)
db.add(new_user)
@@ -115,7 +116,8 @@ class AuthService:
"full_name": new_user.full_name,
"is_active": new_user.is_active,
"is_verified": new_user.is_verified,
"created_at": new_user.created_at.isoformat()
"created_at": new_user.created_at.isoformat(),
"role": new_user.role
}
}
@@ -242,7 +244,8 @@ class AuthService:
"full_name": user.full_name,
"is_active": user.is_active,
"is_verified": user.is_verified,
"created_at": user.created_at.isoformat()
"created_at": user.created_at.isoformat(),
"role": user.role
}
}

View File

@@ -9,12 +9,14 @@ import structlog
from fastapi import APIRouter, Depends, HTTPException, status, Query, Path
from sqlalchemy.ext.asyncio import AsyncSession
from typing import List, Optional
from datetime import date
from datetime import date, datetime
from sqlalchemy import select, delete, func
import uuid
from app.core.database import get_db
from shared.auth.decorators import (
get_current_user_dep,
get_current_tenant_id_dep
require_admin_role
)
from app.services.forecasting_service import ForecastingService
from app.schemas.forecasts import (
@@ -22,6 +24,7 @@ from app.schemas.forecasts import (
BatchForecastResponse, AlertResponse
)
from app.models.forecasts import Forecast, PredictionBatch, ForecastAlert
from app.services.messaging import publish_forecasts_deleted_event
logger = structlog.get_logger()
router = APIRouter()
@@ -318,4 +321,197 @@ async def acknowledge_alert(
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Internal server error"
)
)
@router.delete("/forecasts/tenant/{tenant_id}")
async def delete_tenant_forecasts_complete(
    tenant_id: str,
    current_user = Depends(get_current_user_dep),
    _admin_check = Depends(require_admin_role),
    db: AsyncSession = Depends(get_db)
):
    """
    Delete all forecasts and predictions for a tenant.

    **WARNING: This operation is irreversible!**

    This endpoint:
    1. Cancels any active prediction batches
    2. Clears prediction cache
    3. Deletes all forecast records
    4. Deletes prediction batch records
    5. Deletes model performance metrics
    6. Publishes deletion event

    Used by admin user deletion process to clean up all forecasting data.

    Args:
        tenant_id: Tenant UUID as a string; validated before any DB work.
        current_user: Authenticated caller (injected; not used beyond auth).
        _admin_check: Admin-role gate dependency; rejects non-admin callers
            before the body runs.
        db: Async database session.

    Returns:
        dict with ``success``, a human-readable ``message`` and
        ``deletion_details`` (per-step counts plus non-fatal errors).

    Raises:
        HTTPException: 400 for a malformed tenant id; 500 if the forecast
            delete (step 5) fails or on any unexpected error.
    """
    # Fail fast on malformed ids before touching the database.
    try:
        tenant_uuid = uuid.UUID(tenant_id)
    except ValueError:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid tenant ID format"
        )
    try:
        # Local imports avoid import cycles at router load time.
        from app.models.forecasts import Forecast, PredictionBatch
        from app.models.predictions import ModelPerformanceMetric, PredictionCache
        # Audit trail returned to the caller. Failures in steps 1-4 are
        # appended to "errors" and deletion continues; only step 5 aborts.
        deletion_stats = {
            "tenant_id": tenant_id,
            "deleted_at": datetime.utcnow().isoformat(),  # naive UTC (file convention)
            "batches_cancelled": 0,
            "forecasts_deleted": 0,
            "prediction_batches_deleted": 0,
            "performance_metrics_deleted": 0,
            "cache_entries_deleted": 0,
            "errors": []
        }
        # Step 1: Cancel active prediction batches so no job keeps writing
        # forecasts while we delete them. Committed immediately.
        try:
            active_batches_query = select(PredictionBatch).where(
                PredictionBatch.tenant_id == tenant_uuid,
                PredictionBatch.status.in_(["pending", "processing"])
            )
            active_batches_result = await db.execute(active_batches_query)
            active_batches = active_batches_result.scalars().all()
            for batch in active_batches:
                batch.status = "cancelled"
                batch.completed_at = datetime.utcnow()
                deletion_stats["batches_cancelled"] += 1
            if active_batches:
                await db.commit()
                logger.info("Cancelled active prediction batches",
                            tenant_id=tenant_id,
                            count=len(active_batches))
        except Exception as e:
            error_msg = f"Error cancelling prediction batches: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 2: Delete prediction cache. Each step counts rows first so
        # the stats can report them; deletes from steps 2-4 are committed
        # together with step 5's commit.
        try:
            cache_count_query = select(func.count(PredictionCache.id)).where(
                PredictionCache.tenant_id == tenant_uuid
            )
            cache_count_result = await db.execute(cache_count_query)
            cache_count = cache_count_result.scalar()
            cache_delete_query = delete(PredictionCache).where(
                PredictionCache.tenant_id == tenant_uuid
            )
            await db.execute(cache_delete_query)
            deletion_stats["cache_entries_deleted"] = cache_count
            logger.info("Deleted prediction cache entries",
                        tenant_id=tenant_id,
                        count=cache_count)
        except Exception as e:
            error_msg = f"Error deleting prediction cache: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 3: Delete model performance metrics (same count-then-delete pattern).
        try:
            metrics_count_query = select(func.count(ModelPerformanceMetric.id)).where(
                ModelPerformanceMetric.tenant_id == tenant_uuid
            )
            metrics_count_result = await db.execute(metrics_count_query)
            metrics_count = metrics_count_result.scalar()
            metrics_delete_query = delete(ModelPerformanceMetric).where(
                ModelPerformanceMetric.tenant_id == tenant_uuid
            )
            await db.execute(metrics_delete_query)
            deletion_stats["performance_metrics_deleted"] = metrics_count
            logger.info("Deleted performance metrics",
                        tenant_id=tenant_id,
                        count=metrics_count)
        except Exception as e:
            error_msg = f"Error deleting performance metrics: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 4: Delete prediction batches (including the ones cancelled in step 1).
        try:
            batches_count_query = select(func.count(PredictionBatch.id)).where(
                PredictionBatch.tenant_id == tenant_uuid
            )
            batches_count_result = await db.execute(batches_count_query)
            batches_count = batches_count_result.scalar()
            batches_delete_query = delete(PredictionBatch).where(
                PredictionBatch.tenant_id == tenant_uuid
            )
            await db.execute(batches_delete_query)
            deletion_stats["prediction_batches_deleted"] = batches_count
            logger.info("Deleted prediction batches",
                        tenant_id=tenant_id,
                        count=batches_count)
        except Exception as e:
            error_msg = f"Error deleting prediction batches: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 5: Delete forecasts (main data). This is the only step that
        # aborts the request on failure: rollback + HTTP 500. Its commit
        # also persists the uncommitted deletes from steps 2-4.
        try:
            forecasts_count_query = select(func.count(Forecast.id)).where(
                Forecast.tenant_id == tenant_uuid
            )
            forecasts_count_result = await db.execute(forecasts_count_query)
            forecasts_count = forecasts_count_result.scalar()
            forecasts_delete_query = delete(Forecast).where(
                Forecast.tenant_id == tenant_uuid
            )
            await db.execute(forecasts_delete_query)
            deletion_stats["forecasts_deleted"] = forecasts_count
            await db.commit()
            logger.info("Deleted forecasts",
                        tenant_id=tenant_id,
                        count=forecasts_count)
        except Exception as e:
            await db.rollback()
            error_msg = f"Error deleting forecasts: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=error_msg
            )
        # Step 6: Publish deletion event (best-effort; never fails the request).
        try:
            await publish_forecasts_deleted_event(tenant_id, deletion_stats)
        except Exception as e:
            logger.warning("Failed to publish forecasts deletion event", error=str(e))
        return {
            "success": True,
            "message": f"All forecasting data for tenant {tenant_id} deleted successfully",
            "deletion_details": deletion_stats
        }
    except HTTPException:
        # Re-raise the intentional 400/500 responses from above untouched.
        raise
    except Exception as e:
        logger.error("Unexpected error deleting tenant forecasts",
                     tenant_id=tenant_id,
                     error=str(e))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete tenant forecasts: {str(e)}"
        )

View File

@@ -9,6 +9,7 @@ import structlog
import json
from typing import Dict, Any
import asyncio
import datetime
from shared.messaging.rabbitmq import RabbitMQClient
from shared.messaging.events import (
@@ -132,4 +133,20 @@ async def handle_weather_updated(data: Dict[str, Any]):
# Could trigger re-forecasting if needed
except Exception as e:
logger.error("Error handling weather updated event", error=str(e))
logger.error("Error handling weather updated event", error=str(e))
async def publish_forecasts_deleted_event(tenant_id: str, deletion_stats: Dict[str, Any]):
    """Publish forecasts deletion event to message queue.

    Best-effort: any publish failure is logged and swallowed so the
    deletion endpoint never fails because the message bus is down.

    Args:
        tenant_id: Tenant whose forecasting data was deleted.
        deletion_stats: Per-step deletion counts/errors from the endpoint.
    """
    try:
        await rabbitmq_client.publish_event(
            exchange="forecasting_events",
            routing_key="forecasting.tenant.deleted",
            message={
                "event_type": "tenant_forecasts_deleted",
                "tenant_id": tenant_id,
                # BUG FIX: this module does `import datetime` (the module,
                # not the class), so the previous `datetime.utcnow()` raised
                # AttributeError on every call — swallowed by the except
                # below, meaning the event was silently never published.
                "timestamp": datetime.datetime.utcnow().isoformat(),
                "deletion_stats": deletion_stats
            }
        )
    except Exception as e:
        logger.error("Failed to publish forecasts deletion event", error=str(e))

View File

@@ -8,18 +8,20 @@ from sqlalchemy.ext.asyncio import AsyncSession
from typing import List, Dict, Any
import structlog
from uuid import UUID
from sqlalchemy import select, delete, func
from datetime import datetime
import uuid
from app.core.database import get_db
from app.services.messaging import publish_tenant_deleted_event
from app.schemas.tenants import (
BakeryRegistration, TenantResponse, TenantAccessResponse,
TenantUpdate, TenantMemberResponse
)
from app.services.tenant_service import TenantService
# Import unified authentication
from shared.auth.decorators import (
get_current_user_dep,
get_current_tenant_id_dep,
require_role
require_admin_role
)
logger = structlog.get_logger()
@@ -163,4 +165,150 @@ async def add_team_member(
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to add team member"
)
@router.delete("/tenants/{tenant_id}")
async def delete_tenant_complete(
    tenant_id: str,
    current_user = Depends(get_current_user_dep),
    _admin_check = Depends(require_admin_role),
    db: AsyncSession = Depends(get_db)
):
    """
    Delete a tenant completely with all associated data.

    **WARNING: This operation is irreversible!**

    This endpoint:
    1. Validates tenant exists and user has permissions
    2. Deletes all tenant memberships
    3. Deletes tenant subscription data
    4. Deletes the tenant record
    5. Publishes deletion event

    Used by admin user deletion process when a tenant has no other admins.

    Args:
        tenant_id: Tenant UUID as a string; validated before any DB work.
        current_user: Authenticated caller (injected; not used beyond auth).
        _admin_check: Admin-role gate; rejects non-admin callers.
        db: Async database session.

    Returns:
        dict with ``success``, ``message`` and ``deletion_details``
        (per-step counts plus non-fatal errors).

    Raises:
        HTTPException: 400 on malformed id, 404 if the tenant does not
            exist, 500 if the tenant-record delete fails or on any
            unexpected error.
    """
    # Fail fast on malformed ids before touching the database.
    try:
        tenant_uuid = uuid.UUID(tenant_id)
    except ValueError:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid tenant ID format"
        )
    try:
        # Local import avoids circular imports at module load time.
        from app.models.tenants import Tenant, TenantMember, Subscription
        # Step 1: Verify tenant exists
        tenant_query = select(Tenant).where(Tenant.id == tenant_uuid)
        tenant_result = await db.execute(tenant_query)
        tenant = tenant_result.scalar_one_or_none()
        if not tenant:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Tenant {tenant_id} not found"
            )
        # Audit trail returned to the caller. Failures in steps 2-3 are
        # recorded in "errors" and deletion continues; only step 4 aborts.
        deletion_stats = {
            "tenant_id": tenant_id,
            "tenant_name": tenant.name,
            "deleted_at": datetime.utcnow().isoformat(),  # naive UTC (file convention)
            "memberships_deleted": 0,
            "subscriptions_deleted": 0,
            "errors": []
        }
        # Step 2: Delete all tenant memberships. Rows are counted first so
        # the stats can report them; the delete is committed together with
        # step 4's commit.
        try:
            membership_count_query = select(func.count(TenantMember.id)).where(
                TenantMember.tenant_id == tenant_uuid
            )
            membership_count_result = await db.execute(membership_count_query)
            membership_count = membership_count_result.scalar()
            membership_delete_query = delete(TenantMember).where(
                TenantMember.tenant_id == tenant_uuid
            )
            await db.execute(membership_delete_query)
            deletion_stats["memberships_deleted"] = membership_count
            logger.info("Deleted tenant memberships",
                        tenant_id=tenant_id,
                        count=membership_count)
        except Exception as e:
            error_msg = f"Error deleting memberships: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 3: Delete subscription data (same count-then-delete pattern).
        try:
            subscription_count_query = select(func.count(Subscription.id)).where(
                Subscription.tenant_id == tenant_uuid
            )
            subscription_count_result = await db.execute(subscription_count_query)
            subscription_count = subscription_count_result.scalar()
            subscription_delete_query = delete(Subscription).where(
                Subscription.tenant_id == tenant_uuid
            )
            await db.execute(subscription_delete_query)
            deletion_stats["subscriptions_deleted"] = subscription_count
            logger.info("Deleted tenant subscriptions",
                        tenant_id=tenant_id,
                        count=subscription_count)
        except Exception as e:
            error_msg = f"Error deleting subscriptions: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 4: Delete the tenant record. Commits steps 2-4 atomically;
        # on failure everything rolls back and the request fails with 500.
        try:
            tenant_delete_query = delete(Tenant).where(Tenant.id == tenant_uuid)
            # NOTE(review): rebinding tenant_result shadows the SELECT result
            # from step 1 — harmless, but worth renaming in a follow-up.
            tenant_result = await db.execute(tenant_delete_query)
            if tenant_result.rowcount == 0:
                raise Exception("Tenant record was not deleted")
            await db.commit()
            # NOTE(review): tenant.name is read after commit; if the async
            # session uses expire_on_commit=True this attribute refresh can
            # fail on an AsyncSession — confirm session configuration.
            logger.info("Tenant deleted successfully",
                        tenant_id=tenant_id,
                        tenant_name=tenant.name)
        except Exception as e:
            await db.rollback()
            error_msg = f"Error deleting tenant record: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=error_msg
            )
        # Step 5: Publish tenant deletion event (best-effort; never fatal).
        try:
            await publish_tenant_deleted_event(tenant_id, deletion_stats)
        except Exception as e:
            logger.warning("Failed to publish tenant deletion event", error=str(e))
        return {
            "success": True,
            "message": f"Tenant {tenant_id} deleted successfully",
            "deletion_details": deletion_stats
        }
    except HTTPException:
        # Re-raise the intentional 400/404/500 responses from above untouched.
        raise
    except Exception as e:
        logger.error("Unexpected error deleting tenant",
                     tenant_id=tenant_id,
                     error=str(e))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete tenant: {str(e)}"
        )

View File

@@ -6,6 +6,7 @@ from shared.messaging.rabbitmq import RabbitMQClient
from app.core.config import settings
import structlog
from datetime import datetime
from typing import Dict, Any
logger = structlog.get_logger()
@@ -40,4 +41,20 @@ async def publish_member_added(tenant_id: str, user_id: str, role: str):
}
)
except Exception as e:
logger.error(f"Failed to publish tenant.member.added event: {e}")
logger.error(f"Failed to publish tenant.member.added event: {e}")
async def publish_tenant_deleted_event(tenant_id: str, deletion_stats: Dict[str, Any]):
    """Broadcast a ``tenant.deleted`` event on the tenant exchange.

    Best-effort: publish failures are logged and swallowed so tenant
    deletion never fails just because the message bus is unavailable.
    """
    try:
        payload = {
            "event_type": "tenant_deleted",
            "tenant_id": tenant_id,
            "timestamp": datetime.utcnow().isoformat(),
            "deletion_stats": deletion_stats,
        }
        await data_publisher.publish_event(
            exchange="tenant_events",
            routing_key="tenant.deleted",
            message=payload,
        )
    except Exception as exc:
        logger.error("Failed to publish tenant deletion event", error=str(exc))

View File

@@ -12,9 +12,15 @@ from app.core.database import get_db
from app.schemas.training import TrainedModelResponse, ModelMetricsResponse
from app.services.training_service import TrainingService
from datetime import datetime
from sqlalchemy import select, delete, func
import uuid
import shutil
from app.services.messaging import publish_models_deleted_event
from shared.auth.decorators import (
get_current_tenant_id_dep
get_current_user_dep,
require_admin_role
)
logger = structlog.get_logger()
@@ -212,4 +218,244 @@ async def list_models(
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to retrieve models"
)
@router.delete("/models/tenant/{tenant_id}")
async def delete_tenant_models_complete(
    tenant_id: str,
    current_user = Depends(get_current_user_dep),
    _admin_check = Depends(require_admin_role),
    db: AsyncSession = Depends(get_db)
):
    """
    Delete all trained models and artifacts for a tenant.

    **WARNING: This operation is irreversible!**

    This endpoint:
    1. Cancels any active training jobs for the tenant
    2. Deletes all model artifacts (files) from storage
    3. Deletes model records from database
    4. Deletes training logs and performance metrics
    5. Publishes deletion event

    Used by admin user deletion process to clean up all training data.

    Args:
        tenant_id: Tenant UUID as a string; validated before any work.
        current_user: Authenticated caller (injected; not used beyond auth).
        _admin_check: Admin-role gate; rejects non-admin callers.
        db: Async database session.

    Returns:
        dict with ``success``, ``message`` and ``deletion_details``
        (per-step counts, bytes freed, and non-fatal errors).

    Raises:
        HTTPException: 400 on malformed id; 500 if the database-record
            delete (step 3) fails or on any unexpected error.
    """
    # Fail fast on malformed ids before touching storage or the database.
    try:
        tenant_uuid = uuid.UUID(tenant_id)
    except ValueError:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid tenant ID format"
        )
    try:
        # Local imports avoid import cycles at router load time.
        from app.models.training import (
            ModelTrainingLog,
            TrainedModel,
            ModelArtifact,
            ModelPerformanceMetric,
            TrainingJobQueue
        )
        from app.core.config import settings
        # Audit trail returned to the caller. File/storage failures are
        # recorded in "errors" and deletion continues; only step 3 aborts.
        deletion_stats = {
            "tenant_id": tenant_id,
            "deleted_at": datetime.utcnow().isoformat(),  # naive UTC (file convention)
            "jobs_cancelled": 0,
            "models_deleted": 0,
            "artifacts_deleted": 0,
            "artifacts_files_deleted": 0,
            "training_logs_deleted": 0,
            "performance_metrics_deleted": 0,
            "storage_freed_bytes": 0,
            "errors": []
        }
        # Step 1: Cancel active training jobs so nothing keeps writing
        # artifacts while we delete them. Committed immediately.
        try:
            active_jobs_query = select(TrainingJobQueue).where(
                TrainingJobQueue.tenant_id == tenant_uuid,
                TrainingJobQueue.status.in_(["queued", "running", "pending"])
            )
            active_jobs_result = await db.execute(active_jobs_query)
            active_jobs = active_jobs_result.scalars().all()
            for job in active_jobs:
                job.status = "cancelled"
                job.updated_at = datetime.utcnow()
                deletion_stats["jobs_cancelled"] += 1
            if active_jobs:
                await db.commit()
                logger.info("Cancelled active training jobs",
                            tenant_id=tenant_id,
                            count=len(active_jobs))
        except Exception as e:
            error_msg = f"Error cancelling training jobs: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 2: Delete model artifact files from storage. Per-file errors
        # are non-fatal so one bad path cannot block the whole cleanup.
        try:
            artifacts_query = select(ModelArtifact).where(
                ModelArtifact.tenant_id == tenant_uuid
            )
            artifacts_result = await db.execute(artifacts_query)
            artifacts = artifacts_result.scalars().all()
            storage_freed = 0
            files_deleted = 0
            for artifact in artifacts:
                try:
                    # NOTE(review): Path is not in this file's visible
                    # imports — presumably `from pathlib import Path`
                    # elsewhere in the module; verify.
                    file_path = Path(artifact.file_path)
                    if file_path.exists():
                        file_size = file_path.stat().st_size
                        file_path.unlink()  # Delete file
                        storage_freed += file_size
                        files_deleted += 1
                        logger.debug("Deleted artifact file",
                                     file_path=str(file_path),
                                     size_bytes=file_size)
                    # Also try to delete parent directories if empty
                    try:
                        if file_path.parent.exists() and not any(file_path.parent.iterdir()):
                            file_path.parent.rmdir()
                    except:
                        # Deliberate best-effort: directory cleanup must
                        # never fail the artifact deletion loop.
                        pass  # Ignore errors cleaning up directories
                except Exception as e:
                    error_msg = f"Error deleting artifact file {artifact.file_path}: {str(e)}"
                    deletion_stats["errors"].append(error_msg)
                    logger.warning(error_msg)
            deletion_stats["artifacts_files_deleted"] = files_deleted
            deletion_stats["storage_freed_bytes"] = storage_freed
            logger.info("Deleted artifact files",
                        tenant_id=tenant_id,
                        files_deleted=files_deleted,
                        storage_freed_mb=storage_freed / (1024 * 1024))
        except Exception as e:
            error_msg = f"Error processing artifact files: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
        # Step 3: Delete database records. All deletes below share one
        # commit; on failure everything rolls back and the request fails
        # with 500 (files removed in step 2 are NOT restored).
        try:
            # Delete model performance metrics
            metrics_count_query = select(func.count(ModelPerformanceMetric.id)).where(
                ModelPerformanceMetric.tenant_id == tenant_uuid
            )
            metrics_count_result = await db.execute(metrics_count_query)
            metrics_count = metrics_count_result.scalar()
            metrics_delete_query = delete(ModelPerformanceMetric).where(
                ModelPerformanceMetric.tenant_id == tenant_uuid
            )
            await db.execute(metrics_delete_query)
            deletion_stats["performance_metrics_deleted"] = metrics_count
            # Delete model artifacts records
            artifacts_count_query = select(func.count(ModelArtifact.id)).where(
                ModelArtifact.tenant_id == tenant_uuid
            )
            artifacts_count_result = await db.execute(artifacts_count_query)
            artifacts_count = artifacts_count_result.scalar()
            artifacts_delete_query = delete(ModelArtifact).where(
                ModelArtifact.tenant_id == tenant_uuid
            )
            await db.execute(artifacts_delete_query)
            deletion_stats["artifacts_deleted"] = artifacts_count
            # Delete trained models
            models_count_query = select(func.count(TrainedModel.id)).where(
                TrainedModel.tenant_id == tenant_uuid
            )
            models_count_result = await db.execute(models_count_query)
            models_count = models_count_result.scalar()
            models_delete_query = delete(TrainedModel).where(
                TrainedModel.tenant_id == tenant_uuid
            )
            await db.execute(models_delete_query)
            deletion_stats["models_deleted"] = models_count
            # Delete training logs
            logs_count_query = select(func.count(ModelTrainingLog.id)).where(
                ModelTrainingLog.tenant_id == tenant_uuid
            )
            logs_count_result = await db.execute(logs_count_query)
            logs_count = logs_count_result.scalar()
            logs_delete_query = delete(ModelTrainingLog).where(
                ModelTrainingLog.tenant_id == tenant_uuid
            )
            await db.execute(logs_delete_query)
            deletion_stats["training_logs_deleted"] = logs_count
            # Delete job queue entries (not counted in the stats dict)
            queue_delete_query = delete(TrainingJobQueue).where(
                TrainingJobQueue.tenant_id == tenant_uuid
            )
            await db.execute(queue_delete_query)
            await db.commit()
            logger.info("Deleted training database records",
                        tenant_id=tenant_id,
                        models=models_count,
                        artifacts=artifacts_count,
                        logs=logs_count,
                        metrics=metrics_count)
        except Exception as e:
            await db.rollback()
            error_msg = f"Error deleting database records: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.error(error_msg)
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=error_msg
            )
        # Step 4: Clean up tenant model directory (best-effort; warns only).
        try:
            tenant_model_dir = Path(settings.MODEL_STORAGE_PATH) / tenant_id
            if tenant_model_dir.exists():
                shutil.rmtree(tenant_model_dir)
                logger.info("Deleted tenant model directory",
                            directory=str(tenant_model_dir))
        except Exception as e:
            error_msg = f"Error deleting model directory: {str(e)}"
            deletion_stats["errors"].append(error_msg)
            logger.warning(error_msg)
        # Step 5: Publish deletion event (best-effort; never fatal).
        try:
            await publish_models_deleted_event(tenant_id, deletion_stats)
        except Exception as e:
            logger.warning("Failed to publish models deletion event", error=str(e))
        return {
            "success": True,
            "message": f"All training data for tenant {tenant_id} deleted successfully",
            "deletion_details": deletion_stats
        }
    except HTTPException:
        # Re-raise the intentional 400/500 responses from above untouched.
        raise
    except Exception as e:
        logger.error("Unexpected error deleting tenant models",
                     tenant_id=tenant_id,
                     error=str(e))
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete tenant models: {str(e)}"
        )

View File

@@ -442,6 +442,24 @@ async def publish_data_validation_completed(
}
)
async def publish_models_deleted_event(tenant_id: str, deletion_stats: Dict[str, Any]):
    """Emit a ``training.tenant.models.deleted`` event for a tenant.

    Best-effort publisher: any failure is logged and swallowed so the
    deletion endpoint's response does not depend on the message bus.
    """
    try:
        event_body = dict(
            event_type="tenant_models_deleted",
            tenant_id=tenant_id,
            timestamp=datetime.utcnow().isoformat(),
            deletion_stats=deletion_stats,
        )
        await training_publisher.publish_event(
            exchange="training_events",
            routing_key="training.tenant.models.deleted",
            message=event_body,
        )
    except Exception as exc:
        logger.error("Failed to publish models deletion event", error=str(exc))
# =========================================
# UTILITY FUNCTIONS FOR BATCH PUBLISHING
# =========================================
@@ -549,4 +567,5 @@ class TrainingStatusPublisher:
async def job_failed(self, error: str, error_details: Optional[Dict] = None):
"""Publish job failure with clean error details"""
clean_error_details = safe_json_serialize(error_details) if error_details else None
await publish_job_failed(self.job_id, self.tenant_id, error, clean_error_details)
await publish_job_failed(self.job_id, self.tenant_id, error, clean_error_details)