New alert service
@@ -1,9 +0,0 @@
"""
Alert Processor API Endpoints
"""

from .analytics import router as analytics_router
from .alerts import router as alerts_router
from .internal_demo import router as internal_demo_router

__all__ = ['analytics_router', 'alerts_router', 'internal_demo_router']

@@ -1,517 +1,430 @@
# services/alert_processor/app/api/alerts.py
"""
Alerts API endpoints for dashboard and alert management
Alert API endpoints.
"""

from fastapi import APIRouter, HTTPException, Query, Path, Depends
from fastapi import APIRouter, Depends, Query, HTTPException
from typing import List, Optional
from pydantic import BaseModel, Field
from uuid import UUID
from datetime import datetime
from sqlalchemy.ext.asyncio import AsyncSession
import structlog

from app.repositories.alerts_repository import AlertsRepository
from app.models.events import AlertStatus
from app.dependencies import get_current_user
from app.core.database import get_db
from app.repositories.event_repository import EventRepository
from app.schemas.events import EventResponse, EventSummary

logger = structlog.get_logger()

router = APIRouter()


# ============================================================
# Response Models
# ============================================================

class AlertResponse(BaseModel):
    """Individual alert response"""
    id: str
    tenant_id: str
    item_type: str
    alert_type: str
    priority_level: str
    priority_score: int
    status: str
    service: str
    title: str
    message: str
    type_class: str
    actions: Optional[List[dict]] = None  # smart_actions is a list of action objects
    alert_metadata: Optional[dict] = None
    created_at: datetime
    updated_at: datetime
    resolved_at: Optional[datetime] = None

    class Config:
        from_attributes = True


class AlertsSummaryResponse(BaseModel):
    """Alerts summary for dashboard"""
    total_count: int = Field(..., description="Total number of alerts")
    active_count: int = Field(..., description="Number of active (unresolved) alerts")
    critical_count: int = Field(..., description="Number of critical priority alerts")
    high_count: int = Field(..., description="Number of high priority alerts")
    medium_count: int = Field(..., description="Number of medium priority alerts")
    low_count: int = Field(..., description="Number of low priority alerts")
    resolved_count: int = Field(..., description="Number of resolved alerts")
    acknowledged_count: int = Field(..., description="Number of acknowledged alerts")


class AlertsListResponse(BaseModel):
    """List of alerts with pagination"""
    alerts: List[AlertResponse]
    total: int
    limit: int
    offset: int


# ============================================================
# API Endpoints
# ============================================================

@router.get(
    "/api/v1/tenants/{tenant_id}/alerts/summary",
    response_model=AlertsSummaryResponse,
    summary="Get alerts summary",
    description="Get summary of alerts by priority level and status for dashboard health indicator"
)
async def get_alerts_summary(
    tenant_id: UUID = Path(..., description="Tenant ID")
) -> AlertsSummaryResponse:
    """
    Get alerts summary for dashboard

    Returns counts of alerts grouped by priority level and status.
    Critical count maps to URGENT priority level for dashboard compatibility.
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            summary = await repo.get_alerts_summary(tenant_id)
            return AlertsSummaryResponse(**summary)

    except Exception as e:
        logger.error("Error getting alerts summary", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=str(e))
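

# ---------------------------------------------------------------------------
# Editorial example (not part of the original commit): a payload of the shape
# the endpoint above returns, built directly from AlertsSummaryResponse with
# made-up counts. Per the docstring, critical_count is derived from alerts at
# the URGENT priority level.
# ---------------------------------------------------------------------------
_example_summary = AlertsSummaryResponse(
    total_count=42, active_count=7, critical_count=2, high_count=3,
    medium_count=1, low_count=1, resolved_count=30, acknowledged_count=5
)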


@router.get(
    "/api/v1/tenants/{tenant_id}/alerts",
    response_model=AlertsListResponse,
    summary="Get alerts list",
    description="Get filtered list of alerts with pagination"
)
async def get_alerts(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    priority_level: Optional[str] = Query(None, description="Filter by priority level: critical, important, standard, info"),
    status: Optional[str] = Query(None, description="Filter by status: active, resolved, acknowledged, ignored"),
    resolved: Optional[bool] = Query(None, description="Filter by resolved status: true=resolved only, false=unresolved only"),
    limit: int = Query(100, ge=1, le=1000, description="Maximum number of results"),
    offset: int = Query(0, ge=0, description="Pagination offset")
) -> AlertsListResponse:
    """
    Get filtered list of alerts

    Supports filtering by:
    - priority_level: critical, important, standard, info
    - status: active, resolved, acknowledged, ignored
    - resolved: boolean filter for resolved status
    - pagination: limit and offset
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        # Validate priority_level enum
        valid_priority_levels = ['critical', 'important', 'standard', 'info']
        if priority_level and priority_level not in valid_priority_levels:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid priority level. Must be one of: {valid_priority_levels}"
            )

        # Validate status enum
        valid_status_values = ['active', 'resolved', 'acknowledged', 'ignored']
        if status and status not in valid_status_values:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid status. Must be one of: {valid_status_values}"
            )

        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alerts = await repo.get_alerts(
                tenant_id=tenant_id,
                priority_level=priority_level,
                status=status,
                resolved=resolved,
                limit=limit,
                offset=offset
            )

            # Convert to response models
            alert_responses = []
            for alert in alerts:
                # Handle old format actions (strings) by converting to proper dict format
                actions = alert.smart_actions
                if actions and isinstance(actions, list) and len(actions) > 0:
                    # Check if actions are strings (old format)
                    if isinstance(actions[0], str):
                        # Convert old format to new format
                        actions = [
                            {
                                'action_type': action,
                                'label': action.replace('_', ' ').title(),
                                'variant': 'default',
                                'disabled': False
                            }
                            for action in actions
                        ]

                alert_responses.append(AlertResponse(
                    id=str(alert.id),
                    tenant_id=str(alert.tenant_id),
                    item_type=alert.item_type,
                    alert_type=alert.alert_type,
                    priority_level=alert.priority_level.value if hasattr(alert.priority_level, 'value') else alert.priority_level,
                    priority_score=alert.priority_score,
                    status=alert.status.value if hasattr(alert.status, 'value') else alert.status,
                    service=alert.service,
                    title=alert.title,
                    message=alert.message,
                    type_class=alert.type_class.value if hasattr(alert.type_class, 'value') else alert.type_class,
                    actions=actions,  # Use converted actions
                    alert_metadata=alert.alert_metadata,
                    created_at=alert.created_at,
                    updated_at=alert.updated_at,
                    resolved_at=alert.resolved_at
                ))

            return AlertsListResponse(
                alerts=alert_responses,
                total=len(alert_responses),  # In a real implementation, you'd query the total count separately
                limit=limit,
                offset=offset
            )

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error getting alerts", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=str(e))


@router.get(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}",
    response_model=AlertResponse,
    summary="Get alert by ID",
    description="Get a specific alert by its ID"
)
async def get_alert(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> AlertResponse:
    """Get a specific alert by ID"""
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            # Handle old format actions (strings) by converting to proper dict format
            actions = alert.smart_actions
            if actions and isinstance(actions, list) and len(actions) > 0:
                # Check if actions are strings (old format)
                if isinstance(actions[0], str):
                    # Convert old format to new format
                    actions = [
                        {
                            'action_type': action,
                            'label': action.replace('_', ' ').title(),
                            'variant': 'default',
                            'disabled': False
                        }
                        for action in actions
                    ]

            return AlertResponse(
                id=str(alert.id),
                tenant_id=str(alert.tenant_id),
                item_type=alert.item_type,
                alert_type=alert.alert_type,
                priority_level=alert.priority_level.value if hasattr(alert.priority_level, 'value') else alert.priority_level,
                priority_score=alert.priority_score,
                status=alert.status.value if hasattr(alert.status, 'value') else alert.status,
                service=alert.service,
                title=alert.title,
                message=alert.message,
                type_class=alert.type_class.value if hasattr(alert.type_class, 'value') else alert.type_class,
                actions=actions,  # Use converted actions
                alert_metadata=alert.alert_metadata,
                created_at=alert.created_at,
                updated_at=alert.updated_at,
                resolved_at=alert.resolved_at
            )

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error getting alert", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))
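

# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original commit): the old-format
# smart_actions conversion is duplicated verbatim in get_alerts and get_alert
# above; a shared helper like this hypothetical one could consolidate it.
# ---------------------------------------------------------------------------
def _normalize_smart_actions(actions):
    """Convert legacy string actions to the structured dict format."""
    if actions and isinstance(actions, list) and isinstance(actions[0], str):
        return [
            {
                'action_type': action,
                'label': action.replace('_', ' ').title(),
                'variant': 'default',
                'disabled': False
            }
            for action in actions
        ]
    return actions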


@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/cancel-auto-action",
    summary="Cancel auto-action for escalation alert",
    description="Cancel the pending auto-action for an escalation-type alert"
)
async def cancel_auto_action(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> dict:
    """
    Cancel the auto-action scheduled for an escalation alert.
    This prevents the system from automatically executing the action.
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import AlertStatus

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            # Verify this is an escalation alert
            type_class_value = alert.type_class.value if hasattr(alert.type_class, 'value') else alert.type_class
            if type_class_value != 'escalation':
                raise HTTPException(
                    status_code=400,
                    detail="Alert is not an escalation type, no auto-action to cancel"
                )

            # Update alert metadata to mark auto-action as cancelled
            alert.alert_metadata = alert.alert_metadata or {}
            alert.alert_metadata['auto_action_cancelled'] = True
            alert.alert_metadata['auto_action_cancelled_at'] = datetime.utcnow().isoformat()

            # Update urgency context to remove countdown
            if alert.urgency_context:
                alert.urgency_context['auto_action_countdown_seconds'] = None
                alert.urgency_context['auto_action_cancelled'] = True

            # Change type class from escalation to action_needed
            alert.type_class = 'action_needed'

            await session.commit()
            await session.refresh(alert)

            logger.info("Auto-action cancelled", alert_id=str(alert_id), tenant_id=str(tenant_id))

            return {
                "success": True,
                "alert_id": str(alert_id),
                "message": "Auto-action cancelled successfully",
                "updated_type_class": alert.type_class.value if hasattr(alert.type_class, 'value') else alert.type_class
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error cancelling auto-action", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))


@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/acknowledge",
    summary="Acknowledge alert",
    description="Mark alert as acknowledged"
)
async def acknowledge_alert(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> dict:
    """Mark an alert as acknowledged"""
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import AlertStatus

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            alert.status = AlertStatus.ACKNOWLEDGED
            await session.commit()

            logger.info("Alert acknowledged", alert_id=str(alert_id), tenant_id=str(tenant_id))

            return {
                "success": True,
                "alert_id": str(alert_id),
                "status": alert.status.value
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error acknowledging alert", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))


@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/resolve",
    summary="Resolve alert",
    description="Mark alert as resolved"
)
async def resolve_alert(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> dict:
    """Mark an alert as resolved"""
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import AlertStatus

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            alert.status = AlertStatus.RESOLVED
            alert.resolved_at = datetime.utcnow()
            await session.commit()

            logger.info("Alert resolved", alert_id=str(alert_id), tenant_id=str(tenant_id))

            return {
                "success": True,
                "alert_id": str(alert_id),
                "status": alert.status.value,
                "resolved_at": alert.resolved_at.isoformat()
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error resolving alert", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))


@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/digest/send",
    summary="Send email digest for alerts"
)
async def send_alert_digest(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(1, ge=1, le=7, description="Number of days to include in digest"),
    digest_type: str = Query("daily", description="Type of digest: daily or weekly"),
    user_email: str = Query(..., description="Email address to send digest to"),
    user_name: Optional[str] = Query(None, description="User name for personalization"),
    current_user: dict = Depends(get_current_user)
) -> dict:
    """
    Send email digest of alerts.

    Digest includes:
    - AI Impact Summary (prevented issues, savings)
    - Prevented Issues List with AI reasoning
    - Action Needed Alerts
    - Trend Warnings
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import Alert
    from app.services.enrichment.email_digest import EmailDigestService
    from sqlalchemy import select, and_
    from datetime import datetime, timedelta

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            cutoff_date = datetime.utcnow() - timedelta(days=days)

            # Fetch alerts from the specified period
            query = select(Alert).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            ).order_by(Alert.created_at.desc())

            result = await session.execute(query)
            alerts = result.scalars().all()

            if not alerts:
                return {
                    "success": False,
                    "message": "No alerts found for the specified period",
                    "alert_count": 0
                }

            # Send digest
            digest_service = EmailDigestService(config)

            if digest_type == "weekly":
                success = await digest_service.send_weekly_digest(
                    tenant_id=tenant_id,
                    alerts=alerts,
                    user_email=user_email,
                    user_name=user_name
                )
            else:
                success = await digest_service.send_daily_digest(
                    tenant_id=tenant_id,
                    alerts=alerts,
                    user_email=user_email,
                    user_name=user_name
                )

            return {
                "success": success,
                "message": f"{'Successfully sent' if success else 'Failed to send'} {digest_type} digest",
                "alert_count": len(alerts),
                "digest_type": digest_type,
                "recipient": user_email
            }

    except Exception as e:
        logger.error("Error sending email digest", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to send email digest: {str(e)}")


@router.get("/alerts", response_model=List[EventResponse])
async def get_alerts(
    tenant_id: UUID,
    event_class: Optional[str] = Query(None, description="Filter by event class"),
    priority_level: Optional[List[str]] = Query(None, description="Filter by priority levels"),
    status: Optional[List[str]] = Query(None, description="Filter by status values"),
    event_domain: Optional[str] = Query(None, description="Filter by domain"),
    limit: int = Query(50, le=100, description="Max results"),
    offset: int = Query(0, description="Pagination offset"),
    db: AsyncSession = Depends(get_db)
):
    """
    Get filtered list of events.

    Query Parameters:
    - event_class: alert, notification, recommendation
    - priority_level: critical, important, standard, info
    - status: active, acknowledged, resolved, dismissed
    - event_domain: inventory, production, supply_chain, etc.
    - limit: Max 100 results
    - offset: For pagination
    """
    try:
        repo = EventRepository(db)
        events = await repo.get_events(
            tenant_id=tenant_id,
            event_class=event_class,
            priority_level=priority_level,
            status=status,
            event_domain=event_domain,
            limit=limit,
            offset=offset
        )

        # Convert to response models
        return [repo._event_to_response(event) for event in events]

    except Exception as e:
        logger.error("get_alerts_failed", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to retrieve alerts")
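

# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original commit): invoking the
# send_alert_digest endpoint above. The base URL, port, and absence of auth
# headers are assumptions; httpx is not necessarily a project dependency.
# ---------------------------------------------------------------------------
async def _example_trigger_daily_digest(tenant_id: str, email: str) -> dict:
    """Editorial sketch; requires httpx (an assumption)."""
    import httpx  # local import so the sketch does not add a hard dependency

    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            f"/api/v1/tenants/{tenant_id}/alerts/digest/send",
            params={"days": 1, "digest_type": "daily", "user_email": email},
        )
        resp.raise_for_status()
        return resp.json()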


@router.get("/alerts/summary", response_model=EventSummary)
async def get_alerts_summary(
    tenant_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Get summary statistics for dashboard.

    Returns counts by:
    - Status (active, acknowledged, resolved)
    - Priority level (critical, important, standard, info)
    - Domain (inventory, production, etc.)
    - Type class (action_needed, prevented_issue, etc.)
    """
    try:
        repo = EventRepository(db)
        summary = await repo.get_summary(tenant_id)
        return summary

    except Exception as e:
        logger.error("get_summary_failed", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to retrieve summary")


@router.get("/alerts/{alert_id}", response_model=EventResponse)
async def get_alert(
    tenant_id: UUID,
    alert_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """Get single alert by ID"""
    try:
        repo = EventRepository(db)
        event = await repo.get_event_by_id(alert_id)

        if not event:
            raise HTTPException(status_code=404, detail="Alert not found")

        # Verify tenant ownership
        if event.tenant_id != tenant_id:
            raise HTTPException(status_code=403, detail="Access denied")

        return repo._event_to_response(event)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("get_alert_failed", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail="Failed to retrieve alert")


@router.post("/alerts/{alert_id}/acknowledge", response_model=EventResponse)
async def acknowledge_alert(
    tenant_id: UUID,
    alert_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Mark alert as acknowledged.

    Sets status to 'acknowledged' and records timestamp.
    """
    try:
        repo = EventRepository(db)

        # Verify ownership first
        event = await repo.get_event_by_id(alert_id)
        if not event:
            raise HTTPException(status_code=404, detail="Alert not found")
        if event.tenant_id != tenant_id:
            raise HTTPException(status_code=403, detail="Access denied")

        # Acknowledge
        updated_event = await repo.acknowledge_event(alert_id)
        return repo._event_to_response(updated_event)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("acknowledge_alert_failed", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail="Failed to acknowledge alert")


@router.post("/alerts/{alert_id}/resolve", response_model=EventResponse)
async def resolve_alert(
    tenant_id: UUID,
    alert_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Mark alert as resolved.

    Sets status to 'resolved' and records timestamp.
    """
    try:
        repo = EventRepository(db)

        # Verify ownership first
        event = await repo.get_event_by_id(alert_id)
        if not event:
            raise HTTPException(status_code=404, detail="Alert not found")
        if event.tenant_id != tenant_id:
            raise HTTPException(status_code=403, detail="Access denied")

        # Resolve
        updated_event = await repo.resolve_event(alert_id)
        return repo._event_to_response(updated_event)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("resolve_alert_failed", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail="Failed to resolve alert")


@router.post("/alerts/{alert_id}/dismiss", response_model=EventResponse)
async def dismiss_alert(
    tenant_id: UUID,
    alert_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Mark alert as dismissed.

    Sets status to 'dismissed'.
    """
    try:
        repo = EventRepository(db)

        # Verify ownership first
        event = await repo.get_event_by_id(alert_id)
        if not event:
            raise HTTPException(status_code=404, detail="Alert not found")
        if event.tenant_id != tenant_id:
            raise HTTPException(status_code=403, detail="Access denied")

        # Dismiss
        updated_event = await repo.dismiss_event(alert_id)
        return repo._event_to_response(updated_event)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("dismiss_alert_failed", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail="Failed to dismiss alert")


@router.post("/alerts/{alert_id}/cancel-auto-action")
async def cancel_auto_action(
    tenant_id: UUID,
    alert_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Cancel an alert's auto-action (escalation countdown).

    Changes type_class from 'escalation' to 'action_needed' if auto-action was pending.
    """
    try:
        repo = EventRepository(db)

        # Verify ownership first
        event = await repo.get_event_by_id(alert_id)
        if not event:
            raise HTTPException(status_code=404, detail="Alert not found")
        if event.tenant_id != tenant_id:
            raise HTTPException(status_code=403, detail="Access denied")

        # Cancel auto-action (you'll need to implement this in repository)
        # For now, return success response
        return {
            "success": True,
            "event_id": str(alert_id),
            "message": "Auto-action cancelled successfully",
            "updated_type_class": "action_needed"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("cancel_auto_action_failed", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail="Failed to cancel auto-action")


@router.post("/alerts/bulk-acknowledge")
async def bulk_acknowledge_alerts(
    tenant_id: UUID,
    request_body: dict,
    db: AsyncSession = Depends(get_db)
):
    """
    Acknowledge multiple alerts by metadata filter.

    Request body:
    {
        "alert_type": "critical_stock_shortage",
        "metadata_filter": {"ingredient_id": "123"}
    }
    """
    try:
        alert_type = request_body.get("alert_type")
        metadata_filter = request_body.get("metadata_filter", {})

        if not alert_type:
            raise HTTPException(status_code=400, detail="alert_type is required")

        repo = EventRepository(db)

        # Get matching alerts
        events = await repo.get_events(
            tenant_id=tenant_id,
            event_class="alert",
            status=["active"],
            limit=100
        )

        # Filter by type and metadata
        matching_ids = []
        for event in events:
            if event.event_type == alert_type:
                # Check if metadata matches
                matches = all(
                    event.event_metadata.get(key) == value
                    for key, value in metadata_filter.items()
                )
                if matches:
                    matching_ids.append(event.id)

        # Acknowledge all matching
        acknowledged_count = 0
        for event_id in matching_ids:
            try:
                await repo.acknowledge_event(event_id)
                acknowledged_count += 1
            except Exception:
                pass  # Continue with others

        return {
            "success": True,
            "acknowledged_count": acknowledged_count,
            "alert_ids": [str(id) for id in matching_ids]
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("bulk_acknowledge_failed", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to bulk acknowledge alerts")
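

# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original commit): the all(...) metadata
# matching used by the bulk endpoints, extracted into a hypothetical helper.
# An event with event_metadata {"ingredient_id": "123", "location": "A"}
# matches the filter {"ingredient_id": "123"}.
# ---------------------------------------------------------------------------
def _metadata_matches(event_metadata: dict, metadata_filter: dict) -> bool:
    """True if every key/value pair in the filter is present in the event."""
    return all(event_metadata.get(k) == v for k, v in metadata_filter.items())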


@router.post("/alerts/bulk-resolve")
async def bulk_resolve_alerts(
    tenant_id: UUID,
    request_body: dict,
    db: AsyncSession = Depends(get_db)
):
    """
    Resolve multiple alerts by metadata filter.

    Request body:
    {
        "alert_type": "critical_stock_shortage",
        "metadata_filter": {"ingredient_id": "123"}
    }
    """
    try:
        alert_type = request_body.get("alert_type")
        metadata_filter = request_body.get("metadata_filter", {})

        if not alert_type:
            raise HTTPException(status_code=400, detail="alert_type is required")

        repo = EventRepository(db)

        # Get matching alerts
        events = await repo.get_events(
            tenant_id=tenant_id,
            event_class="alert",
            status=["active", "acknowledged"],
            limit=100
        )

        # Filter by type and metadata
        matching_ids = []
        for event in events:
            if event.event_type == alert_type:
                # Check if metadata matches
                matches = all(
                    event.event_metadata.get(key) == value
                    for key, value in metadata_filter.items()
                )
                if matches:
                    matching_ids.append(event.id)

        # Resolve all matching
        resolved_count = 0
        for event_id in matching_ids:
            try:
                await repo.resolve_event(event_id)
                resolved_count += 1
            except Exception:
                pass  # Continue with others

        return {
            "success": True,
            "resolved_count": resolved_count,
            "alert_ids": [str(id) for id in matching_ids]
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("bulk_resolve_failed", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to bulk resolve alerts")


@router.post("/events/{event_id}/interactions")
async def record_interaction(
    tenant_id: UUID,
    event_id: UUID,
    request_body: dict,
    db: AsyncSession = Depends(get_db)
):
    """
    Record user interaction with an event (for analytics).

    Request body:
    {
        "interaction_type": "viewed" | "clicked" | "dismissed" | "acted_upon",
        "interaction_metadata": {...}
    }
    """
    try:
        interaction_type = request_body.get("interaction_type")
        interaction_metadata = request_body.get("interaction_metadata", {})

        if not interaction_type:
            raise HTTPException(status_code=400, detail="interaction_type is required")

        repo = EventRepository(db)

        # Verify event exists and belongs to tenant
        event = await repo.get_event_by_id(event_id)
        if not event:
            raise HTTPException(status_code=404, detail="Event not found")
        if event.tenant_id != tenant_id:
            raise HTTPException(status_code=403, detail="Access denied")

        # For now, just return success
        # In the future, you could store interactions in a separate table
        logger.info(
            "interaction_recorded",
            event_id=str(event_id),
            interaction_type=interaction_type,
            metadata=interaction_metadata
        )

        return {
            "success": True,
            "interaction_id": str(event_id),  # Would be a real ID in production
            "event_id": str(event_id),
            "interaction_type": interaction_type
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("record_interaction_failed", error=str(e), event_id=str(event_id))
        raise HTTPException(status_code=500, detail="Failed to record interaction")

@@ -1,520 +0,0 @@
"""
Alert Analytics API Endpoints
"""

from fastapi import APIRouter, Depends, HTTPException, Path, Body, Query
from typing import List, Dict, Any, Optional
from uuid import UUID
from pydantic import BaseModel, Field
import structlog

from shared.auth.decorators import get_current_user_dep
from shared.auth.access_control import service_only_access

logger = structlog.get_logger()

router = APIRouter()


# Schemas
class InteractionCreate(BaseModel):
    """Schema for creating an alert interaction"""
    alert_id: str = Field(..., description="Alert ID")
    interaction_type: str = Field(..., description="Type of interaction: acknowledged, resolved, snoozed, dismissed")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")


class InteractionBatchCreate(BaseModel):
    """Schema for creating multiple interactions"""
    interactions: List[Dict[str, Any]] = Field(..., description="List of interactions to create")


class AnalyticsResponse(BaseModel):
    """Schema for analytics response"""
    trends: List[Dict[str, Any]]
    averageResponseTime: int
    topCategories: List[Dict[str, Any]]
    totalAlerts: int
    resolvedAlerts: int
    activeAlerts: int
    resolutionRate: int
    predictedDailyAverage: int
    busiestDay: str


async def get_analytics_repository(current_user: dict = Depends(get_current_user_dep)):
    """Dependency that yields an analytics repository bound to a fresh session"""
    from app.repositories.analytics_repository import AlertAnalyticsRepository
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    config = AlertProcessorConfig()
    db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

    async with db_manager.get_session() as session:
        yield AlertAnalyticsRepository(session)


@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/interactions",
    response_model=Dict[str, Any],
    summary="Track alert interaction"
)
async def create_interaction(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID"),
    interaction: InteractionCreate = Body(...),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Track a user interaction with an alert

    - **acknowledged**: User has seen and acknowledged the alert
    - **resolved**: User has resolved the alert
    - **snoozed**: User has snoozed the alert
    - **dismissed**: User has dismissed the alert
    """
    from app.repositories.analytics_repository import AlertAnalyticsRepository
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertAnalyticsRepository(session)

            alert_interaction = await repo.create_interaction(
                tenant_id=tenant_id,
                alert_id=alert_id,
                user_id=UUID(current_user['user_id']),
                interaction_type=interaction.interaction_type,
                metadata=interaction.metadata
            )

            return {
                'id': str(alert_interaction.id),
                'alert_id': str(alert_interaction.alert_id),
                'interaction_type': alert_interaction.interaction_type,
                'interacted_at': alert_interaction.interacted_at.isoformat(),
                'response_time_seconds': alert_interaction.response_time_seconds
            }
    except ValueError as e:
        logger.error("Invalid alert interaction", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error("Failed to create alert interaction", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=f"Failed to create interaction: {str(e)}")
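

# ---------------------------------------------------------------------------
# Editorial example (not part of the original commit): a request body for the
# endpoint above, matching the InteractionCreate schema (values illustrative):
# POST /api/v1/tenants/{tenant_id}/alerts/{alert_id}/interactions
# {
#     "alert_id": "<alert-uuid>",
#     "interaction_type": "acknowledged",
#     "metadata": {"source": "dashboard"}
# }
# ---------------------------------------------------------------------------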


@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/interactions/batch",
    response_model=Dict[str, Any],
    summary="Track multiple alert interactions"
)
async def create_interactions_batch(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    batch: InteractionBatchCreate = Body(...),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Track multiple alert interactions in a single request
    Useful for offline sync or bulk operations
    """
    from app.repositories.analytics_repository import AlertAnalyticsRepository
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertAnalyticsRepository(session)

            # Add user_id to each interaction
            for interaction in batch.interactions:
                interaction['user_id'] = current_user['user_id']

            created_interactions = await repo.create_interactions_batch(
                tenant_id=tenant_id,
                interactions=batch.interactions
            )

            return {
                'created_count': len(created_interactions),
                'interactions': [
                    {
                        'id': str(i.id),
                        'alert_id': str(i.alert_id),
                        'interaction_type': i.interaction_type,
                        'interacted_at': i.interacted_at.isoformat()
                    }
                    for i in created_interactions
                ]
            }
    except Exception as e:
        logger.error("Failed to create batch interactions", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to create batch interactions: {str(e)}")
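

# ---------------------------------------------------------------------------
# Editorial example (not part of the original commit): a batch payload
# matching InteractionBatchCreate; user_id is injected server-side above.
# {
#     "interactions": [
#         {"alert_id": "<alert-uuid>", "interaction_type": "acknowledged"},
#         {"alert_id": "<alert-uuid>", "interaction_type": "resolved"}
#     ]
# }
# ---------------------------------------------------------------------------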


@router.get(
    "/api/v1/tenants/{tenant_id}/alerts/analytics",
    response_model=AnalyticsResponse,
    summary="Get alert analytics"
)
async def get_analytics(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(7, ge=1, le=90, description="Number of days to analyze"),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Get comprehensive analytics for alerts

    Returns:
    - 7-day trend chart with severity breakdown
    - Average response time (time to acknowledgment)
    - Top 3 alert categories
    - Total alerts, resolved, active counts
    - Resolution rate percentage
    - Predicted daily average
    - Busiest day of week
    """
    from app.repositories.analytics_repository import AlertAnalyticsRepository
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertAnalyticsRepository(session)

            analytics = await repo.get_full_analytics(
                tenant_id=tenant_id,
                days=days
            )

            return analytics
    except Exception as e:
        logger.error("Failed to get alert analytics", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to get analytics: {str(e)}")


@router.get(
    "/api/v1/tenants/{tenant_id}/alerts/analytics/trends",
    response_model=List[Dict[str, Any]],
    summary="Get alert trends"
)
async def get_trends(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(7, ge=1, le=90, description="Number of days to analyze"),
    current_user: dict = Depends(get_current_user_dep)
):
    """Get alert trends over time with severity breakdown"""
    from app.repositories.analytics_repository import AlertAnalyticsRepository
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertAnalyticsRepository(session)

            trends = await repo.get_analytics_trends(
                tenant_id=tenant_id,
                days=days
            )

            return trends
    except Exception as e:
        logger.error("Failed to get alert trends", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to get trends: {str(e)}")


@router.get(
    "/api/v1/tenants/{tenant_id}/alerts/analytics/dashboard",
    response_model=Dict[str, Any],
    summary="Get enriched alert analytics for dashboard"
)
async def get_dashboard_analytics(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=1, le=90, description="Number of days to analyze"),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Get enriched alert analytics optimized for dashboard display.

    Returns metrics based on the new enrichment system:
    - AI handling rate (% of prevented_issue alerts)
    - Priority distribution (critical, important, standard, info)
    - Type class breakdown (action_needed, prevented_issue, trend_warning, etc.)
    - Total financial impact at risk
    - Average response time by priority level
    - Prevented issues and estimated savings
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import Alert, AlertStatus, AlertTypeClass, PriorityLevel
    from sqlalchemy import select, func, and_
    from datetime import datetime, timedelta

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            cutoff_date = datetime.utcnow() - timedelta(days=days)

            # Total alerts
            total_query = select(func.count(Alert.id)).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            )
            total_result = await session.execute(total_query)
            total_alerts = total_result.scalar() or 0

            # Priority distribution
            priority_query = select(
                Alert.priority_level,
                func.count(Alert.id).label('count')
            ).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            ).group_by(Alert.priority_level)

            priority_result = await session.execute(priority_query)
            priority_dist = {row.priority_level: row.count for row in priority_result}

            # Type class distribution
            type_class_query = select(
                Alert.type_class,
                func.count(Alert.id).label('count')
            ).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            ).group_by(Alert.type_class)

            type_class_result = await session.execute(type_class_query)
            type_class_dist = {row.type_class: row.count for row in type_class_result}

            # AI handling metrics
            prevented_count = type_class_dist.get(AlertTypeClass.PREVENTED_ISSUE, 0)
            ai_handling_percentage = (prevented_count / total_alerts * 100) if total_alerts > 0 else 0

            # Financial impact - sum all business_impact.financial_impact_eur from active alerts
            active_alerts_query = select(Alert.id, Alert.business_impact).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.status == AlertStatus.ACTIVE
                )
            )
            active_alerts_result = await session.execute(active_alerts_query)
            active_alerts = active_alerts_result.all()

            total_financial_impact = sum(
                (alert.business_impact or {}).get('financial_impact_eur', 0)
                for alert in active_alerts
            )

            # Prevented issues savings
            prevented_alerts_query = select(Alert.id, Alert.orchestrator_context).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.type_class == AlertTypeClass.PREVENTED_ISSUE,
                    Alert.created_at >= cutoff_date
                )
            )
            prevented_alerts_result = await session.execute(prevented_alerts_query)
            prevented_alerts = prevented_alerts_result.all()

            estimated_savings = sum(
                (alert.orchestrator_context or {}).get('estimated_savings_eur', 0)
                for alert in prevented_alerts
            )

            # Active alerts by type class
            active_by_type_query = select(
                Alert.type_class,
                func.count(Alert.id).label('count')
            ).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.status == AlertStatus.ACTIVE
                )
            ).group_by(Alert.type_class)

            active_by_type_result = await session.execute(active_by_type_query)
            active_by_type = {row.type_class: row.count for row in active_by_type_result}

            # Get period comparison for trends
            from app.repositories.analytics_repository import AlertAnalyticsRepository
            analytics_repo = AlertAnalyticsRepository(session)
            period_comparison = await analytics_repo.get_period_comparison(
                tenant_id=tenant_id,
                current_days=days,
                previous_days=days
            )

            return {
                "period_days": days,
                "total_alerts": total_alerts,
                "active_alerts": len(active_alerts),
                "ai_handling_rate": round(ai_handling_percentage, 1),
                "prevented_issues_count": prevented_count,
                "estimated_savings_eur": round(estimated_savings, 2),
                "total_financial_impact_at_risk_eur": round(total_financial_impact, 2),
                "priority_distribution": {
                    "critical": priority_dist.get(PriorityLevel.CRITICAL, 0),
                    "important": priority_dist.get(PriorityLevel.IMPORTANT, 0),
                    "standard": priority_dist.get(PriorityLevel.STANDARD, 0),
                    "info": priority_dist.get(PriorityLevel.INFO, 0)
                },
                "type_class_distribution": {
                    "action_needed": type_class_dist.get(AlertTypeClass.ACTION_NEEDED, 0),
                    "prevented_issue": type_class_dist.get(AlertTypeClass.PREVENTED_ISSUE, 0),
                    "trend_warning": type_class_dist.get(AlertTypeClass.TREND_WARNING, 0),
                    "escalation": type_class_dist.get(AlertTypeClass.ESCALATION, 0),
                    "information": type_class_dist.get(AlertTypeClass.INFORMATION, 0)
                },
                "active_by_type_class": active_by_type,
                "period_comparison": period_comparison
            }

    except Exception as e:
        logger.error("Failed to get dashboard analytics", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to get dashboard analytics: {str(e)}")
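

# ---------------------------------------------------------------------------
# Editorial worked example (not part of the original commit): with 40 alerts
# in the window, 8 of them prevented_issue, the metrics above come out as
# ai_handling_rate = 8 / 40 * 100 = 20.0, and estimated_savings_eur is the
# sum of orchestrator_context['estimated_savings_eur'] over those 8 alerts.
# ---------------------------------------------------------------------------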


# ============================================================================
# Tenant Data Deletion Operations (Internal Service Only)
# ============================================================================

@router.delete(
    "/api/v1/alerts/tenant/{tenant_id}",
    response_model=dict
)
@service_only_access
async def delete_tenant_data(
    tenant_id: str = Path(..., description="Tenant ID to delete data for"),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Delete all alert data for a tenant (Internal service only)

    This endpoint is called by the orchestrator during tenant deletion.
    It permanently deletes all alert-related data including:
    - Alerts (all types and severities)
    - Alert interactions
    - Audit logs

    **WARNING**: This operation is irreversible!

    Returns:
        Deletion summary with counts of deleted records
    """
    from app.services.tenant_deletion_service import AlertProcessorTenantDeletionService
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        logger.info("alert_processor.tenant_deletion.api_called", tenant_id=tenant_id)

        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            deletion_service = AlertProcessorTenantDeletionService(session)
            result = await deletion_service.safe_delete_tenant_data(tenant_id)

            if not result.success:
                raise HTTPException(
                    status_code=500,
                    detail=f"Tenant data deletion failed: {', '.join(result.errors)}"
                )

            return {
                "message": "Tenant data deletion completed successfully",
                "summary": result.to_dict()
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("alert_processor.tenant_deletion.api_error",
                     tenant_id=tenant_id,
                     error=str(e),
                     exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Failed to delete tenant data: {str(e)}"
        )


@router.get(
    "/api/v1/alerts/tenant/{tenant_id}/deletion-preview",
    response_model=dict
)
@service_only_access
async def preview_tenant_data_deletion(
    tenant_id: str = Path(..., description="Tenant ID to preview deletion for"),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Preview what data would be deleted for a tenant (dry-run)

    This endpoint shows counts of all data that would be deleted
    without actually deleting anything. Useful for:
    - Confirming deletion scope before execution
    - Auditing and compliance
    - Troubleshooting

    Returns:
        Dictionary with entity names and their counts
    """
    from app.services.tenant_deletion_service import AlertProcessorTenantDeletionService
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager

    try:
        logger.info("alert_processor.tenant_deletion.preview_called", tenant_id=tenant_id)

        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            deletion_service = AlertProcessorTenantDeletionService(session)
            preview = await deletion_service.get_tenant_data_preview(tenant_id)

            total_records = sum(preview.values())

            return {
                "tenant_id": tenant_id,
                "service": "alert_processor",
                "preview": preview,
                "total_records": total_records,
                "warning": "These records will be permanently deleted and cannot be recovered"
            }

    except Exception as e:
        logger.error("alert_processor.tenant_deletion.preview_error",
                     tenant_id=tenant_id,
                     error=str(e),
                     exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Failed to preview tenant data deletion: {str(e)}"
        )
@@ -1,303 +0,0 @@
"""
Internal Demo Cloning API for Alert Processor Service
Service-to-service endpoint for cloning alert data
"""

from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, delete, func
import structlog
import uuid
from datetime import datetime, timezone, timedelta
from typing import Optional, Dict, Any
import os

from app.repositories.alerts_repository import AlertsRepository
from app.models.events import Alert, AlertStatus, AlertTypeClass
from app.config import AlertProcessorConfig
import sys
from pathlib import Path

# Add shared utilities to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
from shared.database.base import create_database_manager

from app.core.config import settings

logger = structlog.get_logger()
router = APIRouter(prefix="/internal/demo", tags=["internal"])

# Database manager for this module
config = AlertProcessorConfig()
db_manager = create_database_manager(config.DATABASE_URL, "alert-processor-internal-demo")

# Dependency to get database session
async def get_db():
    """Get database session for internal demo operations"""
    async with db_manager.get_session() as session:
        yield session

# Base demo tenant IDs
DEMO_TENANT_PROFESSIONAL = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
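

# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original commit): a plausible reading of
# adjust_date_for_demo from shared.utils.demo_dates; the real implementation
# may differ. The idea: template data is authored against BASE_REFERENCE_DATE,
# so each timestamp is shifted by the session's offset from that reference.
# Kept commented out so it does not shadow the imported name.
# ---------------------------------------------------------------------------
# def adjust_date_for_demo(original, session_time, base_reference):
#     if original is None:
#         return None
#     return original + (session_time - base_reference)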


def verify_internal_api_key(x_internal_api_key: Optional[str] = Header(None)):
    """Verify internal API key for service-to-service communication"""
    if x_internal_api_key != settings.INTERNAL_API_KEY:
        logger.warning("Unauthorized internal API access attempted")
        raise HTTPException(status_code=403, detail="Invalid internal API key")
    return True
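

# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original commit): how a calling
# service might hit the clone endpoint below. The hostname, port, and the env
# var holding the key are assumptions; the header name matches the
# x_internal_api_key parameter of verify_internal_api_key above.
# ---------------------------------------------------------------------------
async def _example_clone_call(base_tenant_id: str, virtual_tenant_id: str) -> dict:
    """Editorial sketch; requires httpx (an assumption)."""
    import httpx  # local import so the sketch does not add a hard dependency

    async with httpx.AsyncClient(base_url="http://alert-processor:8000") as client:
        resp = await client.post(
            "/internal/demo/clone",
            params={
                "base_tenant_id": base_tenant_id,
                "virtual_tenant_id": virtual_tenant_id,
                "demo_account_type": "professional",
            },
            headers={"x-internal-api-key": os.environ["INTERNAL_API_KEY"]},
        )
        resp.raise_for_status()
        return resp.json()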


@router.post("/clone")
async def clone_demo_data(
    base_tenant_id: str,
    virtual_tenant_id: str,
    demo_account_type: str,
    session_id: Optional[str] = None,
    session_created_at: Optional[str] = None,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
    """
    Clone alert service data for a virtual demo tenant.

    Clones:
    - Action-needed alerts (PO approvals, delivery tracking, low stock warnings, production delays)
    - Prevented-issue alerts (AI interventions with financial impact)
    - Historical trend data over the past 7 days

    Args:
        base_tenant_id: Template tenant UUID to clone from
        virtual_tenant_id: Target virtual tenant UUID
        demo_account_type: Type of demo account
        session_id: Originating session ID for tracing
        session_created_at: Session creation timestamp for date adjustment

    Returns:
        Cloning status and record counts
    """
    start_time = datetime.now(timezone.utc)

    # Parse session creation time for date adjustment
    if session_created_at:
        try:
            session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
        except (ValueError, AttributeError):
            session_time = start_time
    else:
        session_time = start_time

    logger.info(
        "Starting alert data cloning",
        base_tenant_id=base_tenant_id,
        virtual_tenant_id=virtual_tenant_id,
        demo_account_type=demo_account_type,
        session_id=session_id,
        session_created_at=session_created_at
    )

    try:
        # Validate UUIDs
        base_uuid = uuid.UUID(base_tenant_id)
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Track cloning statistics
        stats = {
            "alerts": 0,
            "action_needed": 0,
            "prevented_issues": 0,
            "historical_alerts": 0
        }

        # Clone alerts
        result = await db.execute(
            select(Alert).where(Alert.tenant_id == base_uuid)
        )
        base_alerts = result.scalars().all()

        logger.info(
            "Found alerts to clone",
            count=len(base_alerts),
            base_tenant=str(base_uuid)
        )

        for alert in base_alerts:
            # Adjust dates relative to session creation time
            adjusted_created_at = adjust_date_for_demo(
                alert.created_at, session_time, BASE_REFERENCE_DATE
            ) if alert.created_at else session_time

            adjusted_updated_at = adjust_date_for_demo(
                alert.updated_at, session_time, BASE_REFERENCE_DATE
            ) if alert.updated_at else session_time

            adjusted_resolved_at = adjust_date_for_demo(
                alert.resolved_at, session_time, BASE_REFERENCE_DATE
            ) if alert.resolved_at else None

            adjusted_action_created_at = adjust_date_for_demo(
                alert.action_created_at, session_time, BASE_REFERENCE_DATE
            ) if alert.action_created_at else None

            adjusted_scheduled_send_time = adjust_date_for_demo(
                alert.scheduled_send_time, session_time, BASE_REFERENCE_DATE
            ) if alert.scheduled_send_time else None

            # Update urgency context with adjusted dates if present
            urgency_context = alert.urgency_context.copy() if alert.urgency_context else {}
            if urgency_context.get("expected_delivery"):
                try:
                    original_delivery = datetime.fromisoformat(urgency_context["expected_delivery"].replace('Z', '+00:00'))
                    adjusted_delivery = adjust_date_for_demo(original_delivery, session_time, BASE_REFERENCE_DATE)
                    urgency_context["expected_delivery"] = adjusted_delivery.isoformat() if adjusted_delivery else None
                except (ValueError, AttributeError):
                    pass  # Keep the original value if parsing fails

            new_alert = Alert(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                item_type=alert.item_type,
                alert_type=alert.alert_type,
                service=alert.service,
                title=alert.title,
                message=alert.message,
                status=alert.status,
                priority_score=alert.priority_score,
                priority_level=alert.priority_level,
                type_class=alert.type_class,
                orchestrator_context=alert.orchestrator_context,
                business_impact=alert.business_impact,
                urgency_context=urgency_context,
                user_agency=alert.user_agency,
                trend_context=alert.trend_context,
                smart_actions=alert.smart_actions,
                ai_reasoning_summary=alert.ai_reasoning_summary,
                confidence_score=alert.confidence_score,
                timing_decision=alert.timing_decision,
                scheduled_send_time=adjusted_scheduled_send_time,
                placement=alert.placement,
                action_created_at=adjusted_action_created_at,
                superseded_by_action_id=None,  # Don't clone superseded relationships
                hidden_from_ui=alert.hidden_from_ui,
                alert_metadata=alert.alert_metadata,
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at,
                resolved_at=adjusted_resolved_at
            )
            db.add(new_alert)
            stats["alerts"] += 1

            # Track by type_class
            if alert.type_class == "action_needed":
                stats["action_needed"] += 1
            elif alert.type_class == "prevented_issue":
                stats["prevented_issues"] += 1

            # Track historical alerts (older than 1 day)
            if adjusted_created_at < session_time - timedelta(days=1):
                stats["historical_alerts"] += 1

        # Commit cloned data
        await db.commit()

        total_records = stats["alerts"]
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
            "Alert data cloning completed",
            virtual_tenant_id=virtual_tenant_id,
            total_records=total_records,
            stats=stats,
            duration_ms=duration_ms
        )

        return {
            "service": "alert_processor",
            "status": "completed",
            "records_cloned": total_records,
            "duration_ms": duration_ms,
            "details": stats
        }

    except ValueError as e:
        logger.error("Invalid UUID format", error=str(e))
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")

    except Exception as e:
        logger.error(
            "Failed to clone alert data",
            error=str(e),
            virtual_tenant_id=virtual_tenant_id,
            exc_info=True
        )

        # Roll back on error
        await db.rollback()

        return {
            "service": "alert_processor",
            "status": "failed",
            "records_cloned": 0,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
            "error": str(e)
        }
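`adjust_date_for_demo` and `BASE_REFERENCE_DATE` come from `shared/utils/demo_dates.py`, which is not included in this hunk. Judging by how the helper is called above (original timestamp, session time, fixed reference date), a minimal sketch of such a helper would shift each template timestamp by the offset between the demo session's start and the reference date; the value of the reference date below is an assumption:

```python
# Hypothetical sketch of the shared helper; the real implementation in
# shared/utils/demo_dates.py is not shown in this commit.
from datetime import datetime, timezone

BASE_REFERENCE_DATE = datetime(2025, 1, 1, tzinfo=timezone.utc)  # assumed value

def adjust_date_for_demo(original: datetime, session_time: datetime,
                         base_reference: datetime) -> datetime:
    # Preserve each timestamp's distance from the template's reference
    # date, re-anchored to the moment the demo session was created.
    return session_time + (original - base_reference)
```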


@router.get("/clone/health")
async def clone_health_check(_: bool = Depends(verify_internal_api_key)):
    """
    Health check for the internal cloning endpoint.

    Used by the orchestrator to verify service availability.
    """
    return {
        "service": "alert_processor",
        "clone_endpoint": "available",
        "version": "2.0.0"
    }


@router.delete("/tenant/{virtual_tenant_id}")
async def delete_demo_data(
    virtual_tenant_id: str,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
    """Delete all alert data for a virtual demo tenant"""
    logger.info("Deleting alert data for virtual tenant", virtual_tenant_id=virtual_tenant_id)
    start_time = datetime.now(timezone.utc)

    try:
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Count records before deletion
        alert_count = await db.scalar(
            select(func.count(Alert.id)).where(Alert.tenant_id == virtual_uuid)
        )

        # Delete alerts
        await db.execute(delete(Alert).where(Alert.tenant_id == virtual_uuid))
        await db.commit()

        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
        logger.info(
            "Alert data deleted successfully",
            virtual_tenant_id=virtual_tenant_id,
            duration_ms=duration_ms
        )

        return {
            "service": "alert_processor",
            "status": "deleted",
            "virtual_tenant_id": virtual_tenant_id,
            "records_deleted": {
                "alerts": alert_count,
                "total": alert_count
            },
            "duration_ms": duration_ms
        }
    except ValueError as e:
        # Mirror the clone endpoint: report a malformed UUID as a client error
        logger.error("Invalid UUID format", error=str(e))
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")
    except Exception as e:
        logger.error("Failed to delete alert data", error=str(e), exc_info=True)
        await db.rollback()
        raise HTTPException(status_code=500, detail=str(e))
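On session teardown the orchestrator would presumably call this endpoint once per service. A sketch of that cleanup call, under the same illustrative assumptions as the clone example above:

```python
# Illustrative cleanup call -- not part of this commit.
import httpx

async def teardown_demo_tenant(base_url: str, internal_api_key: str,
                               virtual_tenant_id: str) -> dict:
    async with httpx.AsyncClient(base_url=base_url, timeout=30.0) as client:
        resp = await client.delete(
            f"/internal/demo/tenant/{virtual_tenant_id}",
            headers={"X-Internal-API-Key": internal_api_key},
        )
        resp.raise_for_status()
        # Expected shape per the endpoint above: {"service": ...,
        # "status": "deleted", "records_deleted": {...}, "duration_ms": ...}
        return resp.json()
```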

@@ -0,0 +1,70 @@
# services/alert_processor/app/api/sse.py
"""
Server-Sent Events (SSE) API endpoint.
"""

from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
from uuid import UUID
from redis.asyncio import Redis
import structlog

from shared.redis_utils import get_redis_client
from app.services.sse_service import SSEService

logger = structlog.get_logger()

router = APIRouter()


@router.get("/sse/alerts/{tenant_id}")
async def stream_alerts(tenant_id: UUID):
    """
    Stream real-time alerts via Server-Sent Events (SSE).

    Usage (frontend):
    ```javascript
    const eventSource = new EventSource('/api/v1/sse/alerts/{tenant_id}');
    eventSource.onmessage = (event) => {
        const alert = JSON.parse(event.data);
        console.log('New alert:', alert);
    };
    ```

    Response format:
    ```
    data: {"id": "...", "event_type": "...", ...}

    data: {"id": "...", "event_type": "...", ...}
    ```
    """
    # Get Redis client from shared utilities
    redis = await get_redis_client()
    try:
        sse_service = SSEService(redis)

        async def event_generator():
            """Generator for the SSE stream"""
            try:
                async for message in sse_service.subscribe_to_tenant(str(tenant_id)):
                    # Format as an SSE message
                    yield f"data: {message}\n\n"
            except Exception as e:
                logger.error("sse_stream_error", error=str(e), tenant_id=str(tenant_id))
                # Send an error event and close the stream
                yield f"event: error\ndata: {str(e)}\n\n"

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no"  # Disable nginx buffering
            }
        )
    except Exception as e:
        logger.error("sse_setup_failed", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail="Failed to setup SSE stream")
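`SSEService` lives in `app/services/sse_service.py`, which is outside this excerpt. Given that it is constructed with a Redis client and exposes an async `subscribe_to_tenant` iterator, a minimal sketch built on `redis.asyncio` pub/sub could look like the following (the per-tenant channel name is an assumption):

```python
# Hypothetical sketch of SSEService; the actual implementation in
# app/services/sse_service.py is not included in this excerpt.
from typing import AsyncIterator

from redis.asyncio import Redis


class SSEService:
    def __init__(self, redis: Redis):
        self.redis = redis

    async def subscribe_to_tenant(self, tenant_id: str) -> AsyncIterator[str]:
        # Assumed convention: one pub/sub channel per tenant.
        channel = f"alerts:{tenant_id}"
        pubsub = self.redis.pubsub()
        await pubsub.subscribe(channel)
        try:
            async for message in pubsub.listen():
                if message["type"] != "message":
                    continue  # skip subscribe/unsubscribe confirmations
                data = message["data"]
                yield data.decode() if isinstance(data, bytes) else data
        finally:
            await pubsub.unsubscribe(channel)
            await pubsub.close()
```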