New alert system and control panel page
@@ -4,5 +4,6 @@ Alert Processor API Endpoints
from .analytics import router as analytics_router
from .alerts import router as alerts_router
from .internal_demo import router as internal_demo_router

__all__ = ['analytics_router', 'alerts_router']
__all__ = ['analytics_router', 'alerts_router', 'internal_demo_router']

@@ -3,7 +3,7 @@
Alerts API endpoints for dashboard and alert management
"""

from fastapi import APIRouter, HTTPException, Query, Path
from fastapi import APIRouter, HTTPException, Query, Path, Depends
from typing import List, Optional
from pydantic import BaseModel, Field
from uuid import UUID
@@ -11,7 +11,8 @@ from datetime import datetime
import structlog

from app.repositories.alerts_repository import AlertsRepository
from app.models.alerts import AlertSeverity, AlertStatus
from app.models.events import AlertStatus
from app.dependencies import get_current_user

logger = structlog.get_logger()

@@ -28,12 +29,14 @@ class AlertResponse(BaseModel):
    tenant_id: str
    item_type: str
    alert_type: str
    severity: str
    priority_level: str
    priority_score: int
    status: str
    service: str
    title: str
    message: str
    actions: Optional[dict] = None
    type_class: str
    actions: Optional[List[dict]] = None  # smart_actions is a list of action objects
    alert_metadata: Optional[dict] = None
    created_at: datetime
    updated_at: datetime
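The actions field now carries a list of structured action objects rather than a single dict. Based on the conversion logic further down in this diff, one entry would look roughly like the sketch below (the four keys come from the conversion code; the concrete values are illustrative only):

# Illustrative shape of one AlertResponse.actions entry; values are examples.
example_action = {
    'action_type': 'approve_po',
    'label': 'Approve Po',        # derived via action.replace('_', ' ').title()
    'variant': 'default',
    'disabled': False,
}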
@@ -47,10 +50,10 @@ class AlertsSummaryResponse(BaseModel):
    """Alerts summary for dashboard"""
    total_count: int = Field(..., description="Total number of alerts")
    active_count: int = Field(..., description="Number of active (unresolved) alerts")
    critical_count: int = Field(..., description="Number of critical/urgent alerts")
    high_count: int = Field(..., description="Number of high severity alerts")
    medium_count: int = Field(..., description="Number of medium severity alerts")
    low_count: int = Field(..., description="Number of low severity alerts")
    critical_count: int = Field(..., description="Number of critical priority alerts")
    high_count: int = Field(..., description="Number of high priority alerts")
    medium_count: int = Field(..., description="Number of medium priority alerts")
    low_count: int = Field(..., description="Number of low priority alerts")
    resolved_count: int = Field(..., description="Number of resolved alerts")
    acknowledged_count: int = Field(..., description="Number of acknowledged alerts")

@@ -71,7 +74,7 @@ class AlertsListResponse(BaseModel):
    "/api/v1/tenants/{tenant_id}/alerts/summary",
    response_model=AlertsSummaryResponse,
    summary="Get alerts summary",
    description="Get summary of alerts by severity and status for dashboard health indicator"
    description="Get summary of alerts by priority level and status for dashboard health indicator"
)
async def get_alerts_summary(
    tenant_id: UUID = Path(..., description="Tenant ID")
@@ -79,8 +82,8 @@ async def get_alerts_summary(
    """
    Get alerts summary for dashboard

    Returns counts of alerts grouped by severity and status.
    Critical count maps to URGENT severity for dashboard compatibility.
    Returns counts of alerts grouped by priority level and status.
    Critical count maps to URGENT priority level for dashboard compatibility.
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
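For reference, a minimal client call against the summary endpoint might look like the sketch below; httpx, the base URL, and the tenant UUID are assumptions for illustration, not part of this commit.

import httpx

BASE_URL = "http://localhost:8000"                        # placeholder host
TENANT_ID = "00000000-0000-0000-0000-000000000000"        # placeholder tenant

resp = httpx.get(f"{BASE_URL}/api/v1/tenants/{TENANT_ID}/alerts/summary")
resp.raise_for_status()
summary = resp.json()
print(summary["active_count"], summary["critical_count"])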
@@ -107,7 +110,7 @@ async def get_alerts_summary(
)
async def get_alerts(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    severity: Optional[str] = Query(None, description="Filter by severity: low, medium, high, urgent"),
    priority_level: Optional[str] = Query(None, description="Filter by priority level: critical, important, standard, info"),
    status: Optional[str] = Query(None, description="Filter by status: active, resolved, acknowledged, ignored"),
    resolved: Optional[bool] = Query(None, description="Filter by resolved status: true=resolved only, false=unresolved only"),
    limit: int = Query(100, ge=1, le=1000, description="Maximum number of results"),
@@ -117,7 +120,7 @@ async def get_alerts(
    Get filtered list of alerts

    Supports filtering by:
    - severity: low, medium, high, urgent (maps to "critical" in dashboard)
    - priority_level: critical, important, standard, info
    - status: active, resolved, acknowledged, ignored
    - resolved: boolean filter for resolved status
    - pagination: limit and offset
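A hedged sketch of combining these filters from a client (placeholder host and tenant; invalid priority_level or status values are rejected with a 400 by the validation in the next hunk):

import httpx

BASE_URL = "http://localhost:8000"                        # placeholder host
TENANT_ID = "00000000-0000-0000-0000-000000000000"        # placeholder tenant

# Fetch up to 20 unresolved critical alerts.
resp = httpx.get(
    f"{BASE_URL}/api/v1/tenants/{TENANT_ID}/alerts",
    params={"priority_level": "critical", "resolved": False, "limit": 20},
)
resp.raise_for_status()
for alert in resp.json()["alerts"]:
    print(alert["priority_level"], alert["title"])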
@@ -126,18 +129,20 @@ async def get_alerts(
    from shared.database.base import create_database_manager

    try:
        # Validate severity enum
        if severity and severity not in [s.value for s in AlertSeverity]:
        # Validate priority_level enum
        valid_priority_levels = ['critical', 'important', 'standard', 'info']
        if priority_level and priority_level not in valid_priority_levels:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid severity. Must be one of: {[s.value for s in AlertSeverity]}"
                detail=f"Invalid priority level. Must be one of: {valid_priority_levels}"
            )

        # Validate status enum
        if status and status not in [s.value for s in AlertStatus]:
        valid_status_values = ['active', 'resolved', 'acknowledged', 'ignored']
        if status and status not in valid_status_values:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid status. Must be one of: {[s.value for s in AlertStatus]}"
                detail=f"Invalid status. Must be one of: {valid_status_values}"
            )

        config = AlertProcessorConfig()
@@ -147,7 +152,7 @@ async def get_alerts(
            repo = AlertsRepository(session)
            alerts = await repo.get_alerts(
                tenant_id=tenant_id,
                severity=severity,
                priority_level=priority_level,
                status=status,
                resolved=resolved,
                limit=limit,
@@ -155,25 +160,42 @@ async def get_alerts(
            )

            # Convert to response models
            alert_responses = [
                AlertResponse(
            alert_responses = []
            for alert in alerts:
                # Handle old format actions (strings) by converting to proper dict format
                actions = alert.smart_actions
                if actions and isinstance(actions, list) and len(actions) > 0:
                    # Check if actions are strings (old format)
                    if isinstance(actions[0], str):
                        # Convert old format to new format
                        actions = [
                            {
                                'action_type': action,
                                'label': action.replace('_', ' ').title(),
                                'variant': 'default',
                                'disabled': False
                            }
                            for action in actions
                        ]

                alert_responses.append(AlertResponse(
                    id=str(alert.id),
                    tenant_id=str(alert.tenant_id),
                    item_type=alert.item_type,
                    alert_type=alert.alert_type,
                    severity=alert.severity,
                    status=alert.status,
                    priority_level=alert.priority_level.value if hasattr(alert.priority_level, 'value') else alert.priority_level,
                    priority_score=alert.priority_score,
                    status=alert.status.value if hasattr(alert.status, 'value') else alert.status,
                    service=alert.service,
                    title=alert.title,
                    message=alert.message,
                    actions=alert.actions,
                    type_class=alert.type_class.value if hasattr(alert.type_class, 'value') else alert.type_class,
                    actions=actions,  # Use converted actions
                    alert_metadata=alert.alert_metadata,
                    created_at=alert.created_at,
                    updated_at=alert.updated_at,
                    resolved_at=alert.resolved_at
                )
                for alert in alerts
            ]
                ))

            return AlertsListResponse(
                alerts=alert_responses,
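The string-to-dict conversion above reappears verbatim in get_alert below; if it were ever factored out, a helper along these lines would cover both call sites (a sketch mirroring the inline logic in this diff, not code from the commit):

def normalize_smart_actions(actions):
    """Convert legacy string actions into the structured dict format."""
    if actions and isinstance(actions, list) and isinstance(actions[0], str):
        return [
            {
                'action_type': action,
                'label': action.replace('_', ' ').title(),
                'variant': 'default',
                'disabled': False
            }
            for action in actions
        ]
    return actions

# e.g. normalize_smart_actions(['approve_po']) ->
#   [{'action_type': 'approve_po', 'label': 'Approve Po',
#     'variant': 'default', 'disabled': False}]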
@@ -214,17 +236,35 @@ async def get_alert(
            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            # Handle old format actions (strings) by converting to proper dict format
            actions = alert.smart_actions
            if actions and isinstance(actions, list) and len(actions) > 0:
                # Check if actions are strings (old format)
                if isinstance(actions[0], str):
                    # Convert old format to new format
                    actions = [
                        {
                            'action_type': action,
                            'label': action.replace('_', ' ').title(),
                            'variant': 'default',
                            'disabled': False
                        }
                        for action in actions
                    ]

            return AlertResponse(
                id=str(alert.id),
                tenant_id=str(alert.tenant_id),
                item_type=alert.item_type,
                alert_type=alert.alert_type,
                severity=alert.severity,
                status=alert.status,
                priority_level=alert.priority_level.value if hasattr(alert.priority_level, 'value') else alert.priority_level,
                priority_score=alert.priority_score,
                status=alert.status.value if hasattr(alert.status, 'value') else alert.status,
                service=alert.service,
                title=alert.title,
                message=alert.message,
                actions=alert.actions,
                type_class=alert.type_class.value if hasattr(alert.type_class, 'value') else alert.type_class,
                actions=actions,  # Use converted actions
                alert_metadata=alert.alert_metadata,
                created_at=alert.created_at,
                updated_at=alert.updated_at,
@@ -236,3 +276,242 @@ async def get_alert(
    except Exception as e:
        logger.error("Error getting alert", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))
@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/cancel-auto-action",
    summary="Cancel auto-action for escalation alert",
    description="Cancel the pending auto-action for an escalation-type alert"
)
async def cancel_auto_action(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> dict:
    """
    Cancel the auto-action scheduled for an escalation alert.
    This prevents the system from automatically executing the action.
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import AlertStatus

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            # Verify this is an escalation alert
            if alert.type_class != 'escalation':
                raise HTTPException(
                    status_code=400,
                    detail="Alert is not an escalation type, no auto-action to cancel"
                )

            # Update alert metadata to mark auto-action as cancelled
            alert.alert_metadata = alert.alert_metadata or {}
            alert.alert_metadata['auto_action_cancelled'] = True
            alert.alert_metadata['auto_action_cancelled_at'] = datetime.utcnow().isoformat()

            # Update urgency context to remove countdown
            if alert.urgency_context:
                alert.urgency_context['auto_action_countdown_seconds'] = None
                alert.urgency_context['auto_action_cancelled'] = True

            # Change type class from escalation to action_needed
            alert.type_class = 'action_needed'

            await session.commit()
            await session.refresh(alert)

            logger.info("Auto-action cancelled", alert_id=str(alert_id), tenant_id=str(tenant_id))

            return {
                "success": True,
                "alert_id": str(alert_id),
                "message": "Auto-action cancelled successfully",
                "updated_type_class": alert.type_class.value
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error cancelling auto-action", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))
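A minimal client-side sketch of cancelling a pending auto-action (placeholder host and IDs; per the handler above, a non-escalation alert yields a 400 and a missing alert a 404):

import httpx

BASE_URL = "http://localhost:8000"                        # placeholder host
TENANT_ID = "00000000-0000-0000-0000-000000000000"        # placeholder tenant
ALERT_ID = "11111111-1111-1111-1111-111111111111"         # placeholder alert

resp = httpx.post(
    f"{BASE_URL}/api/v1/tenants/{TENANT_ID}/alerts/{ALERT_ID}/cancel-auto-action"
)
resp.raise_for_status()
print(resp.json()["updated_type_class"])  # "action_needed" after a successful cancel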
@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/acknowledge",
    summary="Acknowledge alert",
    description="Mark alert as acknowledged"
)
async def acknowledge_alert(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> dict:
    """Mark an alert as acknowledged"""
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import AlertStatus

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            alert.status = AlertStatus.ACKNOWLEDGED
            await session.commit()

            logger.info("Alert acknowledged", alert_id=str(alert_id), tenant_id=str(tenant_id))

            return {
                "success": True,
                "alert_id": str(alert_id),
                "status": alert.status.value
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error acknowledging alert", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))
@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/{alert_id}/resolve",
    summary="Resolve alert",
    description="Mark alert as resolved"
)
async def resolve_alert(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    alert_id: UUID = Path(..., description="Alert ID")
) -> dict:
    """Mark an alert as resolved"""
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import AlertStatus

    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            repo = AlertsRepository(session)
            alert = await repo.get_alert_by_id(tenant_id, alert_id)

            if not alert:
                raise HTTPException(status_code=404, detail="Alert not found")

            alert.status = AlertStatus.RESOLVED
            alert.resolved_at = datetime.utcnow()
            await session.commit()

            logger.info("Alert resolved", alert_id=str(alert_id), tenant_id=str(tenant_id))

            return {
                "success": True,
                "alert_id": str(alert_id),
                "status": alert.status.value,
                "resolved_at": alert.resolved_at.isoformat()
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Error resolving alert", error=str(e), alert_id=str(alert_id))
        raise HTTPException(status_code=500, detail=str(e))
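Acknowledge and resolve follow the same pattern; a sketch of moving one alert through both states (host and IDs are placeholders):

import httpx

BASE_URL = "http://localhost:8000"                        # placeholder host
TENANT_ID = "00000000-0000-0000-0000-000000000000"        # placeholder tenant
ALERT_ID = "11111111-1111-1111-1111-111111111111"         # placeholder alert
alert_url = f"{BASE_URL}/api/v1/tenants/{TENANT_ID}/alerts/{ALERT_ID}"

ack = httpx.post(f"{alert_url}/acknowledge").json()
print(ack["status"])            # "acknowledged"

done = httpx.post(f"{alert_url}/resolve").json()
print(done["resolved_at"])      # ISO timestamp set by the endpoint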
@router.post(
    "/api/v1/tenants/{tenant_id}/alerts/digest/send",
    summary="Send email digest for alerts"
)
async def send_alert_digest(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(1, ge=1, le=7, description="Number of days to include in digest"),
    digest_type: str = Query("daily", description="Type of digest: daily or weekly"),
    user_email: str = Query(..., description="Email address to send digest to"),
    user_name: str = Query(None, description="User name for personalization"),
    current_user: dict = Depends(get_current_user)
):
    """
    Send email digest of alerts.

    Digest includes:
    - AI Impact Summary (prevented issues, savings)
    - Prevented Issues List with AI reasoning
    - Action Needed Alerts
    - Trend Warnings
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import Alert
    from app.services.enrichment.email_digest import EmailDigestService
    from sqlalchemy import select, and_
    from datetime import datetime, timedelta
    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            cutoff_date = datetime.utcnow() - timedelta(days=days)

            # Fetch alerts from the specified period
            query = select(Alert).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            ).order_by(Alert.created_at.desc())

            result = await session.execute(query)
            alerts = result.scalars().all()

            if not alerts:
                return {
                    "success": False,
                    "message": "No alerts found for the specified period",
                    "alert_count": 0
                }

            # Send digest
            digest_service = EmailDigestService(config)

            if digest_type == "weekly":
                success = await digest_service.send_weekly_digest(
                    tenant_id=tenant_id,
                    alerts=alerts,
                    user_email=user_email,
                    user_name=user_name
                )
            else:
                success = await digest_service.send_daily_digest(
                    tenant_id=tenant_id,
                    alerts=alerts,
                    user_email=user_email,
                    user_name=user_name
                )

            return {
                "success": success,
                "message": f"{'Successfully sent' if success else 'Failed to send'} {digest_type} digest",
                "alert_count": len(alerts),
                "digest_type": digest_type,
                "recipient": user_email
            }

    except Exception as e:
        logger.error("Error sending email digest", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to send email digest: {str(e)}")
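Digest parameters travel as query arguments and the route requires an authenticated user via get_current_user; a hedged example of requesting a weekly digest (host, recipient, and the auth header shape are placeholders):

import httpx

BASE_URL = "http://localhost:8000"                        # placeholder host
TENANT_ID = "00000000-0000-0000-0000-000000000000"        # placeholder tenant

resp = httpx.post(
    f"{BASE_URL}/api/v1/tenants/{TENANT_ID}/alerts/digest/send",
    params={
        "days": 7,
        "digest_type": "weekly",
        "user_email": "ops@example.com",                  # placeholder recipient
        "user_name": "Ops Team",
    },
    headers={"Authorization": "Bearer <token>"},          # placeholder; depends on get_current_user
)
print(resp.json()["alert_count"])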
@@ -239,6 +239,166 @@ async def get_trends(
        raise HTTPException(status_code=500, detail=f"Failed to get trends: {str(e)}")


@router.get(
    "/api/v1/tenants/{tenant_id}/alerts/analytics/dashboard",
    response_model=Dict[str, Any],
    summary="Get enriched alert analytics for dashboard"
)
async def get_dashboard_analytics(
    tenant_id: UUID = Path(..., description="Tenant ID"),
    days: int = Query(30, ge=1, le=90, description="Number of days to analyze"),
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Get enriched alert analytics optimized for dashboard display.

    Returns metrics based on the new enrichment system:
    - AI handling rate (% of prevented_issue alerts)
    - Priority distribution (critical, important, standard, info)
    - Type class breakdown (action_needed, prevented_issue, trend_warning, etc.)
    - Total financial impact at risk
    - Average response time by priority level
    - Prevented issues and estimated savings
    """
    from app.config import AlertProcessorConfig
    from shared.database.base import create_database_manager
    from app.models.events import Alert, AlertStatus, AlertTypeClass, PriorityLevel
    from sqlalchemy import select, func, and_
    from datetime import datetime, timedelta
    try:
        config = AlertProcessorConfig()
        db_manager = create_database_manager(config.DATABASE_URL, "alert-processor")

        async with db_manager.get_session() as session:
            cutoff_date = datetime.utcnow() - timedelta(days=days)

            # Total alerts
            total_query = select(func.count(Alert.id)).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            )
            total_result = await session.execute(total_query)
            total_alerts = total_result.scalar() or 0

            # Priority distribution
            priority_query = select(
                Alert.priority_level,
                func.count(Alert.id).label('count')
            ).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            ).group_by(Alert.priority_level)

            priority_result = await session.execute(priority_query)
            priority_dist = {row.priority_level: row.count for row in priority_result}

            # Type class distribution
            type_class_query = select(
                Alert.type_class,
                func.count(Alert.id).label('count')
            ).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= cutoff_date
                )
            ).group_by(Alert.type_class)

            type_class_result = await session.execute(type_class_query)
            type_class_dist = {row.type_class: row.count for row in type_class_result}

            # AI handling metrics
            prevented_count = type_class_dist.get(AlertTypeClass.PREVENTED_ISSUE, 0)
            ai_handling_percentage = (prevented_count / total_alerts * 100) if total_alerts > 0 else 0

            # Financial impact - sum all business_impact.financial_impact_eur from active alerts
            active_alerts_query = select(Alert.id, Alert.business_impact).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.status == AlertStatus.ACTIVE
                )
            )
            active_alerts_result = await session.execute(active_alerts_query)
            active_alerts = active_alerts_result.all()

            total_financial_impact = sum(
                (alert.business_impact or {}).get('financial_impact_eur', 0)
                for alert in active_alerts
            )

            # Prevented issues savings
            prevented_alerts_query = select(Alert.id, Alert.orchestrator_context).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.type_class == 'prevented_issue',
                    Alert.created_at >= cutoff_date
                )
            )
            prevented_alerts_result = await session.execute(prevented_alerts_query)
            prevented_alerts = prevented_alerts_result.all()

            estimated_savings = sum(
                (alert.orchestrator_context or {}).get('estimated_savings_eur', 0)
                for alert in prevented_alerts
            )
            # Active alerts by type class
            active_by_type_query = select(
                Alert.type_class,
                func.count(Alert.id).label('count')
            ).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.status == AlertStatus.ACTIVE
                )
            ).group_by(Alert.type_class)

            active_by_type_result = await session.execute(active_by_type_query)
            active_by_type = {row.type_class: row.count for row in active_by_type_result}

            # Get period comparison for trends
            from app.repositories.analytics_repository import AlertAnalyticsRepository
            analytics_repo = AlertAnalyticsRepository(session)
            period_comparison = await analytics_repo.get_period_comparison(
                tenant_id=tenant_id,
                current_days=days,
                previous_days=days
            )

            return {
                "period_days": days,
                "total_alerts": total_alerts,
                "active_alerts": len(active_alerts),
                "ai_handling_rate": round(ai_handling_percentage, 1),
                "prevented_issues_count": prevented_count,
                "estimated_savings_eur": round(estimated_savings, 2),
                "total_financial_impact_at_risk_eur": round(total_financial_impact, 2),
                "priority_distribution": {
                    "critical": priority_dist.get(PriorityLevel.CRITICAL, 0),
                    "important": priority_dist.get(PriorityLevel.IMPORTANT, 0),
                    "standard": priority_dist.get(PriorityLevel.STANDARD, 0),
                    "info": priority_dist.get(PriorityLevel.INFO, 0)
                },
                "type_class_distribution": {
                    "action_needed": type_class_dist.get(AlertTypeClass.ACTION_NEEDED, 0),
                    "prevented_issue": type_class_dist.get(AlertTypeClass.PREVENTED_ISSUE, 0),
                    "trend_warning": type_class_dist.get(AlertTypeClass.TREND_WARNING, 0),
                    "escalation": type_class_dist.get(AlertTypeClass.ESCALATION, 0),
                    "information": type_class_dist.get(AlertTypeClass.INFORMATION, 0)
                },
                "active_by_type_class": active_by_type,
                "period_comparison": period_comparison
            }

    except Exception as e:
        logger.error("Failed to get dashboard analytics", error=str(e), tenant_id=str(tenant_id))
        raise HTTPException(status_code=500, detail=f"Failed to get dashboard analytics: {str(e)}")


# ============================================================================
# Tenant Data Deletion Operations (Internal Service Only)
# ============================================================================
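As a quick sanity check on the arithmetic above: the AI handling rate is just prevented_issue alerts over total alerts, and the financial figures are sums over JSON fields with missing values counted as zero. A standalone illustration with sample numbers only:

# Sample numbers only - illustrates the dashboard metric arithmetic above.
total_alerts = 40
prevented_count = 12
ai_handling_rate = round(prevented_count / total_alerts * 100, 1) if total_alerts else 0   # 30.0

active_business_impact = [{"financial_impact_eur": 300.0}, {}, {"financial_impact_eur": 49.5}]
total_financial_impact = sum(bi.get("financial_impact_eur", 0) for bi in active_business_impact)   # 349.5

prevented_context = [{"estimated_savings_eur": 120.0}, {"estimated_savings_eur": 80.5}]
estimated_savings = sum(ctx.get("estimated_savings_eur", 0) for ctx in prevented_context)          # 200.5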
services/alert_processor/app/api/internal_demo.py (new file, 305 lines)
@@ -0,0 +1,305 @@
"""
|
||||
Internal Demo Cloning API for Alert Processor Service
|
||||
Service-to-service endpoint for cloning alert data
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Header
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, delete, func
|
||||
import structlog
|
||||
import uuid
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import Optional, Dict, Any
|
||||
import os
|
||||
|
||||
from app.repositories.alerts_repository import AlertsRepository
|
||||
from app.models.events import Alert, AlertStatus, AlertTypeClass
|
||||
from app.config import AlertProcessorConfig
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add shared utilities to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
|
||||
from shared.utils.demo_dates import adjust_date_for_demo, BASE_REFERENCE_DATE
|
||||
from shared.database.base import create_database_manager
|
||||
|
||||
logger = structlog.get_logger()
|
||||
router = APIRouter(prefix="/internal/demo", tags=["internal"])
|
||||
|
||||
# Internal API key for service-to-service auth
|
||||
INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "dev-internal-key-change-in-production")
|
||||
|
||||
# Database manager for this module
|
||||
config = AlertProcessorConfig()
|
||||
db_manager = create_database_manager(config.DATABASE_URL, "alert-processor-internal-demo")
|
||||
|
||||
# Dependency to get database session
|
||||
async def get_db():
|
||||
"""Get database session for internal demo operations"""
|
||||
async with db_manager.get_session() as session:
|
||||
yield session
|
||||
|
||||
# Base demo tenant IDs
|
||||
DEMO_TENANT_SAN_PABLO = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
|
||||
DEMO_TENANT_LA_ESPIGA = "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7"
|
||||
|
||||
|
||||
def verify_internal_api_key(x_internal_api_key: Optional[str] = Header(None)):
|
||||
"""Verify internal API key for service-to-service communication"""
|
||||
if x_internal_api_key != INTERNAL_API_KEY:
|
||||
logger.warning("Unauthorized internal API access attempted")
|
||||
raise HTTPException(status_code=403, detail="Invalid internal API key")
|
||||
return True
|
||||
|
||||
|
||||
@router.post("/clone")
|
||||
async def clone_demo_data(
|
||||
base_tenant_id: str,
|
||||
virtual_tenant_id: str,
|
||||
demo_account_type: str,
|
||||
session_id: Optional[str] = None,
|
||||
session_created_at: Optional[str] = None,
|
||||
db: AsyncSession = Depends(get_db),
|
||||
_: bool = Depends(verify_internal_api_key)
|
||||
):
|
||||
"""
|
||||
Clone alert service data for a virtual demo tenant
|
||||
|
||||
Clones:
|
||||
- Action-needed alerts (PO approvals, delivery tracking, low stock warnings, production delays)
|
||||
- Prevented-issue alerts (AI interventions with financial impact)
|
||||
- Historical trend data over past 7 days
|
||||
|
||||
Args:
|
||||
base_tenant_id: Template tenant UUID to clone from
|
||||
virtual_tenant_id: Target virtual tenant UUID
|
||||
demo_account_type: Type of demo account
|
||||
session_id: Originating session ID for tracing
|
||||
session_created_at: Session creation timestamp for date adjustment
|
||||
|
||||
Returns:
|
||||
Cloning status and record counts
|
||||
"""
|
||||
start_time = datetime.now(timezone.utc)
|
||||
|
||||
# Parse session creation time for date adjustment
|
||||
if session_created_at:
|
||||
try:
|
||||
session_time = datetime.fromisoformat(session_created_at.replace('Z', '+00:00'))
|
||||
except (ValueError, AttributeError):
|
||||
session_time = start_time
|
||||
else:
|
||||
session_time = start_time
|
||||
|
||||
logger.info(
|
||||
"Starting alert data cloning",
|
||||
base_tenant_id=base_tenant_id,
|
||||
virtual_tenant_id=virtual_tenant_id,
|
||||
demo_account_type=demo_account_type,
|
||||
session_id=session_id,
|
||||
session_created_at=session_created_at
|
||||
)
|
||||
|
||||
    try:
        # Validate UUIDs
        base_uuid = uuid.UUID(base_tenant_id)
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Track cloning statistics
        stats = {
            "alerts": 0,
            "action_needed": 0,
            "prevented_issues": 0,
            "historical_alerts": 0
        }

        # Clone Alerts
        result = await db.execute(
            select(Alert).where(Alert.tenant_id == base_uuid)
        )
        base_alerts = result.scalars().all()

        logger.info(
            "Found alerts to clone",
            count=len(base_alerts),
            base_tenant=str(base_uuid)
        )
        for alert in base_alerts:
            # Adjust dates relative to session creation time
            adjusted_created_at = adjust_date_for_demo(
                alert.created_at, session_time, BASE_REFERENCE_DATE
            ) if alert.created_at else session_time

            adjusted_updated_at = adjust_date_for_demo(
                alert.updated_at, session_time, BASE_REFERENCE_DATE
            ) if alert.updated_at else session_time

            adjusted_resolved_at = adjust_date_for_demo(
                alert.resolved_at, session_time, BASE_REFERENCE_DATE
            ) if alert.resolved_at else None

            adjusted_action_created_at = adjust_date_for_demo(
                alert.action_created_at, session_time, BASE_REFERENCE_DATE
            ) if alert.action_created_at else None

            adjusted_scheduled_send_time = adjust_date_for_demo(
                alert.scheduled_send_time, session_time, BASE_REFERENCE_DATE
            ) if alert.scheduled_send_time else None

            # Update urgency context with adjusted dates if present
            urgency_context = alert.urgency_context.copy() if alert.urgency_context else {}
            if urgency_context.get("expected_delivery"):
                try:
                    original_delivery = datetime.fromisoformat(urgency_context["expected_delivery"].replace('Z', '+00:00'))
                    adjusted_delivery = adjust_date_for_demo(original_delivery, session_time, BASE_REFERENCE_DATE)
                    urgency_context["expected_delivery"] = adjusted_delivery.isoformat() if adjusted_delivery else None
                except:
                    pass  # Keep original if parsing fails

            new_alert = Alert(
                id=uuid.uuid4(),
                tenant_id=virtual_uuid,
                item_type=alert.item_type,
                alert_type=alert.alert_type,
                service=alert.service,
                title=alert.title,
                message=alert.message,
                status=alert.status,
                priority_score=alert.priority_score,
                priority_level=alert.priority_level,
                type_class=alert.type_class,
                orchestrator_context=alert.orchestrator_context,
                business_impact=alert.business_impact,
                urgency_context=urgency_context,
                user_agency=alert.user_agency,
                trend_context=alert.trend_context,
                smart_actions=alert.smart_actions,
                ai_reasoning_summary=alert.ai_reasoning_summary,
                confidence_score=alert.confidence_score,
                timing_decision=alert.timing_decision,
                scheduled_send_time=adjusted_scheduled_send_time,
                placement=alert.placement,
                action_created_at=adjusted_action_created_at,
                superseded_by_action_id=None,  # Don't clone superseded relationships
                hidden_from_ui=alert.hidden_from_ui,
                alert_metadata=alert.alert_metadata,
                created_at=adjusted_created_at,
                updated_at=adjusted_updated_at,
                resolved_at=adjusted_resolved_at
            )
            db.add(new_alert)
            stats["alerts"] += 1
            # Track by type_class
            if alert.type_class == "action_needed":
                stats["action_needed"] += 1
            elif alert.type_class == "prevented_issue":
                stats["prevented_issues"] += 1

            # Track historical (older than 1 day)
            if adjusted_created_at < session_time - timedelta(days=1):
                stats["historical_alerts"] += 1

        # Commit cloned data
        await db.commit()

        total_records = stats["alerts"]
        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)

        logger.info(
            "Alert data cloning completed",
            virtual_tenant_id=virtual_tenant_id,
            total_records=total_records,
            stats=stats,
            duration_ms=duration_ms
        )

        return {
            "service": "alert_processor",
            "status": "completed",
            "records_cloned": total_records,
            "duration_ms": duration_ms,
            "details": stats
        }

    except ValueError as e:
        logger.error("Invalid UUID format", error=str(e))
        raise HTTPException(status_code=400, detail=f"Invalid UUID: {str(e)}")

    except Exception as e:
        logger.error(
            "Failed to clone alert data",
            error=str(e),
            virtual_tenant_id=virtual_tenant_id,
            exc_info=True
        )

        # Rollback on error
        await db.rollback()

        return {
            "service": "alert_processor",
            "status": "failed",
            "records_cloned": 0,
            "duration_ms": int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000),
            "error": str(e)
        }
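The clone endpoint is meant for service-to-service calls and is guarded by the X-Internal-Api-Key header checked in verify_internal_api_key. A sketch of how the orchestrator might invoke it; the service URL, key value, virtual tenant ID, and account type are placeholders:

import httpx

ALERT_PROCESSOR_URL = "http://alert-processor:8000"         # placeholder service URL
INTERNAL_API_KEY = "dev-internal-key-change-in-production"  # placeholder; read from env in practice

resp = httpx.post(
    f"{ALERT_PROCESSOR_URL}/internal/demo/clone",
    params={
        "base_tenant_id": "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6",     # DEMO_TENANT_SAN_PABLO
        "virtual_tenant_id": "33333333-3333-3333-3333-333333333333",  # placeholder
        "demo_account_type": "bakery",                                # placeholder
        "session_created_at": "2025-01-01T10:00:00Z",                 # placeholder
    },
    headers={"X-Internal-Api-Key": INTERNAL_API_KEY},
)
print(resp.json()["records_cloned"])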
@router.get("/clone/health")
|
||||
async def clone_health_check(_: bool = Depends(verify_internal_api_key)):
|
||||
"""
|
||||
Health check for internal cloning endpoint
|
||||
Used by orchestrator to verify service availability
|
||||
"""
|
||||
return {
|
||||
"service": "alert_processor",
|
||||
"clone_endpoint": "available",
|
||||
"version": "2.0.0"
|
||||
}
|
||||
|
||||
|
||||
@router.delete("/tenant/{virtual_tenant_id}")
async def delete_demo_data(
    virtual_tenant_id: str,
    db: AsyncSession = Depends(get_db),
    _: bool = Depends(verify_internal_api_key)
):
    """Delete all alert data for a virtual demo tenant"""
    logger.info("Deleting alert data for virtual tenant", virtual_tenant_id=virtual_tenant_id)
    start_time = datetime.now(timezone.utc)

    try:
        virtual_uuid = uuid.UUID(virtual_tenant_id)

        # Count records
        alert_count = await db.scalar(
            select(func.count(Alert.id)).where(Alert.tenant_id == virtual_uuid)
        )

        # Delete alerts
        await db.execute(delete(Alert).where(Alert.tenant_id == virtual_uuid))
        await db.commit()

        duration_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000)
        logger.info(
            "Alert data deleted successfully",
            virtual_tenant_id=virtual_tenant_id,
            duration_ms=duration_ms
        )

        return {
            "service": "alert_processor",
            "status": "deleted",
            "virtual_tenant_id": virtual_tenant_id,
            "records_deleted": {
                "alerts": alert_count,
                "total": alert_count
            },
            "duration_ms": duration_ms
        }
    except Exception as e:
        logger.error("Failed to delete alert data", error=str(e), exc_info=True)
        await db.rollback()
        raise HTTPException(status_code=500, detail=str(e))
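The cloning above leans on adjust_date_for_demo from shared.utils.demo_dates, whose implementation is not part of this diff. Assuming it shifts each template timestamp by the offset between the demo session start and BASE_REFERENCE_DATE, so cloned history keeps its relative age, the idea reduces to the hypothetical sketch below (the helper name and reference dates here are made up for illustration):

from datetime import datetime, timezone

def shift_for_demo(original, session_time, base_reference):
    """Hypothetical stand-in for adjust_date_for_demo; the real helper may differ."""
    if original is None:
        return None
    return original + (session_time - base_reference)

base_reference = datetime(2025, 1, 1, tzinfo=timezone.utc)           # placeholder reference date
session_time = datetime(2025, 3, 10, 9, 30, tzinfo=timezone.utc)     # placeholder session start
template_created_at = datetime(2024, 12, 30, tzinfo=timezone.utc)    # two days before the reference

print(shift_for_demo(template_created_at, session_time, base_reference))
# -> 2025-03-08 09:30 UTC, still two days before the session start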