New alert service

This commit is contained in:
Urtzi Alfaro
2025-12-05 20:07:01 +01:00
parent 1fe3a73549
commit 667e6e0404
393 changed files with 26002 additions and 61033 deletions

View File

@@ -1,7 +0,0 @@
"""
Alert Processor Repositories
"""
from .analytics_repository import AlertAnalyticsRepository
__all__ = ['AlertAnalyticsRepository']

View File

@@ -1,189 +0,0 @@
# services/alert_processor/app/repositories/alerts_repository.py
"""
Alerts Repository - Database access layer for alerts
"""
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func, and_, or_
from typing import List, Dict, Any, Optional
from uuid import UUID
import structlog
from app.models.events import Alert, AlertStatus
logger = structlog.get_logger()
class AlertsRepository:
    """Repository for alert database operations.

    Read-only query layer over the ``Alert`` model. The session is owned by
    the caller; this repository never commits.
    """

    def __init__(self, db: AsyncSession):
        # Session injected by the service layer; transaction scope stays there.
        self.db = db

    async def get_alerts(
        self,
        tenant_id: UUID,
        priority_level: Optional[str] = None,
        status: Optional[str] = None,
        resolved: Optional[bool] = None,
        limit: int = 100,
        offset: int = 0
    ) -> List[Alert]:
        """
        Get alerts with optional filters.

        Args:
            tenant_id: Tenant UUID
            priority_level: Filter by priority level (critical, important, standard, info)
            status: Filter by status (active, resolved, acknowledged, ignored)
            resolved: Filter by resolved status (True = resolved, False = not resolved, None = all)
            limit: Maximum number of results
            offset: Pagination offset

        Returns:
            List of Alert objects, newest first.

        Raises:
            Exception: any database error is logged and re-raised.
        """
        try:
            query = select(Alert).where(Alert.tenant_id == tenant_id)
            # Apply filters
            if priority_level:
                query = query.where(Alert.priority_level == priority_level)
            if status:
                # Convert string status to enum value; an unknown value is
                # logged and ignored so the query still runs unfiltered.
                try:
                    status_enum = AlertStatus(status.lower())
                    query = query.where(Alert.status == status_enum)
                except ValueError:
                    logger.warning("Invalid status value provided", status=status)
            if resolved is not None:
                # "resolved" is derived from the status column, not a flag.
                if resolved:
                    query = query.where(Alert.status == AlertStatus.RESOLVED)
                else:
                    query = query.where(Alert.status != AlertStatus.RESOLVED)
            # Order by created_at descending (newest first)
            query = query.order_by(Alert.created_at.desc())
            # Apply pagination
            query = query.limit(limit).offset(offset)
            result = await self.db.execute(query)
            alerts = result.scalars().all()
            logger.info(
                "Retrieved alerts",
                tenant_id=str(tenant_id),
                count=len(alerts),
                filters={"priority_level": priority_level, "status": status, "resolved": resolved}
            )
            return list(alerts)
        except Exception as e:
            logger.error("Error retrieving alerts", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_alerts_summary(self, tenant_id: UUID) -> Dict[str, Any]:
        """
        Get summary of alerts by priority level and status.

        Args:
            tenant_id: Tenant UUID

        Returns:
            Dict with counts by priority level and status, keyed with the
            field names the dashboard expects (critical/high/medium/low map
            from critical/important/standard/info priority levels).

        Raises:
            Exception: any database error is logged and re-raised.
        """
        try:
            # Count by priority level — only non-resolved alerts contribute.
            priority_query = (
                select(
                    Alert.priority_level,
                    func.count(Alert.id).label("count")
                )
                .where(
                    and_(
                        Alert.tenant_id == tenant_id,
                        Alert.status != AlertStatus.RESOLVED
                    )
                )
                .group_by(Alert.priority_level)
            )
            priority_result = await self.db.execute(priority_query)
            priority_counts = {row[0]: row[1] for row in priority_result.all()}
            # Count by status — all alerts, resolved included.
            status_query = (
                select(
                    Alert.status,
                    func.count(Alert.id).label("count")
                )
                .where(Alert.tenant_id == tenant_id)
                .group_by(Alert.status)
            )
            status_result = await self.db.execute(status_query)
            status_counts = {row[0]: row[1] for row in status_result.all()}
            # Count active alerts (not resolved)
            active_count = sum(
                count for status, count in status_counts.items()
                if status != AlertStatus.RESOLVED
            )
            # Convert enum values to strings for dictionary lookups
            status_counts_str = {status.value if hasattr(status, 'value') else status: count
                                 for status, count in status_counts.items()}
            # Map to expected field names (dashboard expects "critical")
            summary = {
                "total_count": sum(status_counts.values()),
                "active_count": active_count,
                "critical_count": priority_counts.get('critical', 0),
                "high_count": priority_counts.get('important', 0),
                "medium_count": priority_counts.get('standard', 0),
                "low_count": priority_counts.get('info', 0),
                "resolved_count": status_counts_str.get('resolved', 0),
                "acknowledged_count": status_counts_str.get('acknowledged', 0),
            }
            logger.info(
                "Retrieved alerts summary",
                tenant_id=str(tenant_id),
                summary=summary
            )
            return summary
        except Exception as e:
            logger.error("Error retrieving alerts summary", error=str(e), tenant_id=str(tenant_id))
            raise

    async def get_alert_by_id(self, tenant_id: UUID, alert_id: UUID) -> Optional[Alert]:
        """Get a specific alert by ID, scoped to the tenant; None if not found."""
        try:
            query = select(Alert).where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.id == alert_id
                )
            )
            result = await self.db.execute(query)
            alert = result.scalar_one_or_none()
            if alert:
                logger.info("Retrieved alert", alert_id=str(alert_id), tenant_id=str(tenant_id))
            else:
                logger.warning("Alert not found", alert_id=str(alert_id), tenant_id=str(tenant_id))
            return alert
        except Exception as e:
            logger.error("Error retrieving alert", error=str(e), alert_id=str(alert_id))
            raise

View File

@@ -1,508 +0,0 @@
"""
Alert Analytics Repository
Handles all database operations for alert analytics
"""
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
from uuid import UUID
from sqlalchemy import select, func, and_, extract, case
from sqlalchemy.ext.asyncio import AsyncSession
import structlog
from app.models.events import Alert, EventInteraction, AlertStatus
logger = structlog.get_logger()
class AlertAnalyticsRepository:
    """Repository for alert analytics operations.

    Writes alert interactions and derives dashboard analytics (trends,
    response times, categories, resolution stats, period comparisons) from
    the ``Alert`` and ``EventInteraction`` models.
    """

    def __init__(self, session: AsyncSession):
        # Async session supplied by the caller; mutating methods commit on it.
        self.session = session

    async def create_interaction(
        self,
        tenant_id: UUID,
        alert_id: UUID,
        user_id: UUID,
        interaction_type: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> EventInteraction:
        """Create a new alert interaction and update the alert's status.

        Raises:
            ValueError: if the referenced alert does not exist.
        """
        # Get alert to calculate response time
        alert_query = select(Alert).where(Alert.id == alert_id)
        result = await self.session.execute(alert_query)
        alert = result.scalar_one_or_none()
        if not alert:
            raise ValueError(f"Alert {alert_id} not found")
        # Calculate response time.
        # NOTE(review): datetime.utcnow() is naive; assumes Alert.created_at
        # is also stored as naive UTC — confirm against the model/DB schema.
        now = datetime.utcnow()
        response_time_seconds = int((now - alert.created_at).total_seconds())
        # Create interaction
        interaction = EventInteraction(
            tenant_id=tenant_id,
            alert_id=alert_id,
            user_id=user_id,
            interaction_type=interaction_type,
            interacted_at=now,
            response_time_seconds=response_time_seconds,
            interaction_metadata=metadata or {}
        )
        self.session.add(interaction)
        # Update alert status if applicable; unknown interaction types leave
        # the alert status untouched.
        if interaction_type == 'acknowledged' and alert.status == AlertStatus.ACTIVE:
            alert.status = AlertStatus.ACKNOWLEDGED
        elif interaction_type == 'resolved':
            alert.status = AlertStatus.RESOLVED
            alert.resolved_at = now
        elif interaction_type == 'dismissed':
            alert.status = AlertStatus.IGNORED
        await self.session.commit()
        await self.session.refresh(interaction)
        logger.info(
            "Alert interaction created",
            alert_id=str(alert_id),
            interaction_type=interaction_type,
            response_time=response_time_seconds
        )
        return interaction

    async def create_interactions_batch(
        self,
        tenant_id: UUID,
        interactions: List[Dict[str, Any]]
    ) -> List[EventInteraction]:
        """Create multiple interactions in batch.

        Best-effort: a failing item is logged and skipped, so the returned
        list may be shorter than the input.
        """
        created_interactions = []
        for interaction_data in interactions:
            try:
                interaction = await self.create_interaction(
                    tenant_id=tenant_id,
                    alert_id=UUID(interaction_data['alert_id']),
                    user_id=UUID(interaction_data['user_id']),
                    interaction_type=interaction_data['interaction_type'],
                    metadata=interaction_data.get('metadata')
                )
                created_interactions.append(interaction)
            except Exception as e:
                logger.error(
                    "Failed to create interaction in batch",
                    error=str(e),
                    alert_id=interaction_data.get('alert_id')
                )
                continue
        return created_interactions

    async def get_analytics_trends(
        self,
        tenant_id: UUID,
        days: int = 7
    ) -> List[Dict[str, Any]]:
        """Get alert trends for the last N days, one entry per calendar day.

        Days with no alerts are filled with zero counts so the series is
        continuous.
        """
        start_date = datetime.utcnow() - timedelta(days=days)
        # Query alerts grouped by date and priority_level (mapping to severity equivalents)
        # Critical priority_level maps to urgent severity
        # Important priority_level maps to high severity
        # Standard priority_level maps to medium severity
        # Info priority_level maps to low severity
        query = (
            select(
                func.date(Alert.created_at).label('date'),
                func.count(Alert.id).label('total_count'),
                func.sum(
                    case((Alert.priority_level == 'critical', 1), else_=0)
                ).label('urgent_count'),
                func.sum(
                    case((Alert.priority_level == 'important', 1), else_=0)
                ).label('high_count'),
                func.sum(
                    case((Alert.priority_level == 'standard', 1), else_=0)
                ).label('medium_count'),
                func.sum(
                    case((Alert.priority_level == 'info', 1), else_=0)
                ).label('low_count')
            )
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date
                )
            )
            .group_by(func.date(Alert.created_at))
            .order_by(func.date(Alert.created_at))
        )
        result = await self.session.execute(query)
        rows = result.all()
        # Fill in missing dates with zeros
        trends = []
        current_date = start_date.date()
        end_date = datetime.utcnow().date()
        # Create a dict for quick lookup.
        # NOTE(review): assumes func.date() comes back as a Python date so it
        # matches current_date keys — confirm for the DB driver in use.
        data_by_date = {row.date: row for row in rows}
        while current_date <= end_date:
            date_str = current_date.isoformat()
            row = data_by_date.get(current_date)
            trends.append({
                'date': date_str,
                'count': int(row.total_count) if row else 0,
                'urgentCount': int(row.urgent_count) if row else 0,
                'highCount': int(row.high_count) if row else 0,
                'mediumCount': int(row.medium_count) if row else 0,
                'lowCount': int(row.low_count) if row else 0,
            })
            current_date += timedelta(days=1)
        return trends

    async def get_average_response_time(
        self,
        tenant_id: UUID,
        days: int = 7
    ) -> int:
        """Get average response time in minutes for acknowledged alerts.

        Interactions slower than 24h are excluded as outliers; returns 0 when
        there is no qualifying data.
        """
        start_date = datetime.utcnow() - timedelta(days=days)
        query = (
            select(func.avg(EventInteraction.response_time_seconds))
            .where(
                and_(
                    EventInteraction.tenant_id == tenant_id,
                    EventInteraction.interaction_type == 'acknowledged',
                    EventInteraction.interacted_at >= start_date,
                    EventInteraction.response_time_seconds < 86400  # Less than 24 hours
                )
            )
        )
        result = await self.session.execute(query)
        avg_seconds = result.scalar_one_or_none()
        if avg_seconds is None:
            return 0
        # Convert to minutes
        return round(avg_seconds / 60)

    async def get_top_categories(
        self,
        tenant_id: UUID,
        days: int = 7,
        limit: int = 3
    ) -> List[Dict[str, Any]]:
        """Get top alert categories with their share of all alerts in the window."""
        start_date = datetime.utcnow() - timedelta(days=days)
        query = (
            select(
                Alert.alert_type,
                func.count(Alert.id).label('count')
            )
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date
                )
            )
            .group_by(Alert.alert_type)
            .order_by(func.count(Alert.id).desc())
            .limit(limit)
        )
        result = await self.session.execute(query)
        rows = result.all()
        # Calculate total for percentages
        total_query = (
            select(func.count(Alert.id))
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date
                )
            )
        )
        total_result = await self.session.execute(total_query)
        # Fall back to 1 to guard the division below when there are no alerts.
        total = total_result.scalar_one() or 1
        categories = []
        for row in rows:
            percentage = round((row.count / total) * 100) if total > 0 else 0
            categories.append({
                'category': row.alert_type,
                'count': row.count,
                'percentage': percentage
            })
        return categories

    async def get_resolution_stats(
        self,
        tenant_id: UUID,
        days: int = 7
    ) -> Dict[str, Any]:
        """Get resolution statistics (totals and resolution rate) for the window."""
        start_date = datetime.utcnow() - timedelta(days=days)
        # Total alerts
        total_query = (
            select(func.count(Alert.id))
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date
                )
            )
        )
        total_result = await self.session.execute(total_query)
        total_alerts = total_result.scalar_one() or 0
        # Resolved alerts
        resolved_query = (
            select(func.count(Alert.id))
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date,
                    Alert.status == AlertStatus.RESOLVED
                )
            )
        )
        resolved_result = await self.session.execute(resolved_query)
        resolved_alerts = resolved_result.scalar_one() or 0
        # Active alerts
        active_query = (
            select(func.count(Alert.id))
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date,
                    Alert.status == AlertStatus.ACTIVE
                )
            )
        )
        active_result = await self.session.execute(active_query)
        active_alerts = active_result.scalar_one() or 0
        resolution_rate = round((resolved_alerts / total_alerts) * 100) if total_alerts > 0 else 0
        return {
            'totalAlerts': total_alerts,
            'resolvedAlerts': resolved_alerts,
            'activeAlerts': active_alerts,
            'resolutionRate': resolution_rate
        }

    async def get_busiest_day(
        self,
        tenant_id: UUID,
        days: int = 7
    ) -> str:
        """Get busiest day of week, or 'N/A' when there is no data."""
        start_date = datetime.utcnow() - timedelta(days=days)
        query = (
            select(
                extract('dow', Alert.created_at).label('day_of_week'),
                func.count(Alert.id).label('count')
            )
            .where(
                and_(
                    Alert.tenant_id == tenant_id,
                    Alert.created_at >= start_date
                )
            )
            .group_by(extract('dow', Alert.created_at))
            .order_by(func.count(Alert.id).desc())
            .limit(1)
        )
        result = await self.session.execute(query)
        row = result.first()
        if not row:
            return 'N/A'
        # extract('dow') is 0=Sunday .. 6=Saturday, matching this lookup table.
        day_names = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
        return day_names[int(row.day_of_week)]

    async def get_predicted_daily_average(
        self,
        tenant_id: UUID,
        days: int = 7
    ) -> int:
        """Calculate predicted daily average as the mean of the trend series."""
        trends = await self.get_analytics_trends(tenant_id, days)
        if not trends:
            return 0
        total_count = sum(trend['count'] for trend in trends)
        return round(total_count / len(trends))

    async def get_full_analytics(
        self,
        tenant_id: UUID,
        days: int = 7
    ) -> Dict[str, Any]:
        """Get complete analytics data by composing the individual queries.

        Queries run sequentially on the shared session.
        """
        trends = await self.get_analytics_trends(tenant_id, days)
        avg_response_time = await self.get_average_response_time(tenant_id, days)
        top_categories = await self.get_top_categories(tenant_id, days)
        resolution_stats = await self.get_resolution_stats(tenant_id, days)
        busiest_day = await self.get_busiest_day(tenant_id, days)
        predicted_avg = await self.get_predicted_daily_average(tenant_id, days)
        return {
            'trends': trends,
            'averageResponseTime': avg_response_time,
            'topCategories': top_categories,
            'totalAlerts': resolution_stats['totalAlerts'],
            'resolvedAlerts': resolution_stats['resolvedAlerts'],
            'activeAlerts': resolution_stats['activeAlerts'],
            'resolutionRate': resolution_stats['resolutionRate'],
            'predictedDailyAverage': predicted_avg,
            'busiestDay': busiest_day
        }

    async def get_period_comparison(
        self,
        tenant_id: UUID,
        current_days: int = 7,
        previous_days: int = 7
    ) -> Dict[str, Any]:
        """
        Compare current period metrics with previous period.

        Used for week-over-week trend analysis in dashboard cards.

        Args:
            tenant_id: Tenant ID
            current_days: Number of days in current period (default 7)
            previous_days: Number of days in previous period (default 7)

        Returns:
            Dictionary with current/previous metrics and percentage changes
        """
        # datetime/timedelta come from the module-level import; the previous
        # redundant in-function re-import was removed.
        now = datetime.utcnow()
        current_start = now - timedelta(days=current_days)
        previous_start = current_start - timedelta(days=previous_days)
        previous_end = current_start
        # Current period: AI handling rate (prevented issues / total)
        current_total_query = select(func.count(Alert.id)).where(
            and_(
                Alert.tenant_id == tenant_id,
                Alert.created_at >= current_start,
                Alert.created_at <= now
            )
        )
        current_total_result = await self.session.execute(current_total_query)
        current_total = current_total_result.scalar() or 0
        current_prevented_query = select(func.count(Alert.id)).where(
            and_(
                Alert.tenant_id == tenant_id,
                Alert.type_class == 'prevented_issue',
                Alert.created_at >= current_start,
                Alert.created_at <= now
            )
        )
        current_prevented_result = await self.session.execute(current_prevented_query)
        current_prevented = current_prevented_result.scalar() or 0
        current_handling_rate = (
            (current_prevented / current_total * 100)
            if current_total > 0 else 0
        )
        # Previous period: AI handling rate
        previous_total_query = select(func.count(Alert.id)).where(
            and_(
                Alert.tenant_id == tenant_id,
                Alert.created_at >= previous_start,
                Alert.created_at < previous_end
            )
        )
        previous_total_result = await self.session.execute(previous_total_query)
        previous_total = previous_total_result.scalar() or 0
        previous_prevented_query = select(func.count(Alert.id)).where(
            and_(
                Alert.tenant_id == tenant_id,
                Alert.type_class == 'prevented_issue',
                Alert.created_at >= previous_start,
                Alert.created_at < previous_end
            )
        )
        previous_prevented_result = await self.session.execute(previous_prevented_query)
        previous_prevented = previous_prevented_result.scalar() or 0
        previous_handling_rate = (
            (previous_prevented / previous_total * 100)
            if previous_total > 0 else 0
        )
        # Calculate percentage change
        if previous_handling_rate > 0:
            handling_rate_change = round(
                ((current_handling_rate - previous_handling_rate) / previous_handling_rate) * 100,
                1
            )
        elif current_handling_rate > 0:
            handling_rate_change = 100.0  # Went from 0% to something
        else:
            handling_rate_change = 0.0
        # Alert count change
        if previous_total > 0:
            alert_count_change = round(
                ((current_total - previous_total) / previous_total) * 100,
                1
            )
        elif current_total > 0:
            alert_count_change = 100.0
        else:
            alert_count_change = 0.0
        return {
            'current_period': {
                'days': current_days,
                'total_alerts': current_total,
                'prevented_issues': current_prevented,
                'handling_rate_percentage': round(current_handling_rate, 1)
            },
            'previous_period': {
                'days': previous_days,
                'total_alerts': previous_total,
                'prevented_issues': previous_prevented,
                'handling_rate_percentage': round(previous_handling_rate, 1)
            },
            'changes': {
                'handling_rate_change_percentage': handling_rate_change,
                'alert_count_change_percentage': alert_count_change,
                'trend_direction': 'up' if handling_rate_change > 0 else ('down' if handling_rate_change < 0 else 'stable')
            }
        }

View File

@@ -0,0 +1,306 @@
"""
Event repository for database operations.
"""
from typing import List, Optional, Dict, Any
from uuid import UUID
from datetime import datetime, timezone
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func, and_, desc
from sqlalchemy.dialects.postgresql import insert
import structlog
from app.models.events import Event
from app.schemas.events import EnrichedEvent, EventSummary, EventResponse, I18nContent, SmartAction
logger = structlog.get_logger()
class EventRepository:
    """Repository for event database operations"""
    def __init__(self, session: AsyncSession):
        # Async session injected by the caller; the mutating methods below
        # commit on it, so the session's transaction is owned here per call.
        self.session = session
    async def create_event(self, enriched_event: EnrichedEvent) -> Event:
        """
        Store enriched event in database.

        Flattens the nested EnrichedEvent schema into the Event ORM model's
        columns (i18n keys/params, priority fields, JSON context blobs),
        commits, and refreshes to pick up DB-generated values.

        Args:
            enriched_event: Enriched event with all context
        Returns:
            Stored Event model
        """
        # Convert enriched event to database model
        event = Event(
            id=enriched_event.id,
            # tenant_id arrives as a string on the schema and is parsed here.
            tenant_id=UUID(enriched_event.tenant_id),
            event_class=enriched_event.event_class,
            event_domain=enriched_event.event_domain,
            event_type=enriched_event.event_type,
            service=enriched_event.service,
            # i18n content
            i18n_title_key=enriched_event.i18n.title_key,
            i18n_title_params=enriched_event.i18n.title_params,
            i18n_message_key=enriched_event.i18n.message_key,
            i18n_message_params=enriched_event.i18n.message_params,
            # Priority
            priority_score=enriched_event.priority_score,
            priority_level=enriched_event.priority_level,
            type_class=enriched_event.type_class,
            # Enrichment contexts, stored as plain dicts (None when absent).
            # NOTE(review): .dict() is the Pydantic v1 API — confirm the
            # project's Pydantic version (v2 renamed it to model_dump()).
            orchestrator_context=enriched_event.orchestrator_context.dict() if enriched_event.orchestrator_context else None,
            business_impact=enriched_event.business_impact.dict() if enriched_event.business_impact else None,
            urgency=enriched_event.urgency.dict() if enriched_event.urgency else None,
            user_agency=enriched_event.user_agency.dict() if enriched_event.user_agency else None,
            trend_context=enriched_event.trend_context,
            # Smart actions
            smart_actions=[action.dict() for action in enriched_event.smart_actions],
            # AI reasoning
            ai_reasoning_summary_key=enriched_event.ai_reasoning_summary_key,
            ai_reasoning_summary_params=enriched_event.ai_reasoning_summary_params,
            ai_reasoning_details=enriched_event.ai_reasoning_details,
            confidence_score=enriched_event.confidence_score,
            # Entity links
            entity_links=enriched_event.entity_links,
            # Status
            status=enriched_event.status,
            # Metadata
            event_metadata=enriched_event.event_metadata
        )
        self.session.add(event)
        await self.session.commit()
        await self.session.refresh(event)
        logger.info("event_stored", event_id=event.id, event_type=event.event_type)
        return event
async def get_events(
self,
tenant_id: UUID,
event_class: Optional[str] = None,
priority_level: Optional[List[str]] = None,
status: Optional[List[str]] = None,
event_domain: Optional[str] = None,
limit: int = 50,
offset: int = 0
) -> List[Event]:
"""
Get filtered list of events.
Args:
tenant_id: Tenant UUID
event_class: Filter by event class (alert, notification, recommendation)
priority_level: Filter by priority levels
status: Filter by status values
event_domain: Filter by domain
limit: Max results
offset: Pagination offset
Returns:
List of Event models
"""
query = select(Event).where(Event.tenant_id == tenant_id)
# Apply filters
if event_class:
query = query.where(Event.event_class == event_class)
if priority_level:
query = query.where(Event.priority_level.in_(priority_level))
if status:
query = query.where(Event.status.in_(status))
if event_domain:
query = query.where(Event.event_domain == event_domain)
# Order by priority and creation time
query = query.order_by(
desc(Event.priority_score),
desc(Event.created_at)
)
# Pagination
query = query.limit(limit).offset(offset)
result = await self.session.execute(query)
events = result.scalars().all()
return list(events)
async def get_event_by_id(self, event_id: UUID) -> Optional[Event]:
"""Get single event by ID"""
query = select(Event).where(Event.id == event_id)
result = await self.session.execute(query)
return result.scalar_one_or_none()
async def get_summary(self, tenant_id: UUID) -> EventSummary:
"""
Get summary statistics for dashboard.
Args:
tenant_id: Tenant UUID
Returns:
EventSummary with counts and statistics
"""
# Count by status
status_query = select(
Event.status,
func.count(Event.id).label('count')
).where(
Event.tenant_id == tenant_id
).group_by(Event.status)
status_result = await self.session.execute(status_query)
status_counts = {row.status: row.count for row in status_result}
# Count by priority
priority_query = select(
Event.priority_level,
func.count(Event.id).label('count')
).where(
and_(
Event.tenant_id == tenant_id,
Event.status == "active"
)
).group_by(Event.priority_level)
priority_result = await self.session.execute(priority_query)
priority_counts = {row.priority_level: row.count for row in priority_result}
# Count by domain
domain_query = select(
Event.event_domain,
func.count(Event.id).label('count')
).where(
and_(
Event.tenant_id == tenant_id,
Event.status == "active"
)
).group_by(Event.event_domain)
domain_result = await self.session.execute(domain_query)
domain_counts = {row.event_domain: row.count for row in domain_result}
# Count by type class
type_class_query = select(
Event.type_class,
func.count(Event.id).label('count')
).where(
and_(
Event.tenant_id == tenant_id,
Event.status == "active"
)
).group_by(Event.type_class)
type_class_result = await self.session.execute(type_class_query)
type_class_counts = {row.type_class: row.count for row in type_class_result}
return EventSummary(
total_active=status_counts.get("active", 0),
total_acknowledged=status_counts.get("acknowledged", 0),
total_resolved=status_counts.get("resolved", 0),
by_priority=priority_counts,
by_domain=domain_counts,
by_type_class=type_class_counts,
critical_alerts=priority_counts.get("critical", 0),
important_alerts=priority_counts.get("important", 0)
)
async def acknowledge_event(self, event_id: UUID) -> Event:
"""Mark event as acknowledged"""
event = await self.get_event_by_id(event_id)
if not event:
raise ValueError(f"Event {event_id} not found")
event.status = "acknowledged"
event.acknowledged_at = datetime.now(timezone.utc)
await self.session.commit()
await self.session.refresh(event)
logger.info("event_acknowledged", event_id=event_id)
return event
async def resolve_event(self, event_id: UUID) -> Event:
"""Mark event as resolved"""
event = await self.get_event_by_id(event_id)
if not event:
raise ValueError(f"Event {event_id} not found")
event.status = "resolved"
event.resolved_at = datetime.now(timezone.utc)
await self.session.commit()
await self.session.refresh(event)
logger.info("event_resolved", event_id=event_id)
return event
async def dismiss_event(self, event_id: UUID) -> Event:
"""Mark event as dismissed"""
event = await self.get_event_by_id(event_id)
if not event:
raise ValueError(f"Event {event_id} not found")
event.status = "dismissed"
await self.session.commit()
await self.session.refresh(event)
logger.info("event_dismissed", event_id=event_id)
return event
def _event_to_response(self, event: Event) -> EventResponse:
"""Convert Event model to EventResponse"""
return EventResponse(
id=event.id,
tenant_id=event.tenant_id,
created_at=event.created_at,
event_class=event.event_class,
event_domain=event.event_domain,
event_type=event.event_type,
i18n=I18nContent(
title_key=event.i18n_title_key,
title_params=event.i18n_title_params,
message_key=event.i18n_message_key,
message_params=event.i18n_message_params
),
priority_score=event.priority_score,
priority_level=event.priority_level,
type_class=event.type_class,
smart_actions=[SmartAction(**action) for action in event.smart_actions],
status=event.status,
orchestrator_context=event.orchestrator_context,
business_impact=event.business_impact,
urgency=event.urgency,
user_agency=event.user_agency,
ai_reasoning_summary_key=event.ai_reasoning_summary_key,
ai_reasoning_summary_params=event.ai_reasoning_summary_params,
ai_reasoning_details=event.ai_reasoning_details,
confidence_score=event.confidence_score,
entity_links=event.entity_links,
event_metadata=event.event_metadata
)