REFACTOR external service and improve websocket training

This commit is contained in:
Urtzi Alfaro
2025-10-09 14:11:02 +02:00
parent 7c72f83c51
commit 3c689b4f98
111 changed files with 13289 additions and 2374 deletions

View File

@@ -6,7 +6,7 @@ Service-specific repository base class with forecasting utilities
 from typing import Optional, List, Dict, Any, Type
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import text
-from datetime import datetime, date, timedelta
+from datetime import datetime, date, timedelta, timezone
 import structlog
 from shared.database.repository import BaseRepository
@@ -113,15 +113,15 @@ class ForecastingBaseRepository(BaseRepository):
         limit: int = 100
     ) -> List:
         """Get recent records for a tenant"""
-        cutoff_time = datetime.utcnow() - timedelta(hours=hours)
+        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=hours)
         return await self.get_by_date_range(
-            tenant_id, cutoff_time, datetime.utcnow(), skip, limit
+            tenant_id, cutoff_time, datetime.now(timezone.utc), skip, limit
         )

     async def cleanup_old_records(self, days_old: int = 90) -> int:
         """Clean up old forecasting records"""
         try:
-            cutoff_date = datetime.utcnow() - timedelta(days=days_old)
+            cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_old)
             table_name = self.model.__tablename__

             # Use created_at or forecast_date for cleanup
@@ -156,9 +156,9 @@ class ForecastingBaseRepository(BaseRepository):
         total_records = await self.count(filters={"tenant_id": tenant_id})

         # Get recent activity (records in last 7 days)
-        seven_days_ago = datetime.utcnow() - timedelta(days=7)
+        seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
         recent_records = len(await self.get_by_date_range(
-            tenant_id, seven_days_ago, datetime.utcnow(), limit=1000
+            tenant_id, seven_days_ago, datetime.now(timezone.utc), limit=1000
         ))

         # Get records by product if applicable
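Note on the pattern this file (and every file below) applies: datetime.utcnow() returns a naive datetime (tzinfo is None) and is deprecated as of Python 3.12, while datetime.now(timezone.utc) returns an aware UTC datetime. A minimal sketch of the difference:

from datetime import datetime, timezone

naive = datetime.utcnow()           # tzinfo is None; DeprecationWarning on 3.12+
aware = datetime.now(timezone.utc)  # tzinfo is timezone.utc

print(naive.tzinfo, aware.tzinfo)   # None UTC
# Comparing or subtracting the two raises:
# TypeError: can't subtract offset-naive and offset-aware datetimes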

View File

@@ -6,7 +6,7 @@ Repository for forecast operations
 from typing import Optional, List, Dict, Any
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import select, and_, text, desc, func
-from datetime import datetime, timedelta, date
+from datetime import datetime, timedelta, date, timezone
 import structlog
 from .base import ForecastingBaseRepository
@@ -159,7 +159,7 @@ class ForecastRepository(ForecastingBaseRepository):
     ) -> Dict[str, Any]:
         """Get forecast accuracy metrics"""
         try:
-            cutoff_date = datetime.utcnow() - timedelta(days=days_back)
+            cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_back)

             # Build base query conditions
             conditions = ["tenant_id = :tenant_id", "forecast_date >= :cutoff_date"]
@@ -238,7 +238,7 @@ class ForecastRepository(ForecastingBaseRepository):
     ) -> Dict[str, Any]:
         """Get demand trends for a product"""
         try:
-            cutoff_date = datetime.utcnow() - timedelta(days=days_back)
+            cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_back)

             query_text = """
                 SELECT
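The aware cutoff then flows into the raw SQL as a bound parameter, matching the conditions built above. A minimal sketch of that pattern (table and column names here are illustrative, not taken from this repository):

from datetime import datetime, timedelta, timezone
from sqlalchemy import text

async def recent_forecasts(session, tenant_id: str, days_back: int = 30):
    # Aware UTC cutoff, bound as :cutoff_date rather than interpolated.
    cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_back)
    query = text(
        "SELECT forecast_date, predicted_value FROM forecasts "
        "WHERE tenant_id = :tenant_id AND forecast_date >= :cutoff_date"
    )
    result = await session.execute(
        query, {"tenant_id": tenant_id, "cutoff_date": cutoff_date}
    )
    return result.fetchall()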

View File

@@ -6,7 +6,7 @@ Repository for model performance metrics in forecasting service
 from typing import Optional, List, Dict, Any
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import text
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 import structlog
 from .base import ForecastingBaseRepository
@@ -98,7 +98,7 @@ class PerformanceMetricRepository(ForecastingBaseRepository):
     ) -> Dict[str, Any]:
         """Get performance trends over time"""
         try:
-            start_date = datetime.utcnow() - timedelta(days=days)
+            start_date = datetime.now(timezone.utc) - timedelta(days=days)

             conditions = [
                 "tenant_id = :tenant_id",

View File

@@ -6,7 +6,7 @@ Repository for prediction batch operations
 from typing import Optional, List, Dict, Any
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import text
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 import structlog
 from .base import ForecastingBaseRepository
@@ -81,7 +81,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
         if status:
             update_data["status"] = status
             if status in ["completed", "failed"]:
-                update_data["completed_at"] = datetime.utcnow()
+                update_data["completed_at"] = datetime.now(timezone.utc)

         if not update_data:
             return await self.get_by_id(batch_id)
@@ -110,7 +110,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
         try:
             update_data = {
                 "status": "completed",
-                "completed_at": datetime.utcnow()
+                "completed_at": datetime.now(timezone.utc)
             }

             if processing_time_ms:
@@ -140,7 +140,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
         try:
             update_data = {
                 "status": "failed",
-                "completed_at": datetime.utcnow(),
+                "completed_at": datetime.now(timezone.utc),
                 "error_message": error_message
             }
@@ -180,7 +180,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
             update_data = {
                 "status": "cancelled",
-                "completed_at": datetime.utcnow(),
+                "completed_at": datetime.now(timezone.utc),
                 "cancelled_by": cancelled_by,
                 "error_message": f"Cancelled by {cancelled_by}" if cancelled_by else "Cancelled"
             }
@@ -270,7 +270,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
                 avg_processing_times[row.status] = float(row.avg_processing_time_ms)

             # Get recent activity (batches in last 7 days)
-            seven_days_ago = datetime.utcnow() - timedelta(days=7)
+            seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
             recent_query = text(f"""
                 SELECT COUNT(*) as count
                 FROM prediction_batches
@@ -315,7 +315,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
     async def cleanup_old_batches(self, days_old: int = 30) -> int:
         """Clean up old completed/failed batches"""
         try:
-            cutoff_date = datetime.utcnow() - timedelta(days=days_old)
+            cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_old)

             query_text = """
                 DELETE FROM prediction_batches
@@ -354,7 +354,7 @@ class PredictionBatchRepository(ForecastingBaseRepository):
             if batch.completed_at:
                 elapsed_time_ms = int((batch.completed_at - batch.requested_at).total_seconds() * 1000)
             elif batch.status in ["pending", "processing"]:
-                elapsed_time_ms = int((datetime.utcnow() - batch.requested_at).total_seconds() * 1000)
+                elapsed_time_ms = int((datetime.now(timezone.utc) - batch.requested_at).total_seconds() * 1000)

             return {
                 "batch_id": str(batch.id),

View File

@@ -6,7 +6,7 @@ Repository for prediction cache operations
 from typing import Optional, List, Dict, Any
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import text
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 import structlog
 import hashlib
@@ -50,7 +50,7 @@ class PredictionCacheRepository(ForecastingBaseRepository):
         """Cache a prediction result"""
         try:
             cache_key = self._generate_cache_key(tenant_id, inventory_product_id, location, forecast_date)
-            expires_at = datetime.utcnow() + timedelta(hours=expires_in_hours)
+            expires_at = datetime.now(timezone.utc) + timedelta(hours=expires_in_hours)

             cache_data = {
                 "cache_key": cache_key,
@@ -102,7 +102,7 @@ class PredictionCacheRepository(ForecastingBaseRepository):
                 return None

             # Check if cache entry has expired
-            if cache_entry.expires_at < datetime.utcnow():
+            if cache_entry.expires_at < datetime.now(timezone.utc):
                 logger.debug("Cache expired", cache_key=cache_key)
                 await self.delete(cache_entry.id)
                 return None
@@ -172,7 +172,7 @@ class PredictionCacheRepository(ForecastingBaseRepository):
                 WHERE expires_at < :now
             """

-            result = await self.session.execute(text(query_text), {"now": datetime.utcnow()})
+            result = await self.session.execute(text(query_text), {"now": datetime.now(timezone.utc)})
             deleted_count = result.rowcount

             logger.info("Cleaned up expired cache entries",
@@ -209,7 +209,7 @@ class PredictionCacheRepository(ForecastingBaseRepository):
                 {base_filter}
             """)

-            params["now"] = datetime.utcnow()
+            params["now"] = datetime.now(timezone.utc)

             result = await self.session.execute(stats_query, params)
             row = result.fetchone()
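The _generate_cache_key helper called above is not part of this diff; given the hashlib import, a plausible shape (purely illustrative, not the actual implementation) is a digest over the lookup dimensions:

import hashlib
from datetime import date

def _generate_cache_key(tenant_id: str, inventory_product_id: str,
                        location: str, forecast_date: date) -> str:
    # Deterministic, fixed-length key over the dimensions used for lookup.
    raw = f"{tenant_id}:{inventory_product_id}:{location}:{forecast_date.isoformat()}"
    return hashlib.sha256(raw.encode("utf-8")).hexdigest()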