Initial commit - production deployment

This commit is contained in:
2026-01-21 17:17:16 +01:00
commit c23d00dd92
2289 changed files with 638440 additions and 0 deletions

View File

@@ -0,0 +1,54 @@
# ================================================================
# services/forecasting/tests/conftest.py
# ================================================================
"""
Test configuration and fixtures for forecasting service
"""
import pytest
import asyncio
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy.pool import StaticPool
from app.core.config import settings
from shared.database.base import Base
# Test database URL
TEST_DATABASE_URL = "sqlite+aiosqlite:///:memory:"
@pytest.fixture(scope="session")
def event_loop():
    """Provide one event loop for the whole test session.

    Session scope lets async fixtures (e.g. ``test_db``) share a loop with
    the tests that use them.

    NOTE(review): overriding ``event_loop`` is deprecated in recent
    pytest-asyncio releases (>= 0.21); confirm the pinned plugin version
    before relying on this fixture, or migrate to ``asyncio_mode``/loop
    scoping in config.
    """
    # asyncio.new_event_loop() is the documented shorthand for going
    # through the event-loop policy.
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
@pytest.fixture
async def test_db():
    """Yield an AsyncSession bound to a fresh in-memory SQLite database.

    A new engine and schema are created per test; the engine is disposed
    afterwards even if the test body raises (previously a failing test
    could leak the engine because cleanup ran outside any try/finally).
    """
    engine = create_async_engine(
        TEST_DATABASE_URL,
        # StaticPool keeps a single connection alive so the in-memory
        # database survives across session checkouts.
        poolclass=StaticPool,
        connect_args={"check_same_thread": False},
        echo=False
    )
    try:
        # Create all tables registered on the shared declarative Base.
        async with engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)
        # Session factory; expire_on_commit=False keeps ORM objects usable
        # after commit in assertions.
        TestSessionLocal = async_sessionmaker(
            engine,
            class_=AsyncSession,
            expire_on_commit=False
        )
        # Provide the session to the test.
        async with TestSessionLocal() as session:
            yield session
    finally:
        # Always release the engine and its pooled connection.
        await engine.dispose()

View File

@@ -0,0 +1,114 @@
# ================================================================
# Integration Tests: tests/integration/test_forecasting_flow.py
# ================================================================
"""
Integration tests for complete forecasting flow
"""
import pytest
import httpx
import asyncio
from datetime import date, timedelta
import json
class TestForecastingFlow:
    """Test complete forecasting workflow"""

    @pytest.mark.asyncio
    async def test_complete_forecast_flow(self):
        """Test complete flow from training to forecasting"""
        gateway = "http://localhost:8000"  # API Gateway
        # Test identifiers shared across all steps of the flow.
        tenant = "test-tenant-123"
        product = "Pan Integral"
        store = "madrid_centro"

        async with httpx.AsyncClient() as http:
            # 1. Check if a trained model exists (response intentionally
            #    not asserted; the call itself exercises the endpoint).
            await http.get(
                f"{gateway}/api/v1/training/models/latest",
                params={
                    "tenant_id": tenant,
                    "product_name": product,
                    "location": store,
                },
            )

            # 2. Generate a single-day forecast for tomorrow.
            payload = {
                "tenant_id": tenant,
                "product_name": product,
                "location": store,
                "forecast_date": (date.today() + timedelta(days=1)).isoformat(),
                "business_type": "individual",
                "include_weather": True,
                "include_traffic": True,
                "confidence_level": 0.8,
            }
            created = await http.post(
                f"{gateway}/api/v1/forecasting/single",
                json=payload,
            )
            assert created.status_code == 200
            body = created.json()

            # Verify the forecast payload structure.
            for field in ("id", "predicted_demand", "confidence_lower", "confidence_upper"):
                assert field in body
            assert body["product_name"] == product

            # 3. The forecast list for this location must be non-empty now.
            listing = await http.get(
                f"{gateway}/api/v1/forecasting/list",
                params={"location": store},
            )
            assert listing.status_code == 200
            assert len(listing.json()) > 0

            # 4. Alerts endpoint must be reachable.
            alerts = await http.get(
                f"{gateway}/api/v1/forecasting/alerts"
            )
            assert alerts.status_code == 200

    @pytest.mark.asyncio
    async def test_batch_forecasting(self):
        """Test batch forecasting functionality"""
        gateway = "http://localhost:8000"
        request_body = {
            "tenant_id": "test-tenant-123",
            "batch_name": "Weekly Forecast Batch",
            "products": ["Pan Integral", "Croissant", "Café con Leche"],
            "location": "madrid_centro",
            "forecast_days": 7,
            "business_type": "individual",
            "include_weather": True,
            "include_traffic": True,
            "confidence_level": 0.8,
        }
        # Batch jobs can be slow; allow a generous timeout.
        async with httpx.AsyncClient(timeout=60.0) as http:
            reply = await http.post(
                f"{gateway}/api/v1/forecasting/batch",
                json=request_body,
            )
        assert reply.status_code == 200
        result = reply.json()
        assert "id" in result
        assert result["batch_name"] == "Weekly Forecast Batch"
        assert result["total_products"] == 21  # 3 products * 7 days
        assert result["status"] in ["completed", "partial"]

View File

@@ -0,0 +1,106 @@
# ================================================================
# Performance Tests: tests/performance/test_forecasting_performance.py
# ================================================================
"""
Performance tests for forecasting service
"""
import pytest
import httpx
import asyncio
import time
from concurrent.futures import ThreadPoolExecutor
import statistics
class TestForecastingPerformance:
    """Performance tests for forecasting operations"""

    @pytest.mark.asyncio
    async def test_single_forecast_performance(self):
        """Measure latency of 10 sequential single-forecast requests."""
        base_url = "http://localhost:8000"
        forecast_request = {
            "tenant_id": "perf-test-tenant",
            "product_name": "Pan Integral",
            "location": "madrid_centro",
            "forecast_date": "2024-01-17",
            "business_type": "individual",
            "confidence_level": 0.8
        }
        times = []
        async with httpx.AsyncClient() as client:
            for _ in range(10):
                # perf_counter() is monotonic and high-resolution — the
                # correct clock for interval timing (time.time() can jump).
                start_time = time.perf_counter()
                response = await client.post(
                    f"{base_url}/api/v1/forecasting/single",
                    json=forecast_request
                )
                times.append(time.perf_counter() - start_time)
                assert response.status_code == 200
        # Performance assertions
        avg_time = statistics.mean(times)
        # quantiles(n=20) returns 19 cut points; index 18 is the 95th percentile.
        p95_time = statistics.quantiles(times, n=20)[18]
        assert avg_time < 2.0, f"Average response time {avg_time}s exceeds 2s"
        assert p95_time < 5.0, f"95th percentile {p95_time}s exceeds 5s"
        print(f"Average response time: {avg_time:.2f}s")
        print(f"95th percentile: {p95_time:.2f}s")

    @pytest.mark.asyncio
    async def test_concurrent_forecasts(self):
        """Fire 20 concurrent forecast requests and check success rate/latency."""
        base_url = "http://localhost:8000"

        async def make_forecast_request(product_id):
            # One request per distinct product; returns status + latency.
            forecast_request = {
                "tenant_id": "perf-test-tenant",
                "product_name": f"Product_{product_id}",
                "location": "madrid_centro",
                "forecast_date": "2024-01-17",
                "business_type": "individual"
            }
            async with httpx.AsyncClient() as client:
                start_time = time.perf_counter()
                response = await client.post(
                    f"{base_url}/api/v1/forecasting/single",
                    json=forecast_request
                )
                return {
                    "status_code": response.status_code,
                    "response_time": time.perf_counter() - start_time,
                    "product_id": product_id
                }

        # Run 20 concurrent requests; exceptions are captured, not raised.
        tasks = [make_forecast_request(i) for i in range(20)]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Analyze results (exceptions count as failures).
        successful = [r for r in results if isinstance(r, dict) and r["status_code"] == 200]
        success_rate = len(successful) / len(results)
        assert success_rate >= 0.95, f"Success rate {success_rate} below 95%"
        print(f"Concurrent success rate: {success_rate:.2%}")
        if successful:
            avg_concurrent_time = statistics.mean([r["response_time"] for r in successful])
            assert avg_concurrent_time < 10.0, f"Average concurrent time {avg_concurrent_time}s exceeds 10s"
            # Printed inside this branch only: avg_concurrent_time is
            # unbound when no request succeeded.
            print(f"Average concurrent response time: {avg_concurrent_time:.2f}s")

View File

@@ -0,0 +1,399 @@
"""
Tests for Dynamic Business Rules Engine
"""
import pytest
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from app.ml.dynamic_rules_engine import DynamicRulesEngine
@pytest.fixture
def sample_sales_data():
    """Generate sample sales data for testing.

    One year of daily quantities around a base of 100 with a weekend
    uplift (+30%), a summer uplift (+20%, Jun-Aug) and ~10% multiplicative
    Gaussian noise. The RNG is seeded so tests that learn patterns from
    this data are deterministic (the original was unseeded and flaky).
    """
    rng = np.random.default_rng(42)
    dates = pd.date_range(start='2024-01-01', end='2024-12-31', freq='D')
    base = 100
    # Vectorised multipliers instead of a per-day Python loop.
    dow_multiplier = np.where(dates.dayofweek >= 5, 1.3, 1.0)   # weekends higher
    month_multiplier = np.where(dates.month.isin([6, 7, 8]), 1.2, 1.0)  # summer higher
    noise = rng.normal(1.0, 0.1, size=len(dates))
    quantities = base * dow_multiplier * month_multiplier * noise
    # 'ds'/'y' duplicate 'date'/'quantity' in Prophet's column convention.
    return pd.DataFrame({
        'date': dates,
        'ds': dates,
        'quantity': quantities,
        'y': quantities
    })
@pytest.fixture
def sample_weather_data():
    """Generate sample weather data for testing.

    ~10% rainy days, ~5% snowy days, the rest clear; temperature rises
    roughly 2°C per month with Gaussian noise. Seeded for determinism.
    """
    rng = np.random.default_rng(7)
    dates = pd.date_range(start='2024-01-01', end='2024-12-31', freq='D')
    weather_conditions = []
    temperatures = []
    precipitation = []
    for date in dates:
        # One draw per day classifies the condition, so the class
        # probabilities are exact. The original drew twice, which made
        # snow 0.9 * 0.05 = 4.5% instead of the commented 5%.
        draw = rng.random()
        if draw < 0.10:  # 10% rainy days
            weather_conditions.append('rain')
            precipitation.append(rng.uniform(5, 20))
        elif draw < 0.15:  # 5% snow
            weather_conditions.append('snow')
            precipitation.append(rng.uniform(2, 10))
        else:
            weather_conditions.append('clear')
            precipitation.append(0)
        # Temperature varies by month
        base_temp = 10 + (date.month - 1) * 2
        temperatures.append(base_temp + rng.normal(0, 5))
    return pd.DataFrame({
        'date': dates,
        'weather_condition': weather_conditions,
        'temperature': temperatures,
        'precipitation': precipitation
    })
@pytest.fixture
def sample_holiday_data():
    """Generate sample holiday data for testing.

    One row per day of 2024; a handful of fixed dates are flagged with a
    name and a type ('national' or 'religious'), all other days are
    non-holidays with None fields.
    """
    dates = pd.date_range(start='2024-01-01', end='2024-12-31', freq='D')
    holiday_dates = {
        '2024-01-01': ('New Year', 'national'),
        '2024-03-29': ('Good Friday', 'religious'),
        '2024-04-01': ('Easter Monday', 'religious'),
        '2024-12-25': ('Christmas', 'religious'),
        '2024-12-26': ('Boxing Day', 'national')
    }
    rows = []
    for day in dates:
        # Single dict lookup replaces the original if/else branches.
        name, kind = holiday_dates.get(day.strftime('%Y-%m-%d'), (None, None))
        rows.append({
            'date': day,
            'is_holiday': name is not None,
            'holiday_name': name,
            'holiday_type': kind
        })
    return pd.DataFrame(rows)
@pytest.fixture
def sales_with_weather_impact(sample_sales_data, sample_weather_data):
    """Generate sales data with weather impact.

    Rain scales demand by 0.85 (-15%), snow by 0.75 (-25%); both the
    'quantity' and the Prophet-style 'y' column are scaled.
    """
    merged = sample_sales_data.merge(sample_weather_data, on='date')
    # Vectorised map + multiply replaces the original row-by-row
    # iterrows() mutation (O(n) Python loop over a DataFrame).
    factor = merged['weather_condition'].map(
        {'rain': 0.85, 'snow': 0.75}  # -15% for rain, -25% for snow
    ).fillna(1.0)
    merged['quantity'] *= factor
    merged['y'] *= factor
    return merged
@pytest.fixture
def sales_with_holiday_impact(sample_sales_data, sample_holiday_data):
    """Generate sales data with holiday impact.

    Religious holidays scale demand by 1.6 (+60%), all other holidays by
    1.3 (+30%); both 'quantity' and 'y' are scaled.
    """
    merged = sample_sales_data.merge(sample_holiday_data, on='date')
    # Vectorised boolean masks replace the original row-by-row
    # iterrows() mutation.
    religious = merged['is_holiday'] & (merged['holiday_type'] == 'religious')
    national = merged['is_holiday'] & ~religious
    factor = np.where(religious, 1.6,            # +60% for religious holidays
                      np.where(national, 1.3,    # +30% for national holidays
                               1.0))
    merged['quantity'] *= factor
    merged['y'] *= factor
    return merged
@pytest.mark.asyncio
async def test_learn_weather_rules(sales_with_weather_impact, sample_weather_data):
    """Test weather rules learning."""
    engine = DynamicRulesEngine()
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sales_with_weather_impact,
        external_data=sample_weather_data,
        min_samples=5
    )

    # Weather rules must be present with a baseline and per-condition map.
    assert 'weather' in results['rules']
    weather = results['rules']['weather']
    assert 'baseline_avg' in weather
    assert 'conditions' in weather

    # If a rain rule was learned, its multiplier should reflect the -15%
    # impact baked into the fixture (i.e. roughly 0.85).
    rain_rule = weather['conditions'].get('rain')
    if rain_rule is not None:
        assert 'learned_multiplier' in rain_rule
        assert 'learned_impact_pct' in rain_rule
        assert rain_rule['sample_size'] >= 5
        assert 0.75 < rain_rule['learned_multiplier'] < 0.95

    # Insights comparing learned vs hardcoded rules must be generated.
    assert 'insights' in results
    assert len(results['insights']) > 0
@pytest.mark.asyncio
async def test_learn_holiday_rules(sales_with_holiday_impact, sample_holiday_data):
    """Test holiday rules learning."""
    engine = DynamicRulesEngine()
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sales_with_holiday_impact,
        external_data=sample_holiday_data,
        min_samples=2
    )

    # Holiday rules must be present with a baseline.
    assert 'holidays' in results['rules']
    holidays = results['rules']['holidays']
    assert 'baseline_avg' in holidays

    # The fixture applies +60% on religious and +30% on national holidays,
    # so the learned religious multiplier should come out higher.
    types = holidays.get('holiday_types', {})
    if 'religious' in types and 'national' in types:
        assert (types['religious']['learned_multiplier']
                > types['national']['learned_multiplier'])
@pytest.mark.asyncio
async def test_learn_day_of_week_rules(sample_sales_data):
    """Test day-of-week pattern learning."""
    engine = DynamicRulesEngine()
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sample_sales_data,
        external_data=None,
        min_samples=10
    )

    # Day-of-week rules must be present.
    assert 'day_of_week' in results['rules']
    dow_rules = results['rules']['day_of_week']
    assert 'days' in dow_rules

    # The fixture gives weekends a 1.3x uplift, so Saturday's learned
    # multiplier should exceed Monday's when both were learned.
    days = dow_rules['days']
    if {'Saturday', 'Monday'} <= days.keys():
        assert days['Saturday']['learned_multiplier'] > days['Monday']['learned_multiplier']
@pytest.mark.asyncio
async def test_learn_month_rules(sample_sales_data):
    """Test monthly seasonality learning."""
    engine = DynamicRulesEngine()
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sample_sales_data,
        external_data=None,
        min_samples=10
    )

    # Month rules must be present (nested 'months' map inside 'months').
    assert 'months' in results['rules']
    month_rules = results['rules']['months']
    assert 'months' in month_rules

    # The fixture applies a 1.2x summer uplift, so July should learn a
    # higher multiplier than January when both are present.
    months = month_rules['months']
    if {'July', 'January'} <= months.keys():
        assert months['July']['learned_multiplier'] > months['January']['learned_multiplier']
@pytest.mark.asyncio
async def test_insight_generation_weather_mismatch(sales_with_weather_impact, sample_weather_data):
    """Test that insights are generated when learned rules differ from hardcoded."""
    engine = DynamicRulesEngine()
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sales_with_weather_impact,
        external_data=sample_weather_data,
        min_samples=5
    )

    # Pick out insights whose title mentions the weather.
    weather_insights = [
        i for i in results['insights'] if 'weather' in i.get('title', '').lower()
    ]

    # Any weather insight must carry the full insight schema.
    if weather_insights:
        first = weather_insights[0]
        for field in ('type', 'priority', 'confidence', 'metrics_json',
                      'actionable', 'recommendation_actions'):
            assert field in first
@pytest.mark.asyncio
async def test_confidence_calculation():
    """Test confidence score calculation."""
    engine = DynamicRulesEngine()

    # Large sample + tiny p-value → high confidence (>= 90).
    assert engine._calculate_confidence(sample_size=150, p_value=0.001) >= 90

    # Moderate sample + moderate p-value → medium confidence ([60, 90)).
    medium = engine._calculate_confidence(sample_size=50, p_value=0.03)
    assert 60 <= medium < 90

    # Small sample + high p-value → low confidence (< 60).
    assert engine._calculate_confidence(sample_size=15, p_value=0.12) < 60
def test_get_rule():
    """Test getting learned rules."""
    engine = DynamicRulesEngine()

    # Seed the engine with known rules so retrieval can be verified.
    engine.weather_rules['product-1'] = {
        'conditions': {'rain': {'learned_multiplier': 0.85}}
    }
    engine.dow_rules['product-1'] = {
        'days': {'Saturday': {'learned_multiplier': 1.25}}
    }

    # Known rules come back as their learned multiplier.
    assert engine.get_rule('product-1', 'weather', 'rain') == 0.85
    assert engine.get_rule('product-1', 'day_of_week', 'Saturday') == 1.25

    # A condition that was never learned yields None.
    assert engine.get_rule('product-1', 'weather', 'tornado') is None
def test_export_rules_for_prophet():
    """Test exporting rules for Prophet integration."""
    engine = DynamicRulesEngine()

    # Seed two rule categories; the rest stay unset.
    engine.weather_rules['product-1'] = {'conditions': {'rain': {'learned_multiplier': 0.85}}}
    engine.holiday_rules['product-1'] = {'holiday_types': {'Christmas': {'learned_multiplier': 1.7}}}

    exported = engine.export_rules_for_prophet('product-1')

    # The export must contain every rule category, even the unset ones.
    for category in ('weather', 'holidays', 'events', 'day_of_week', 'months'):
        assert category in exported
@pytest.mark.asyncio
async def test_no_external_data(sample_sales_data):
    """Test that engine works with sales data only (no external data)."""
    engine = DynamicRulesEngine()
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sample_sales_data,
        external_data=None,
        min_samples=10
    )

    # Calendar-driven patterns can be learned from sales data alone.
    assert 'day_of_week' in results['rules']
    assert 'months' in results['rules']

    # Without external data, no (non-empty) weather conditions may appear.
    weather = results['rules'].get('weather', {})
    assert not weather.get('conditions')
@pytest.mark.asyncio
async def test_insufficient_samples(sample_sales_data):
    """Test handling of insufficient sample sizes."""
    engine = DynamicRulesEngine()

    # Only 30 days of data while demanding 50 samples per rule: the
    # engine must not crash, it just learns fewer rules.
    results = await engine.learn_all_rules(
        tenant_id='test-tenant',
        inventory_product_id='test-product',
        sales_data=sample_sales_data.head(30),
        external_data=None,
        min_samples=50  # Require more samples than available
    )

    # The result envelope is still well-formed.
    assert 'rules' in results
    assert 'insights' in results

View File

@@ -0,0 +1,135 @@
# ================================================================
# services/forecasting/tests/test_forecasting.py
# ================================================================
"""
Tests for forecasting service
"""
import pytest
import asyncio
from datetime import date, datetime, timedelta
from unittest.mock import Mock, AsyncMock, patch
import uuid
from app.services.forecasting_service import ForecastingService
from app.schemas.forecasts import ForecastRequest, BusinessType
from app.models.forecasts import Forecast
class TestForecastingService:
"""Test cases for ForecastingService"""
@pytest.fixture
def forecasting_service(self):
    """Provide a fresh ForecastingService instance per test (no shared state)."""
    return ForecastingService()
@pytest.fixture
def sample_forecast_request(self):
    """Build a valid single-product forecast request for tomorrow.

    A random tenant_id per test avoids collisions on tenant-scoped state.
    """
    return ForecastRequest(
        tenant_id=str(uuid.uuid4()),
        product_name="Pan Integral",
        location="madrid_centro",
        forecast_date=date.today() + timedelta(days=1),
        business_type=BusinessType.INDIVIDUAL,
        include_weather=True,
        include_traffic=True,
        confidence_level=0.8
    )
@pytest.mark.asyncio
async def test_generate_forecast_success(self, forecasting_service, sample_forecast_request):
    """Test successful forecast generation"""
    # Mock database session — no real database is touched in this unit test.
    mock_db = AsyncMock()
    # Patch every collaborator so only generate_forecast's own
    # orchestration logic is exercised.
    with patch.object(forecasting_service, '_get_latest_model') as mock_get_model, \
         patch.object(forecasting_service, '_prepare_forecast_features') as mock_prepare_features, \
         patch.object(forecasting_service.prediction_service, 'predict') as mock_predict, \
         patch.object(forecasting_service, '_check_and_create_alerts') as mock_check_alerts:
        # Setup mocks: a trained model exists for this product/location.
        mock_get_model.return_value = {
            "model_id": str(uuid.uuid4()),
            "version": "1.0.0",
            "algorithm": "prophet"
        }
        # Feature vector the model would be fed.
        mock_prepare_features.return_value = {
            "date": "2024-01-16",
            "day_of_week": 1,
            "is_weekend": False,
            "is_holiday": False,
            "temperature": 15.0,
            "precipitation": 0.0
        }
        # Point prediction plus confidence bounds.
        mock_predict.return_value = {
            "demand": 85.5,
            "lower_bound": 70.2,
            "upper_bound": 100.8
        }
        # Execute test
        result = await forecasting_service.generate_forecast(sample_forecast_request, mock_db)
        # Assertions: prediction values must be copied onto the Forecast.
        assert isinstance(result, Forecast)
        assert result.product_name == "Pan Integral"
        assert result.predicted_demand == 85.5
        assert result.confidence_lower == 70.2
        assert result.confidence_upper == 100.8
        # Verify each pipeline stage ran exactly once.
        mock_get_model.assert_called_once()
        mock_prepare_features.assert_called_once()
        mock_predict.assert_called_once()
        mock_check_alerts.assert_called_once()
@pytest.mark.asyncio
async def test_generate_forecast_no_model(self, forecasting_service, sample_forecast_request):
    """Test forecast generation when no model is found"""
    session = AsyncMock()

    # With no trained model available, the service must refuse to forecast.
    with patch.object(forecasting_service, '_get_latest_model', return_value=None):
        with pytest.raises(ValueError, match="No trained model found"):
            await forecasting_service.generate_forecast(sample_forecast_request, session)
@pytest.mark.asyncio
async def test_prepare_forecast_features(self, forecasting_service, sample_forecast_request):
"""Test feature preparation for forecasting"""
with patch.object(forecasting_service, '_is_spanish_holiday') as mock_holiday, \
patch.object(forecasting_service, '_get_weather_forecast') as mock_weather, \
patch.object(forecasting_service, '_get_traffic_forecast') as mock_traffic:
# Setup mocks
mock_holiday.return_value = False
mock_weather.return_value = {
"temperature": 18.5,
"precipitation": 0.0,
"humidity": 65.0,
"weather_description": "Clear"
}
mock_traffic.return_value = {
"traffic_volume": 1200,
"pedestrian_count": 850
}
# Execute test
features = await forecasting_service._prepare_forecast_features(sample_forecast_request)
# Assertions
assert "date" in features
assert "day_of_week" in features
assert "is_weekend" in features
assert "is_holiday" in features
assert features["business_type"] == "individual"
assert features["temperature"] == 18.5
assert features["traffic_volume"] == 1200