diff --git a/services/training/app/core/config.py b/services/training/app/core/config.py
index 04411e16..10112851 100644
--- a/services/training/app/core/config.py
+++ b/services/training/app/core/config.py
@@ -61,5 +61,10 @@ class TrainingSettings(BaseServiceSettings):
# Distributed Training (for future scaling)
DISTRIBUTED_TRAINING_ENABLED: bool = os.getenv("DISTRIBUTED_TRAINING_ENABLED", "false").lower() == "true"
TRAINING_WORKER_COUNT: int = int(os.getenv("TRAINING_WORKER_COUNT", "1"))
+
+ PROPHET_DAILY_SEASONALITY: bool = True
+ PROPHET_WEEKLY_SEASONALITY: bool = True
+ PROPHET_YEARLY_SEASONALITY: bool = True
+ PROPHET_SEASONALITY_MODE: str = "additive"
settings = TrainingSettings()
\ No newline at end of file
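For context on the four PROPHET_* settings added above: the junit_unit.xml output removed later in this diff shows `_create_prophet_model` in `app/ml/prophet_manager.py` reading `settings.PROPHET_DAILY_SEASONALITY`. Below is a minimal sketch of that consumption, assuming the standard `prophet.Prophet` constructor and that the other three settings are wired the same way; the real implementation lives in `app/ml/prophet_manager.py`.

```python
# Sketch only (not the repository code): how the new seasonality settings
# are presumably consumed when building a Prophet model.
from prophet import Prophet

from app.core.config import settings


def create_prophet_model() -> Prophet:
    """Build a Prophet model from the seasonality settings added above."""
    return Prophet(
        daily_seasonality=settings.PROPHET_DAILY_SEASONALITY,
        weekly_seasonality=settings.PROPHET_WEEKLY_SEASONALITY,
        yearly_seasonality=settings.PROPHET_YEARLY_SEASONALITY,
        seasonality_mode=settings.PROPHET_SEASONALITY_MODE,
    )
```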
diff --git a/services/training/tests/conftest.py b/services/training/tests/conftest.py
index b50da08f..8a0087de 100644
--- a/services/training/tests/conftest.py
+++ b/services/training/tests/conftest.py
@@ -18,6 +18,7 @@ from unittest.mock import Mock, AsyncMock, patch
from typing import Dict, List, Any, Generator
from pathlib import Path
import logging
+from app.models.training import ModelTrainingLog, TrainedModel
# Configure pytest-asyncio
pytestmark = pytest.mark.asyncio
@@ -213,16 +214,14 @@ async def test_app():
from app.main import app
return app
-
@pytest.fixture
-async def test_client(test_app):
- """Test client for API testing"""
- from httpx import AsyncClient
- async with AsyncClient(app=test_app, base_url="http://test") as client:
+def test_client(test_app):
+ """Synchronous test client for API testing"""
+ from fastapi.testclient import TestClient
+ with TestClient(test_app) as client:
yield client
-
-
+
@pytest.fixture
def auth_headers():
"""Mock authentication headers"""
@@ -452,7 +451,7 @@ def setup_test_environment():
yield
- # Cleanup environment
+ # Cleanup environment - FIXED: removed (scope="session")
test_vars = [
'ENVIRONMENT', 'LOG_LEVEL', 'MODEL_STORAGE_PATH',
'MAX_TRAINING_TIME_MINUTES', 'MIN_TRAINING_DATA_DAYS',
@@ -461,7 +460,8 @@ def setup_test_environment():
]
for var in test_vars:
- os.environ.pop(var, None)(scope="session")
+ os.environ.pop(var, None) # FIXED: removed the erroneous (scope="session")
+
def event_loop():
"""Create an instance of the default event loop for the test session."""
loop = asyncio.new_event_loop()
@@ -514,41 +514,60 @@ def pytest_collection_modifyitems(config, items):
# TEST DATABASE FIXTURES
# ================================================================
-@pytest.fixture
+@pytest_asyncio.fixture
async def test_db_session():
- """Mock database session for testing"""
- mock_session = AsyncMock()
+ """Create async test database session"""
+ from app.core.database import database_manager
- # Mock common database operations
- mock_session.add = Mock()
- mock_session.commit = AsyncMock()
- mock_session.rollback = AsyncMock()
- mock_session.refresh = AsyncMock()
- mock_session.close = AsyncMock()
- mock_session.execute = AsyncMock()
- mock_session.scalar = AsyncMock()
-
- return mock_session
-
+ async with database_manager.async_session_local() as session:
+ yield session
-@pytest.fixture
-def training_job_in_db():
- """Mock training job already in database"""
- from app.models.training import ModelTrainingLog
+@pytest_asyncio.fixture
+async def training_job_in_db(test_db_session):
+ """Create a training job in the database for testing"""
+ from datetime import datetime
job = ModelTrainingLog(
- job_id="test_job_123",
- tenant_id="test_tenant",
+ job_id="test-job-123",
+ tenant_id="test-tenant",
status="running",
progress=50,
- current_step="Training model for Pan Integral",
- config={"include_weather": True, "include_traffic": True},
- started_at=datetime.now(),
- logs=["Started training", "Processing data"]
+ current_step="Training models",
+ start_time=datetime.now(), # Use start_time, not started_at
+ config={"include_weather": True},
+ created_at=datetime.now(),
+ updated_at=datetime.now()
)
-
+ test_db_session.add(job)
+ await test_db_session.commit()
+ await test_db_session.refresh(job)
return job
+@pytest_asyncio.fixture
+async def trained_model_in_db(test_db_session):
+ """Create a trained model in the database for testing"""
+ from datetime import datetime
+
+ model = TrainedModel(
+ model_id="test-model-123",
+ tenant_id="test-tenant",
+ product_name="Pan Integral",
+ model_type="prophet",
+ model_path="/tmp/test_model.pkl",
+ version=1,
+ training_samples=100,
+ features=["temperature", "humidity"],
+ hyperparameters={"seasonality_mode": "additive"},
+ training_metrics={"mae": 2.5, "mse": 8.3},
+ is_active=True,
+ created_at=datetime.now()
+ )
+ test_db_session.add(model)
+ await test_db_session.commit()
+ await test_db_session.refresh(model)
+ return model
# ================================================================
# SAMPLE DATA FIXTURES
@@ -843,6 +862,24 @@ def mock_data_processor():
yield mock_instance
+@pytest.fixture
+def mock_data_service():
+ """Mock data service for testing"""
+ from unittest.mock import Mock, AsyncMock
+
+ mock_service = Mock()
+ mock_service.get_sales_data = AsyncMock(return_value=[
+ {"date": "2024-01-01", "product_name": "Pan Integral", "quantity": 45},
+ {"date": "2024-01-02", "product_name": "Pan Integral", "quantity": 38}
+ ])
+ mock_service.get_weather_data = AsyncMock(return_value=[
+ {"date": "2024-01-01", "temperature": 20.5, "humidity": 65}
+ ])
+ mock_service.get_traffic_data = AsyncMock(return_value=[
+ {"date": "2024-01-01", "traffic_index": 0.7}
+ ])
+
+ return mock_service
@pytest.fixture
def mock_prophet_manager():
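A sketch of how the reworked fixtures above might be exercised. The endpoint paths and status assertions mirror the tests/test_api.py excerpts quoted in the removed junit_unit.xml output; the test names and bodies here are illustrative only, not the repository's tests.

```python
import pytest


def test_health_check(test_client):
    # test_client is now a synchronous fixture, so no await is needed.
    response = test_client.get("/health")
    assert response.status_code == 200


@pytest.mark.asyncio
async def test_list_models(test_client, trained_model_in_db):
    # trained_model_in_db persists a TrainedModel row through the async
    # session before the request, so /models has data to return.
    response = test_client.get("/models")
    # The endpoint may not exist yet, matching the original test's tolerance.
    assert response.status_code in (200, 404)
```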
diff --git a/services/training/tests/results/coverage_end_to_end.xml b/services/training/tests/results/coverage_end_to_end.xml
deleted file mode 100644
index efc0fce2..00000000
--- a/services/training/tests/results/coverage_end_to_end.xml
+++ /dev/null
@@ -1,1670 +0,0 @@
- [1,670 lines of coverage XML for source /app/app omitted]
diff --git a/services/training/tests/results/coverage_integration.xml b/services/training/tests/results/coverage_integration.xml
deleted file mode 100644
index 50459467..00000000
--- a/services/training/tests/results/coverage_integration.xml
+++ /dev/null
@@ -1,1670 +0,0 @@
- [1,670 lines of coverage XML for source /app/app omitted]
diff --git a/services/training/tests/results/coverage_performance.xml b/services/training/tests/results/coverage_performance.xml
deleted file mode 100644
index 23f7bfff..00000000
--- a/services/training/tests/results/coverage_performance.xml
+++ /dev/null
@@ -1,1670 +0,0 @@
- [1,670 lines of coverage XML for source /app/app omitted]
diff --git a/services/training/tests/results/coverage_unit.xml b/services/training/tests/results/coverage_unit.xml
deleted file mode 100644
index 75018877..00000000
--- a/services/training/tests/results/coverage_unit.xml
+++ /dev/null
@@ -1,1670 +0,0 @@
- [1,670 lines of coverage XML for source /app/app omitted]
diff --git a/services/training/tests/results/junit_end_to_end.xml b/services/training/tests/results/junit_end_to_end.xml
deleted file mode 100644
index b9859adf..00000000
--- a/services/training/tests/results/junit_end_to_end.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-tests/test_end_to_end.py:75: in real_bakery_data
- temp = 15 + 12 * np.sin((date.timetuple().tm_yday / 365) * 2 * np.pi)
-E UnboundLocalError: cannot access local variable 'np' where it is not associated with a value
-tests/conftest.py:464: in setup_test_environment
- os.environ.pop(var, None)(scope="session")
-E TypeError: 'str' object is not callable
\ No newline at end of file
diff --git a/services/training/tests/results/junit_integration.xml b/services/training/tests/results/junit_integration.xml
deleted file mode 100644
index f3741a14..00000000
--- a/services/training/tests/results/junit_integration.xml
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/services/training/tests/results/junit_performance.xml b/services/training/tests/results/junit_performance.xml
deleted file mode 100644
index 39f7291f..00000000
--- a/services/training/tests/results/junit_performance.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-ImportError while importing test module '/app/tests/test_performance.py'.
-Hint: make sure your test modules/packages have valid Python names.
-Traceback:
-/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
- return _bootstrap._gcd_import(name[level:], package, level)
-tests/test_performance.py:16: in <module>
- import psutil
-E ModuleNotFoundError: No module named 'psutil'
\ No newline at end of file
diff --git a/services/training/tests/results/junit_unit.xml b/services/training/tests/results/junit_unit.xml
deleted file mode 100644
index 7e64b266..00000000
--- a/services/training/tests/results/junit_unit.xml
+++ /dev/null
@@ -1,649 +0,0 @@
-tests/test_api.py:20: in test_health_check
- response = await test_client.get("/health")
-E AttributeError: 'async_generator' object has no attribute 'get'tests/test_api.py:32: in test_readiness_check_ready
- with patch('app.main.app.state.ready', True):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <starlette.datastructures.State object at 0xffff5ae06a10> does not have the attribute 'ready'tests/test_api.py:42: in test_readiness_check_not_ready
- with patch('app.main.app.state.ready', False):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <starlette.datastructures.State object at 0xffff5ae06a10> does not have the attribute 'ready'tests/test_api.py:53: in test_liveness_check_healthy
- response = await test_client.get("/health/live")
-E AttributeError: 'async_generator' object has no attribute 'get'tests/test_api.py:63: in test_liveness_check_unhealthy
- response = await test_client.get("/health/live")
-E AttributeError: 'async_generator' object has no attribute 'get'tests/test_api.py:73: in test_metrics_endpoint
- response = await test_client.get("/metrics")
-E AttributeError: 'async_generator' object has no attribute 'get'tests/test_api.py:92: in test_root_endpoint
- response = await test_client.get("/")
-E AttributeError: 'async_generator' object has no attribute 'get'file /app/tests/test_api.py, line 104
- @pytest.mark.asyncio
- async def test_start_training_job_success(
- self,
- test_client: AsyncClient,
- mock_messaging,
- mock_ml_trainer,
- mock_data_service
- ):
- """Test starting a training job successfully"""
- request_data = {
- "include_weather": True,
- "include_traffic": True,
- "min_data_points": 30,
- "seasonality_mode": "additive"
- }
-
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
- response = await test_client.post("/training/jobs", json=request_data)
-
- assert response.status_code == status.HTTP_200_OK
- data = response.json()
-
- assert "job_id" in data
- assert data["status"] == "started"
- assert data["tenant_id"] == "test-tenant"
- assert "estimated_duration_minutes" in data
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:104tests/test_api.py:139: in test_start_training_job_validation_error
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/test_api.py:167: in test_get_training_status_nonexistent_job
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/test_api.py:233: in test_cancel_nonexistent_job
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogfile /app/tests/test_api.py, line 257
- @pytest.mark.asyncio
- async def test_validate_training_data_valid(
- self,
- test_client: AsyncClient,
- mock_data_service
- ):
- """Test validating valid training data"""
- request_data = {
- "include_weather": True,
- "include_traffic": True,
- "min_data_points": 30
- }
-
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
- response = await test_client.post("/training/validate", json=request_data)
-
- assert response.status_code == status.HTTP_200_OK
- data = response.json()
-
- assert "is_valid" in data
- assert "issues" in data
- assert "recommendations" in data
- assert "estimated_training_time" in data
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:257file /app/tests/test_api.py, line 285
- @pytest.mark.asyncio
- async def test_train_single_product_success(
- self,
- test_client: AsyncClient,
- mock_messaging,
- mock_ml_trainer,
- mock_data_service
- ):
- """Test training a single product successfully"""
- product_name = "Pan Integral"
- request_data = {
- "include_weather": True,
- "include_traffic": True,
- "seasonality_mode": "additive"
- }
-
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
- response = await test_client.post(
- f"/training/products/{product_name}",
- json=request_data
- )
-
- assert response.status_code == status.HTTP_200_OK
- data = response.json()
-
- assert "job_id" in data
- assert data["status"] == "started"
- assert data["tenant_id"] == "test-tenant"
- assert f"training started for {product_name}" in data["message"].lower()
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:285tests/test_api.py:323: in test_train_single_product_validation_error
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'file /app/tests/test_api.py, line 331
- @pytest.mark.asyncio
- async def test_train_single_product_special_characters(
- self,
- test_client: AsyncClient,
- mock_messaging,
- mock_ml_trainer,
- mock_data_service
- ):
- """Test training product with special characters in name"""
- product_name = "Pan Francés" # With accent
- request_data = {
- "include_weather": True,
- "seasonality_mode": "additive"
- }
-
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
- response = await test_client.post(
- f"/training/products/{product_name}",
- json=request_data
- )
-
- assert response.status_code == status.HTTP_200_OK
- data = response.json()
- assert "job_id" in data
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:331file /app/tests/test_api.py, line 360
- @pytest.mark.asyncio
- async def test_list_models(
- self,
- test_client: AsyncClient,
- trained_model_in_db
- ):
- """Test listing trained models"""
- with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"):
- response = await test_client.get("/models")
-
- # This endpoint might not exist yet, so we expect either 200 or 404
- assert response.status_code in [status.HTTP_200_OK, status.HTTP_404_NOT_FOUND]
-
- if response.status_code == status.HTTP_200_OK:
- data = response.json()
- assert isinstance(data, list)
-E fixture 'trained_model_in_db' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:360file /app/tests/test_api.py, line 377
- @pytest.mark.asyncio
- async def test_get_model_details(
- self,
- test_client: AsyncClient,
- trained_model_in_db
- ):
- """Test getting model details"""
- model_id = trained_model_in_db.model_id
-
- with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"):
- response = await test_client.get(f"/models/{model_id}")
-
- # This endpoint might not exist yet
- assert response.status_code in [
- status.HTTP_200_OK,
- status.HTTP_404_NOT_FOUND,
- status.HTTP_501_NOT_IMPLEMENTED
- ]
-E fixture 'trained_model_in_db' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:377tests/test_api.py:412: in test_database_error_handling
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:427: in test_missing_tenant_id
- response = await test_client.post("/training/jobs", json=request_data)
-E AttributeError: 'async_generator' object has no attribute 'post'tests/test_api.py:437: in test_invalid_job_id_format
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'file /app/tests/test_api.py, line 443
- @pytest.mark.asyncio
- async def test_messaging_failure_handling(
- self,
- test_client: AsyncClient,
- mock_data_service
- ):
- """Test handling when messaging fails"""
- request_data = {
- "include_weather": True,
- "include_traffic": True,
- "min_data_points": 30
- }
-
- with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")), \
- patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-
- response = await test_client.post("/training/jobs", json=request_data)
-
- # Should still succeed even if messaging fails
- assert response.status_code == status.HTTP_200_OK
- data = response.json()
- assert "job_id" in data
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_api.py:443tests/test_api.py:469: in test_invalid_json_payload
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:481: in test_unsupported_content_type
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:512: in test_endpoints_require_auth
- response = await test_client.post(endpoint, json={})
-E AttributeError: 'async_generator' object has no attribute 'post'tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/test_api.py:555: in test_training_request_validation
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:591: in test_single_product_request_validation
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:612: in test_query_parameter_validation
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:643: in test_concurrent_requests
- with patch('app.api.training.get_current_tenant_id', return_value=f"tenant-{i}"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:665: in test_large_payload_handling
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
-/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
- original, local = self.get_original()
-/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
- raise AttributeError(
-E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'tests/test_api.py:681: in test_rapid_successive_requests
- response = await test_client.get("/health")
-E AttributeError: 'async_generator' object has no attribute 'get'tests/test_ml.py:201: in test_prepare_training_data_insufficient_data
- with pytest.raises(Exception):
-E Failed: DID NOT RAISE <class 'Exception'>tests/test_ml.py:239: in test_train_bakery_model_success
- result = await prophet_manager.train_bakery_model(
-app/ml/prophet_manager.py:70: in train_bakery_model
- model = self._create_prophet_model(regressor_columns)
-app/ml/prophet_manager.py:238: in _create_prophet_model
- daily_seasonality=settings.PROPHET_DAILY_SEASONALITY,
-/usr/local/lib/python3.11/site-packages/pydantic/main.py:761: in __getattr__
- raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')
-E AttributeError: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'tests/test_ml.py:414: in test_train_single_product_success
- result = await ml_trainer.train_single_product(
-app/ml/trainer.py:149: in train_single_product
- model_info = await self.prophet_manager.train_bakery_model(
-app/ml/prophet_manager.py:61: in train_bakery_model
- await self._validate_training_data(df, product_name)
-app/ml/prophet_manager.py:158: in _validate_training_data
- raise ValueError(
-E ValueError: Insufficient training data for Pan Integral: 3 days, minimum required: 30tests/test_ml.py:438: in test_train_single_product_no_data
- await ml_trainer.train_single_product(
-app/ml/trainer.py:134: in train_single_product
- product_sales = sales_df[sales_df['product_name'] == product_name].copy()
-/usr/local/lib/python3.11/site-packages/pandas/core/frame.py:3893: in __getitem__
- indexer = self.columns.get_loc(key)
-/usr/local/lib/python3.11/site-packages/pandas/core/indexes/range.py:418: in get_loc
- raise KeyError(key)
-E KeyError: 'product_name'/app/tests/test_ml.py:508: Requires actual Prophet dependencies for integration test/app/tests/test_ml.py:513: Requires actual dependencies for integration testapp/services/training_service.py:52: in create_training_job
- db.add(training_log)
-E AttributeError: 'coroutine' object has no attribute 'add'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:34: in test_create_training_job_success
- result = await training_service.create_training_job(
-app/services/training_service.py:61: in create_training_job
- await db.rollback()
-E AttributeError: 'coroutine' object has no attribute 'rollback'app/services/training_service.py:84: in create_single_product_job
- db.add(training_log)
-E AttributeError: 'coroutine' object has no attribute 'add'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:60: in test_create_single_product_job_success
- result = await training_service.create_single_product_job(
-app/services/training_service.py:93: in create_single_product_job
- await db.rollback()
-E AttributeError: 'coroutine' object has no attribute 'rollback'tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogtests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLogapp/services/training_service.py:270: in cancel_training_job
- result = await db.execute(
-E AttributeError: 'coroutine' object has no attribute 'execute'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:175: in test_cancel_nonexistent_job
- result = await training_service.cancel_training_job(
-app/services/training_service.py:297: in cancel_training_job
- await db.rollback()
-E AttributeError: 'coroutine' object has no attribute 'rollback'file /app/tests/test_service.py, line 183
- @pytest.mark.asyncio
- async def test_validate_training_data_valid(
- self,
- training_service,
- test_db_session,
- mock_data_service
- ):
- """Test validation with valid data"""
- config = {"min_data_points": 30}
-
- result = await training_service.validate_training_data(
- db=test_db_session,
- tenant_id="test-tenant",
- config=config
- )
-
- assert isinstance(result, dict)
- assert "is_valid" in result
- assert "issues" in result
- assert "recommendations" in result
- assert "estimated_time_minutes" in result
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_service.py:183
-tests/test_service.py:221: in test_validate_training_data_no_data
- assert result["is_valid"] is False
-E       assert True is False
-tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E       TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog
-app/services/training_service.py:572: in _store_trained_models
- await db.execute(
-E AttributeError: 'coroutine' object has no attribute 'execute'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:280: in test_store_trained_models
- await training_service._store_trained_models(
-app/services/training_service.py:592: in _store_trained_models
- await db.rollback()
-E       AttributeError: 'coroutine' object has no attribute 'rollback'
-tests/conftest.py:539: in training_job_in_db
- job = ModelTrainingLog(
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
- with util.safe_reraise():
-/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
- raise exc_value.with_traceback(exc_tb)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
- manager.original_init(*mixed[1:], **kwargs)
-/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
- raise TypeError(
-E       TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog
-file /app/tests/test_service.py, line 468
- @pytest.mark.asyncio
- async def test_execute_training_job_success(
- self,
- training_service,
- test_db_session,
- mock_messaging,
- mock_data_service
- ):
- """Test successful training job execution"""
- # Create job first
- job_id = "test-execution-job"
- training_log = await training_service.create_training_job(
- db=test_db_session,
- tenant_id="test-tenant",
- job_id=job_id,
- config={"include_weather": True}
- )
-
- request = TrainingJobRequest(
- include_weather=True,
- include_traffic=True,
- min_data_points=30
- )
-
- with patch('app.services.training_service.TrainingService._fetch_sales_data') as mock_fetch_sales, \
- patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \
- patch('app.services.training_service.TrainingService._fetch_traffic_data') as mock_fetch_traffic, \
- patch('app.services.training_service.TrainingService._store_trained_models') as mock_store:
-
- mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": "Pan Integral", "quantity": 45}]
- mock_fetch_weather.return_value = []
- mock_fetch_traffic.return_value = []
- mock_store.return_value = None
-
- await training_service.execute_training_job(
- db=test_db_session,
- job_id=job_id,
- tenant_id="test-tenant",
- request=request
- )
-
- # Verify job was completed
- updated_job = await training_service.get_job_status(
- db=test_db_session,
- job_id=job_id,
- tenant_id="test-tenant"
- )
-
- assert updated_job.status == "completed"
- assert updated_job.progress == 100
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_service.py:468
-app/services/training_service.py:52: in create_training_job
- db.add(training_log)
-E AttributeError: 'coroutine' object has no attribute 'add'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:529: in test_execute_training_job_failure
- await training_service.create_training_job(
-app/services/training_service.py:61: in create_training_job
- await db.rollback()
-E       AttributeError: 'coroutine' object has no attribute 'rollback'
-file /app/tests/test_service.py, line 559
- @pytest.mark.asyncio
- async def test_execute_single_product_training_success(
- self,
- training_service,
- test_db_session,
- mock_messaging,
- mock_data_service
- ):
- """Test successful single product training execution"""
- job_id = "test-single-product-job"
- product_name = "Pan Integral"
-
- await training_service.create_single_product_job(
- db=test_db_session,
- tenant_id="test-tenant",
- product_name=product_name,
- job_id=job_id,
- config={}
- )
-
- request = SingleProductTrainingRequest(
- include_weather=True,
- include_traffic=False
- )
-
- with patch('app.services.training_service.TrainingService._fetch_product_sales_data') as mock_fetch_sales, \
- patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \
- patch('app.services.training_service.TrainingService._store_single_trained_model') as mock_store:
-
- mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": product_name, "quantity": 45}]
- mock_fetch_weather.return_value = []
- mock_store.return_value = None
-
- await training_service.execute_single_product_training(
- db=test_db_session,
- job_id=job_id,
- tenant_id="test-tenant",
- product_name=product_name,
- request=request
- )
-
- # Verify job was completed
- updated_job = await training_service.get_job_status(
- db=test_db_session,
- job_id=job_id,
- tenant_id="test-tenant"
- )
-
- assert updated_job.status == "completed"
- assert updated_job.progress == 100
-E fixture 'mock_data_service' not found
-> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
-> use 'pytest --fixtures [testpath]' for help on them.
-
-/app/tests/test_service.py:559
-app/services/training_service.py:52: in create_training_job
- db.add(training_log)
-E AttributeError: 'coroutine' object has no attribute 'add'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:660: in test_concurrent_job_creation
- job = await training_service.create_training_job(
-app/services/training_service.py:61: in create_training_job
- await db.rollback()
-E       AttributeError: 'coroutine' object has no attribute 'rollback'
-app/services/training_service.py:52: in create_training_job
- db.add(training_log)
-E AttributeError: 'coroutine' object has no attribute 'add'
-
-During handling of the above exception, another exception occurred:
-tests/test_service.py:681: in test_malformed_config_handling
- job = await training_service.create_training_job(
-app/services/training_service.py:61: in create_training_job
- await db.rollback()
-E       AttributeError: 'coroutine' object has no attribute 'rollback'
-tests/conftest.py:464: in setup_test_environment
- os.environ.pop(var, None)(scope="session")
-E TypeError: 'str' object is not callable
\ No newline at end of file
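
The recurring TypeError above ('started_at' is an invalid keyword argument for ModelTrainingLog) means the training_job_in_db fixture passes a keyword that is not a mapped attribute of the model. A quick diagnostic, separate from the service code, is to list the constructor keywords SQLAlchemy will actually accept; the snippet below is a sketch assuming only that ModelTrainingLog is a declaratively mapped class, which the traceback confirms.

    from sqlalchemy import inspect
    from app.models.training import ModelTrainingLog

    # The default declarative constructor only accepts mapped attribute names,
    # so anything printed here is a valid keyword; 'started_at' evidently is not.
    print(sorted(attr.key for attr in inspect(ModelTrainingLog).attrs))

Once the model's real timestamp column name is confirmed, the fixture can either use that keyword or assign the value after construction.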
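
Many of the setup errors reduce to the same gap: tests request a mock_data_service fixture that conftest.py never defines, so collection fails before any assertion runs. A minimal sketch of a placeholder fixture follows; the AsyncMock stand-in and the fetch_* attribute names are assumptions, since the real data-service client interface is not shown in this diff.

    import pytest
    from unittest.mock import AsyncMock

    @pytest.fixture
    def mock_data_service():
        """Placeholder for the external data service (interface assumed)."""
        service = AsyncMock()
        # The tests above patch TrainingService._fetch_* directly, so these
        # defaults only keep accidental calls from raising.
        service.fetch_sales_data.return_value = []
        service.fetch_weather_data.return_value = []
        service.fetch_traffic_data.return_value = []
        return service
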
diff --git a/services/training/tests/results/test_report.json b/services/training/tests/results/test_report.json
index 9e1e8427..da82d90d 100644
--- a/services/training/tests/results/test_report.json
+++ b/services/training/tests/results/test_report.json
@@ -1,50 +1,50 @@
{
"test_run_summary": {
- "timestamp": "2025-07-25T11:22:46.885733",
+ "timestamp": "2025-07-25T12:08:36.927797",
"total_suites": 4,
"passed_suites": 0,
"failed_suites": 4,
"error_suites": 0,
"timeout_suites": 0,
"success_rate": 0.0,
- "total_duration_seconds": 9.403800010681152
+ "total_duration_seconds": 1.9828898906707764
},
"suite_results": {
"unit": {
"suite": "unit",
"status": "failed",
- "return_code": 1,
- "duration": 6.222464323043823,
- "stdout": "\n================================================================================\nTRAINING SERVICE TEST SESSION STARTING\n================================================================================\n============================= test session starts ==============================\nplatform linux -- Python 3.11.13, pytest-7.4.3, pluggy-1.6.0 -- /usr/local/bin/python\ncachedir: .pytest_cache\nrootdir: /app\nplugins: anyio-3.7.1, mock-3.12.0, asyncio-0.21.1, cov-4.1.0\nasyncio: mode=Mode.STRICT\ncollecting ... collected 83 items\n\ntests/test_api.py::TestTrainingAPI::test_health_check FAILED\ntests/test_api.py::TestTrainingAPI::test_readiness_check_ready 2025-07-25 11:22:39 [INFO] shared.monitoring.logging: Logging configured for training-service at level INFO\nFAILED\ntests/test_api.py::TestTrainingAPI::test_readiness_check_not_ready FAILED\ntests/test_api.py::TestTrainingAPI::test_liveness_check_healthy FAILED\ntests/test_api.py::TestTrainingAPI::test_liveness_check_unhealthy FAILED\ntests/test_api.py::TestTrainingAPI::test_metrics_endpoint FAILED\ntests/test_api.py::TestTrainingAPI::test_root_endpoint FAILED\ntests/test_api.py::TestTrainingJobsAPI::test_start_training_job_success ERROR\ntests/test_api.py::TestTrainingJobsAPI::test_start_training_job_validation_error FAILED\ntests/test_api.py::TestTrainingJobsAPI::test_get_training_status_existing_job ERROR\ntests/test_api.py::TestTrainingJobsAPI::test_get_training_status_nonexistent_job FAILED\ntests/test_api.py::TestTrainingJobsAPI::test_list_training_jobs ERROR\ntests/test_api.py::TestTrainingJobsAPI::test_list_training_jobs_with_status_filter ERROR\ntests/test_api.py::TestTrainingJobsAPI::test_cancel_training_job_success ERROR\ntests/test_api.py::TestTrainingJobsAPI::test_cancel_nonexistent_job FAILED\ntests/test_api.py::TestTrainingJobsAPI::test_get_training_logs ERROR\ntests/test_api.py::TestTrainingJobsAPI::test_validate_training_data_valid ERROR\ntests/test_api.py::TestSingleProductTrainingAPI::test_train_single_product_success ERROR\ntests/test_api.py::TestSingleProductTrainingAPI::test_train_single_product_validation_error FAILED\ntests/test_api.py::TestSingleProductTrainingAPI::test_train_single_product_special_characters ERROR\ntests/test_api.py::TestModelsAPI::test_list_models ERROR\ntests/test_api.py::TestModelsAPI::test_get_model_details ERROR\ntests/test_api.py::TestErrorHandling::test_database_error_handling FAILED\ntests/test_api.py::TestErrorHandling::test_missing_tenant_id FAILED\ntests/test_api.py::TestErrorHandling::test_invalid_job_id_format FAILED\ntests/test_api.py::TestErrorHandling::test_messaging_failure_handling ERROR\ntests/test_api.py::TestErrorHandling::test_invalid_json_payload FAILED\ntests/test_api.py::TestErrorHandling::test_unsupported_content_type FAILED\ntests/test_api.py::TestAuthenticationIntegration::test_endpoints_require_auth FAILED\ntests/test_api.py::TestAuthenticationIntegration::test_tenant_isolation_in_api ERROR\ntests/test_api.py::TestAPIValidation::test_training_request_validation FAILED\ntests/test_api.py::TestAPIValidation::test_single_product_request_validation FAILED\ntests/test_api.py::TestAPIValidation::test_query_parameter_validation FAILED\ntests/test_api.py::TestAPIPerformance::test_concurrent_requests FAILED\ntests/test_api.py::TestAPIPerformance::test_large_payload_handling FAILED\ntests/test_api.py::TestAPIPerformance::test_rapid_successive_requests FAILED\ntests/test_ml.py::TestBakeryDataProcessor::test_prepare_training_data_basic 
PASSED\ntests/test_ml.py::TestBakeryDataProcessor::test_prepare_training_data_empty_weather PASSED\ntests/test_ml.py::TestBakeryDataProcessor::test_prepare_prediction_features PASSED\ntests/test_ml.py::TestBakeryDataProcessor::test_add_temporal_features PASSED\ntests/test_ml.py::TestBakeryDataProcessor::test_spanish_holiday_detection PASSED\ntests/test_ml.py::TestBakeryDataProcessor::test_prepare_training_data_insufficient_data FAILED\ntests/test_ml.py::TestBakeryProphetManager::test_train_bakery_model_success FAILED\ntests/test_ml.py::TestBakeryProphetManager::test_validate_training_data_valid PASSED\ntests/test_ml.py::TestBakeryProphetManager::test_validate_training_data_insufficient PASSED\ntests/test_ml.py::TestBakeryProphetManager::test_validate_training_data_missing_columns PASSED\ntests/test_ml.py::TestBakeryProphetManager::test_get_spanish_holidays PASSED\ntests/test_ml.py::TestBakeryProphetManager::test_extract_regressor_columns PASSED\ntests/test_ml.py::TestBakeryProphetManager::test_generate_forecast PASSED\ntests/test_ml.py::TestBakeryMLTrainer::test_train_tenant_models_success PASSED\ntests/test_ml.py::TestBakeryMLTrainer::test_train_single_product_success FAILED\ntests/test_ml.py::TestBakeryMLTrainer::test_train_single_product_no_data FAILED\ntests/test_ml.py::TestBakeryMLTrainer::test_validate_input_data_valid PASSED\ntests/test_ml.py::TestBakeryMLTrainer::test_validate_input_data_empty PASSED\ntests/test_ml.py::TestBakeryMLTrainer::test_validate_input_data_missing_columns PASSED\ntests/test_ml.py::TestBakeryMLTrainer::test_calculate_training_summary PASSED\ntests/test_ml.py::TestIntegrationML::test_end_to_end_training_flow SKIPPED\ntests/test_ml.py::TestIntegrationML::test_data_pipeline_integration SKIPPED\ntests/test_service.py::TestTrainingService::test_create_training_job_success FAILED\ntests/test_service.py::TestTrainingService::test_create_single_product_job_success FAILED\ntests/test_service.py::TestTrainingService::test_get_job_status_existing ERROR\ntests/test_service.py::TestTrainingService::test_get_job_status_nonexistent PASSED\ntests/test_service.py::TestTrainingService::test_list_training_jobs ERROR\ntests/test_service.py::TestTrainingService::test_list_training_jobs_with_filter ERROR\ntests/test_service.py::TestTrainingService::test_cancel_training_job_success ERROR\ntests/test_service.py::TestTrainingService::test_cancel_nonexistent_job FAILED\ntests/test_service.py::TestTrainingService::test_validate_training_data_valid ERROR\ntests/test_service.py::TestTrainingService::test_validate_training_data_no_data FAILED\ntests/test_service.py::TestTrainingService::test_update_job_status ERROR\ntests/test_service.py::TestTrainingService::test_store_trained_models FAILED\ntests/test_service.py::TestTrainingService::test_get_training_logs ERROR\ntests/test_service.py::TestTrainingServiceDataFetching::test_fetch_sales_data_success PASSED\ntests/test_service.py::TestTrainingServiceDataFetching::test_fetch_sales_data_error PASSED\ntests/test_service.py::TestTrainingServiceDataFetching::test_fetch_weather_data_success PASSED\ntests/test_service.py::TestTrainingServiceDataFetching::test_fetch_traffic_data_success PASSED\ntests/test_service.py::TestTrainingServiceDataFetching::test_fetch_data_with_date_filters PASSED\ntests/test_service.py::TestTrainingServiceExecution::test_execute_training_job_success ERROR\ntests/test_service.py::TestTrainingServiceExecution::test_execute_training_job_failure 
FAILED\ntests/test_service.py::TestTrainingServiceExecution::test_execute_single_product_training_success ERROR\ntests/test_service.py::TestTrainingServiceEdgeCases::test_database_connection_failure PASSED\ntests/test_service.py::TestTrainingServiceEdgeCases::test_external_service_timeout PASSED\ntests/test_service.py::TestTrainingServiceEdgeCases::test_concurrent_job_creation FAILED\ntests/test_service.py::TestTrainingServiceEdgeCases::test_malformed_config_handling FAILED\ntests/test_service.py::TestTrainingServiceEdgeCases::test_malformed_config_handling ERROR\n================================================================================\nTRAINING SERVICE TEST SESSION FINISHED\nExit Status: 1\n================================================================================\n\n\n==================================== ERRORS ====================================\n____ ERROR at setup of TestTrainingJobsAPI.test_start_training_job_success _____\nfile /app/tests/test_api.py, line 104\n @pytest.mark.asyncio\n async def test_start_training_job_success(\n self,\n test_client: AsyncClient,\n mock_messaging,\n mock_ml_trainer,\n mock_data_service\n ):\n \"\"\"Test starting a training job successfully\"\"\"\n request_data = {\n \"include_weather\": True,\n \"include_traffic\": True,\n \"min_data_points\": 30,\n \"seasonality_mode\": \"additive\"\n }\n\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n response = await test_client.post(\"/training/jobs\", json=request_data)\n\n assert response.status_code == status.HTTP_200_OK\n data = response.json()\n\n assert \"job_id\" in data\n assert data[\"status\"] == \"started\"\n assert data[\"tenant_id\"] == \"test-tenant\"\n assert \"estimated_duration_minutes\" in data\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:104\n_ ERROR at setup of TestTrainingJobsAPI.test_get_training_status_existing_job __\ntests/conftest.py:539: in training_job_in_db\n job = 
ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n________ ERROR at setup of TestTrainingJobsAPI.test_list_training_jobs _________\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n_ ERROR at setup of TestTrainingJobsAPI.test_list_training_jobs_with_status_filter _\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n____ ERROR at setup of TestTrainingJobsAPI.test_cancel_training_job_success ____\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n_________ ERROR at setup of TestTrainingJobsAPI.test_get_training_logs _________\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is 
an invalid keyword argument for ModelTrainingLog\n___ ERROR at setup of TestTrainingJobsAPI.test_validate_training_data_valid ____\nfile /app/tests/test_api.py, line 257\n @pytest.mark.asyncio\n async def test_validate_training_data_valid(\n self,\n test_client: AsyncClient,\n mock_data_service\n ):\n \"\"\"Test validating valid training data\"\"\"\n request_data = {\n \"include_weather\": True,\n \"include_traffic\": True,\n \"min_data_points\": 30\n }\n\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n response = await test_client.post(\"/training/validate\", json=request_data)\n\n assert response.status_code == status.HTTP_200_OK\n data = response.json()\n\n assert \"is_valid\" in data\n assert \"issues\" in data\n assert \"recommendations\" in data\n assert \"estimated_training_time\" in data\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:257\n_ ERROR at setup of TestSingleProductTrainingAPI.test_train_single_product_success _\nfile /app/tests/test_api.py, line 285\n @pytest.mark.asyncio\n async def test_train_single_product_success(\n self,\n test_client: AsyncClient,\n mock_messaging,\n mock_ml_trainer,\n mock_data_service\n ):\n \"\"\"Test training a single product successfully\"\"\"\n product_name = \"Pan Integral\"\n request_data = {\n \"include_weather\": True,\n \"include_traffic\": True,\n \"seasonality_mode\": \"additive\"\n }\n\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n response = await test_client.post(\n f\"/training/products/{product_name}\",\n json=request_data\n )\n\n assert response.status_code == status.HTTP_200_OK\n data = response.json()\n\n assert \"job_id\" in data\n assert data[\"status\"] == \"started\"\n assert data[\"tenant_id\"] == \"test-tenant\"\n assert f\"training started for {product_name}\" in data[\"message\"].lower()\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, 
auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:285\n_ ERROR at setup of TestSingleProductTrainingAPI.test_train_single_product_special_characters _\nfile /app/tests/test_api.py, line 331\n @pytest.mark.asyncio\n async def test_train_single_product_special_characters(\n self,\n test_client: AsyncClient,\n mock_messaging,\n mock_ml_trainer,\n mock_data_service\n ):\n \"\"\"Test training product with special characters in name\"\"\"\n product_name = \"Pan Franc\u00e9s\" # With accent\n request_data = {\n \"include_weather\": True,\n \"seasonality_mode\": \"additive\"\n }\n\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n response = await test_client.post(\n f\"/training/products/{product_name}\",\n json=request_data\n )\n\n assert response.status_code == status.HTTP_200_OK\n data = response.json()\n assert \"job_id\" in data\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, 
test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:331\n_______________ ERROR at setup of TestModelsAPI.test_list_models _______________\nfile /app/tests/test_api.py, line 360\n @pytest.mark.asyncio\n async def test_list_models(\n self,\n test_client: AsyncClient,\n trained_model_in_db\n ):\n \"\"\"Test listing trained models\"\"\"\n with patch('app.api.models.get_current_tenant_id', return_value=\"test-tenant\"):\n response = await test_client.get(\"/models\")\n\n # This endpoint might not exist yet, so we expect either 200 or 404\n assert response.status_code in [status.HTTP_200_OK, status.HTTP_404_NOT_FOUND]\n\n if response.status_code == status.HTTP_200_OK:\n data = response.json()\n assert isinstance(data, list)\nE fixture 'trained_model_in_db' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:360\n____________ ERROR at setup of TestModelsAPI.test_get_model_details ____________\nfile /app/tests/test_api.py, line 377\n @pytest.mark.asyncio\n async def test_get_model_details(\n self,\n test_client: AsyncClient,\n trained_model_in_db\n ):\n \"\"\"Test getting model details\"\"\"\n model_id = trained_model_in_db.model_id\n\n with patch('app.api.models.get_current_tenant_id', return_value=\"test-tenant\"):\n response = await test_client.get(f\"/models/{model_id}\")\n\n # This endpoint might not exist yet\n assert response.status_code in [\n status.HTTP_200_OK,\n status.HTTP_404_NOT_FOUND,\n status.HTTP_501_NOT_IMPLEMENTED\n ]\nE fixture 'trained_model_in_db' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, 
error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:377\n_____ ERROR at setup of TestErrorHandling.test_messaging_failure_handling ______\nfile /app/tests/test_api.py, line 443\n @pytest.mark.asyncio\n async def test_messaging_failure_handling(\n self,\n test_client: AsyncClient,\n mock_data_service\n ):\n \"\"\"Test handling when messaging fails\"\"\"\n request_data = {\n \"include_weather\": True,\n \"include_traffic\": True,\n \"min_data_points\": 30\n }\n\n with patch('app.services.messaging.publish_job_started', side_effect=Exception(\"Messaging failed\")), \\\n patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n\n response = await test_client.post(\"/training/jobs\", json=request_data)\n\n # Should still succeed even if messaging fails\n assert response.status_code == status.HTTP_200_OK\n data = response.json()\n assert \"job_id\" in data\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, 
unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_api.py:443\n_ ERROR at setup of TestAuthenticationIntegration.test_tenant_isolation_in_api _\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n______ ERROR at setup of TestTrainingService.test_get_job_status_existing ______\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n________ ERROR at setup of TestTrainingService.test_list_training_jobs _________\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n__ ERROR at setup of TestTrainingService.test_list_training_jobs_with_filter ___\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n____ ERROR at setup of TestTrainingService.test_cancel_training_job_success ____\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise 
exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n___ ERROR at setup of TestTrainingService.test_validate_training_data_valid ____\nfile /app/tests/test_service.py, line 183\n @pytest.mark.asyncio\n async def test_validate_training_data_valid(\n self,\n training_service,\n test_db_session,\n mock_data_service\n ):\n \"\"\"Test validation with valid data\"\"\"\n config = {\"min_data_points\": 30}\n\n result = await training_service.validate_training_data(\n db=test_db_session,\n tenant_id=\"test-tenant\",\n config=config\n )\n\n assert isinstance(result, dict)\n assert \"is_valid\" in result\n assert \"issues\" in result\n assert \"recommendations\" in result\n assert \"estimated_time_minutes\" in result\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_service.py:183\n_________ ERROR at setup of TestTrainingService.test_update_job_status _________\ntests/conftest.py:539: in training_job_in_db\n job = ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n_________ ERROR at setup of TestTrainingService.test_get_training_logs _________\ntests/conftest.py:539: in training_job_in_db\n job = 
ModelTrainingLog(\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance\n with util.safe_reraise():\n/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__\n raise exc_value.with_traceback(exc_tb)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance\n manager.original_init(*mixed[1:], **kwargs)\n/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor\n raise TypeError(\nE TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog\n_ ERROR at setup of TestTrainingServiceExecution.test_execute_training_job_success _\nfile /app/tests/test_service.py, line 468\n @pytest.mark.asyncio\n async def test_execute_training_job_success(\n self,\n training_service,\n test_db_session,\n mock_messaging,\n mock_data_service\n ):\n \"\"\"Test successful training job execution\"\"\"\n # Create job first\n job_id = \"test-execution-job\"\n training_log = await training_service.create_training_job(\n db=test_db_session,\n tenant_id=\"test-tenant\",\n job_id=job_id,\n config={\"include_weather\": True}\n )\n\n request = TrainingJobRequest(\n include_weather=True,\n include_traffic=True,\n min_data_points=30\n )\n\n with patch('app.services.training_service.TrainingService._fetch_sales_data') as mock_fetch_sales, \\\n patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \\\n patch('app.services.training_service.TrainingService._fetch_traffic_data') as mock_fetch_traffic, \\\n patch('app.services.training_service.TrainingService._store_trained_models') as mock_store:\n\n mock_fetch_sales.return_value = [{\"date\": \"2024-01-01\", \"product_name\": \"Pan Integral\", \"quantity\": 45}]\n mock_fetch_weather.return_value = []\n mock_fetch_traffic.return_value = []\n mock_store.return_value = None\n\n await training_service.execute_training_job(\n db=test_db_session,\n job_id=job_id,\n tenant_id=\"test-tenant\",\n request=request\n )\n\n # Verify job was completed\n updated_job = await training_service.get_job_status(\n db=test_db_session,\n job_id=job_id,\n tenant_id=\"test-tenant\"\n )\n\n assert updated_job.status == \"completed\"\n assert updated_job.progress == 100\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, 
test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures [testpath]' for help on them.\n\n/app/tests/test_service.py:468\n_ ERROR at setup of TestTrainingServiceExecution.test_execute_single_product_training_success _\nfile /app/tests/test_service.py, line 559\n @pytest.mark.asyncio\n async def test_execute_single_product_training_success(\n self,\n training_service,\n test_db_session,\n mock_messaging,\n mock_data_service\n ):\n \"\"\"Test successful single product training execution\"\"\"\n job_id = \"test-single-product-job\"\n product_name = \"Pan Integral\"\n\n await training_service.create_single_product_job(\n db=test_db_session,\n tenant_id=\"test-tenant\",\n product_name=product_name,\n job_id=job_id,\n config={}\n )\n\n request = SingleProductTrainingRequest(\n include_weather=True,\n include_traffic=False\n )\n\n with patch('app.services.training_service.TrainingService._fetch_product_sales_data') as mock_fetch_sales, \\\n patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \\\n patch('app.services.training_service.TrainingService._store_single_trained_model') as mock_store:\n\n mock_fetch_sales.return_value = [{\"date\": \"2024-01-01\", \"product_name\": product_name, \"quantity\": 45}]\n mock_fetch_weather.return_value = []\n mock_store.return_value = None\n\n await training_service.execute_single_product_training(\n db=test_db_session,\n job_id=job_id,\n tenant_id=\"test-tenant\",\n product_name=product_name,\n request=request\n )\n\n # Verify job was completed\n updated_job = await training_service.get_job_status(\n db=test_db_session,\n job_id=job_id,\n tenant_id=\"test-tenant\"\n )\n\n assert updated_job.status == \"completed\"\n assert updated_job.progress == 100\nE fixture 'mock_data_service' not found\n> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory\n> use 'pytest --fixtures 
[testpath]' for help on them.\n\n/app/tests/test_service.py:559\n_ ERROR at teardown of TestTrainingServiceEdgeCases.test_malformed_config_handling _\ntests/conftest.py:464: in setup_test_environment\n os.environ.pop(var, None)(scope=\"session\")\nE TypeError: 'str' object is not callable\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:60 Failed to create training job: 'coroutine' object has no attribute 'add'\n=================================== FAILURES ===================================\n______________________ TestTrainingAPI.test_health_check _______________________\ntests/test_api.py:20: in test_health_check\n response = await test_client.get(\"/health\")\nE AttributeError: 'async_generator' object has no attribute 'get'\n__________________ TestTrainingAPI.test_readiness_check_ready __________________\ntests/test_api.py:32: in test_readiness_check_ready\n with patch('app.main.app.state.ready', True):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'ready'\n________________ TestTrainingAPI.test_readiness_check_not_ready ________________\ntests/test_api.py:42: in test_readiness_check_not_ready\n with patch('app.main.app.state.ready', False):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'ready'\n_________________ TestTrainingAPI.test_liveness_check_healthy __________________\ntests/test_api.py:53: in test_liveness_check_healthy\n response = await test_client.get(\"/health/live\")\nE AttributeError: 'async_generator' object has no attribute 'get'\n________________ TestTrainingAPI.test_liveness_check_unhealthy _________________\ntests/test_api.py:63: in test_liveness_check_unhealthy\n response = await test_client.get(\"/health/live\")\nE AttributeError: 'async_generator' object has no attribute 'get'\n____________________ TestTrainingAPI.test_metrics_endpoint _____________________\ntests/test_api.py:73: in test_metrics_endpoint\n response = await test_client.get(\"/metrics\")\nE AttributeError: 'async_generator' object has no attribute 'get'\n______________________ TestTrainingAPI.test_root_endpoint ______________________\ntests/test_api.py:92: in test_root_endpoint\n response = await test_client.get(\"/\")\nE AttributeError: 'async_generator' object has no attribute 'get'\n_________ TestTrainingJobsAPI.test_start_training_job_validation_error _________\ntests/test_api.py:139: in test_start_training_job_validation_error\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n_________ TestTrainingJobsAPI.test_get_training_status_nonexistent_job _________\ntests/test_api.py:167: in test_get_training_status_nonexistent_job\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = 
self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n_______________ TestTrainingJobsAPI.test_cancel_nonexistent_job ________________\ntests/test_api.py:233: in test_cancel_nonexistent_job\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n___ TestSingleProductTrainingAPI.test_train_single_product_validation_error ____\ntests/test_api.py:323: in test_train_single_product_validation_error\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n________________ TestErrorHandling.test_database_error_handling ________________\ntests/test_api.py:412: in test_database_error_handling\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n___________________ TestErrorHandling.test_missing_tenant_id ___________________\ntests/test_api.py:427: in test_missing_tenant_id\n response = await test_client.post(\"/training/jobs\", json=request_data)\nE AttributeError: 'async_generator' object has no attribute 'post'\n_________________ TestErrorHandling.test_invalid_job_id_format _________________\ntests/test_api.py:437: in test_invalid_job_id_format\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n_________________ TestErrorHandling.test_invalid_json_payload __________________\ntests/test_api.py:469: in test_invalid_json_payload\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n_______________ TestErrorHandling.test_unsupported_content_type ________________\ntests/test_api.py:481: in test_unsupported_content_type\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n__________ TestAuthenticationIntegration.test_endpoints_require_auth ___________\ntests/test_api.py:512: in test_endpoints_require_auth\n response = await 
test_client.post(endpoint, json={})\nE AttributeError: 'async_generator' object has no attribute 'post'\n______________ TestAPIValidation.test_training_request_validation ______________\ntests/test_api.py:555: in test_training_request_validation\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n___________ TestAPIValidation.test_single_product_request_validation ___________\ntests/test_api.py:591: in test_single_product_request_validation\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n______________ TestAPIValidation.test_query_parameter_validation _______________\ntests/test_api.py:612: in test_query_parameter_validation\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n_________________ TestAPIPerformance.test_concurrent_requests __________________\ntests/test_api.py:643: in test_concurrent_requests\n with patch('app.api.training.get_current_tenant_id', return_value=f\"tenant-{i}\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n________________ TestAPIPerformance.test_large_payload_handling ________________\ntests/test_api.py:665: in test_large_payload_handling\n with patch('app.api.training.get_current_tenant_id', return_value=\"test-tenant\"):\n/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__\n original, local = self.get_original()\n/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original\n raise AttributeError(\nE AttributeError: does not have the attribute 'get_current_tenant_id'\n______________ TestAPIPerformance.test_rapid_successive_requests _______________\ntests/test_api.py:681: in test_rapid_successive_requests\n response = await test_client.get(\"/health\")\nE AttributeError: 'async_generator' object has no attribute 'get'\n_____ TestBakeryDataProcessor.test_prepare_training_data_insufficient_data _____\ntests/test_ml.py:201: in test_prepare_training_data_insufficient_data\n with pytest.raises(Exception):\nE Failed: DID NOT RAISE \n------------------------------ Captured log call -------------------------------\nINFO app.ml.data_processor:data_processor.py:45 Preparing training data for product: Pan Integral\nINFO app.ml.data_processor:data_processor.py:69 Prepared 5 data points for Pan Integral\n___________ TestBakeryProphetManager.test_train_bakery_model_success ___________\ntests/test_ml.py:239: in test_train_bakery_model_success\n result = await prophet_manager.train_bakery_model(\napp/ml/prophet_manager.py:70: in train_bakery_model\n model = 
self._create_prophet_model(regressor_columns)\napp/ml/prophet_manager.py:238: in _create_prophet_model\n daily_seasonality=settings.PROPHET_DAILY_SEASONALITY,\n/usr/local/lib/python3.11/site-packages/pydantic/main.py:761: in __getattr__\n raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')\nE AttributeError: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'\n------------------------------ Captured log call -------------------------------\nINFO app.ml.prophet_manager:prophet_manager.py:58 Training bakery model for tenant test-tenant, product Pan Integral\nINFO app.ml.prophet_manager:prophet_manager.py:226 Identified regressor columns: ['temperature', 'humidity']\nERROR app.ml.prophet_manager:prophet_manager.py:115 Failed to train bakery model for Pan Integral: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'\n____________ TestBakeryMLTrainer.test_train_single_product_success _____________\ntests/test_ml.py:414: in test_train_single_product_success\n result = await ml_trainer.train_single_product(\napp/ml/trainer.py:149: in train_single_product\n model_info = await self.prophet_manager.train_bakery_model(\napp/ml/prophet_manager.py:61: in train_bakery_model\n await self._validate_training_data(df, product_name)\napp/ml/prophet_manager.py:158: in _validate_training_data\n raise ValueError(\nE ValueError: Insufficient training data for Pan Integral: 3 days, minimum required: 30\n------------------------------ Captured log call -------------------------------\nINFO app.ml.trainer:trainer.py:125 Starting single product training test-job-123 for Pan Integral\nINFO app.ml.data_processor:data_processor.py:45 Preparing training data for product: Pan Integral\nINFO app.ml.data_processor:data_processor.py:69 Prepared 3 data points for Pan Integral\nINFO app.ml.prophet_manager:prophet_manager.py:58 Training bakery model for tenant test-tenant, product Pan Integral\nERROR app.ml.prophet_manager:prophet_manager.py:115 Failed to train bakery model for Pan Integral: Insufficient training data for Pan Integral: 3 days, minimum required: 30\nERROR app.ml.trainer:trainer.py:170 Single product training test-job-123 failed: Insufficient training data for Pan Integral: 3 days, minimum required: 30\n____________ TestBakeryMLTrainer.test_train_single_product_no_data _____________\ntests/test_ml.py:438: in test_train_single_product_no_data\n await ml_trainer.train_single_product(\napp/ml/trainer.py:134: in train_single_product\n product_sales = sales_df[sales_df['product_name'] == product_name].copy()\n/usr/local/lib/python3.11/site-packages/pandas/core/frame.py:3893: in __getitem__\n indexer = self.columns.get_loc(key)\n/usr/local/lib/python3.11/site-packages/pandas/core/indexes/range.py:418: in get_loc\n raise KeyError(key)\nE KeyError: 'product_name'\n------------------------------ Captured log call -------------------------------\nINFO app.ml.trainer:trainer.py:125 Starting single product training test-job-123 for Nonexistent Product\nERROR app.ml.trainer:trainer.py:170 Single product training test-job-123 failed: 'product_name'\n_____________ TestTrainingService.test_create_training_job_success _____________\napp/services/training_service.py:52: in create_training_job\n db.add(training_log)\nE AttributeError: 'coroutine' object has no attribute 'add'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:34: in test_create_training_job_success\n result = await 
training_service.create_training_job(\napp/services/training_service.py:61: in create_training_job\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:60 Failed to create training job: 'coroutine' object has no attribute 'add'\n__________ TestTrainingService.test_create_single_product_job_success __________\napp/services/training_service.py:84: in create_single_product_job\n db.add(training_log)\nE AttributeError: 'coroutine' object has no attribute 'add'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:60: in test_create_single_product_job_success\n result = await training_service.create_single_product_job(\napp/services/training_service.py:93: in create_single_product_job\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:92 Failed to create single product training job: 'coroutine' object has no attribute 'add'\n_______________ TestTrainingService.test_cancel_nonexistent_job ________________\napp/services/training_service.py:270: in cancel_training_job\n result = await db.execute(\nE AttributeError: 'coroutine' object has no attribute 'execute'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:175: in test_cancel_nonexistent_job\n result = await training_service.cancel_training_job(\napp/services/training_service.py:297: in cancel_training_job\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:296 Failed to cancel training job: 'coroutine' object has no attribute 'execute'\n___________ TestTrainingService.test_validate_training_data_no_data ____________\ntests/test_service.py:221: in test_validate_training_data_no_data\n assert result[\"is_valid\"] is False\nE assert True is False\n------------------------------ Captured log call -------------------------------\nINFO app.services.training_service:training_service.py:306 Validating training data for tenant test-tenant\n________________ TestTrainingService.test_store_trained_models _________________\napp/services/training_service.py:572: in _store_trained_models\n await db.execute(\nE AttributeError: 'coroutine' object has no attribute 'execute'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:280: in test_store_trained_models\n await training_service._store_trained_models(\napp/services/training_service.py:592: in _store_trained_models\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:591 Failed to store trained models: 'coroutine' object has no attribute 'execute'\n________ TestTrainingServiceExecution.test_execute_training_job_failure ________\napp/services/training_service.py:52: in create_training_job\n db.add(training_log)\nE AttributeError: 'coroutine' object has no attribute 'add'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:529: in 
test_execute_training_job_failure\n await training_service.create_training_job(\napp/services/training_service.py:61: in create_training_job\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:60 Failed to create training job: 'coroutine' object has no attribute 'add'\n__________ TestTrainingServiceEdgeCases.test_concurrent_job_creation ___________\napp/services/training_service.py:52: in create_training_job\n db.add(training_log)\nE AttributeError: 'coroutine' object has no attribute 'add'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:660: in test_concurrent_job_creation\n job = await training_service.create_training_job(\napp/services/training_service.py:61: in create_training_job\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:60 Failed to create training job: 'coroutine' object has no attribute 'add'\n_________ TestTrainingServiceEdgeCases.test_malformed_config_handling __________\napp/services/training_service.py:52: in create_training_job\n db.add(training_log)\nE AttributeError: 'coroutine' object has no attribute 'add'\n\nDuring handling of the above exception, another exception occurred:\ntests/test_service.py:681: in test_malformed_config_handling\n job = await training_service.create_training_job(\napp/services/training_service.py:61: in create_training_job\n await db.rollback()\nE AttributeError: 'coroutine' object has no attribute 'rollback'\n------------------------------ Captured log call -------------------------------\nERROR app.services.training_service:training_service.py:60 Failed to create training job: 'coroutine' object has no attribute 'add'\n=============================== warnings summary ===============================\napp/schemas/training.py:41\n /app/app/schemas/training.py:41: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('seasonality_mode')\n\napp/schemas/training.py:106\n /app/app/schemas/training.py:106: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. 
See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('min_data_points')\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_id\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_path\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_type\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_info\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\nshared/config/base.py:280\n /app/shared/config/base.py:280: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('JWT_SECRET_KEY')\n\nshared/config/base.py:288\n /app/shared/config/base.py:288: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('LOG_LEVEL')\n\nshared/config/base.py:295\n /app/shared/config/base.py:295: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('ENVIRONMENT')\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_config.py:268\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_config.py:268: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning)\n\napp/models/training.py:12\n /app/app/models/training.py:12: MovedIn20Warning: The ``declarative_base()`` function is now available as sqlalchemy.orm.declarative_base(). 
(deprecated since: 2.0) (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9)\n Base = declarative_base()\n\ntests/test_api.py: 22 warnings\ntests/test_ml.py: 1 warning\n /app/tests/conftest.py:1560: RuntimeWarning: coroutine 'test_app' was never awaited\n gc.collect() # Force garbage collection after each test\n Enable tracemalloc to get traceback where the object was allocated.\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\n\ntests/test_api.py: 13 warnings\n /usr/local/lib/python3.11/site-packages/_pytest/runner.py:139: RuntimeWarning: coroutine 'test_app' was never awaited\n item.funcargs = None # type: ignore[attr-defined]\n Enable tracemalloc to get traceback where the object was allocated.\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\n\ntests/test_service.py::TestTrainingService::test_create_single_product_job_success\ntests/test_service.py::TestTrainingService::test_get_job_status_nonexistent\ntests/test_service.py::TestTrainingService::test_validate_training_data_no_data\ntests/test_service.py::TestTrainingService::test_store_trained_models\ntests/test_service.py::TestTrainingServiceDataFetching::test_fetch_sales_data_success\ntests/test_service.py::TestTrainingServiceEdgeCases::test_database_connection_failure\ntests/test_service.py::TestTrainingServiceEdgeCases::test_malformed_config_handling\n /app/tests/conftest.py:1560: RuntimeWarning: coroutine 'test_db_session' was never awaited\n gc.collect() # Force garbage collection after each test\n Enable tracemalloc to get traceback where the object was allocated.\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\n\ntests/test_service.py: 10 warnings\n /usr/local/lib/python3.11/site-packages/_pytest/runner.py:139: RuntimeWarning: coroutine 'test_db_session' was never awaited\n item.funcargs = None # type: ignore[attr-defined]\n Enable tracemalloc to get traceback where the object was allocated.\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\n\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\n------------ generated xml file: /app/tests/results/junit_unit.xml -------------\n\n---------- coverage: platform linux, python 3.11.13-final-0 ----------\nName Stmts Miss Cover Missing\n----------------------------------------------------------------\napp/__init__.py 0 0 100%\napp/api/__init__.py 0 0 100%\napp/api/models.py 18 5 72% 29-33\napp/api/training.py 166 132 20% 49-88, 100-124, 134-157, 167-187, 197-232, 244-286, 296-317, 327-347, 358-384, 395-415, 426-472\napp/core/__init__.py 0 0 100%\napp/core/config.py 32 0 100%\napp/core/database.py 115 91 21% 28-36, 45-72, 77-104, 109-165, 175-192, 199-209, 214-233, 238-248\napp/main.py 89 54 39% 39-90, 116-152, 158-168, 193, 203-211, 219-221, 224\napp/ml/__init__.py 0 0 100%\napp/ml/data_processor.py 238 63 74% 72-74, 104, 110-114, 125-134, 138-141, 149, 155, 194, 238, 274, 278-285, 302, 333, 335, 339-343, 382-383, 404, 418-423, 453, 457, 463, 473-493\napp/ml/prophet_manager.py 162 69 57% 73-112, 140-141, 148-150, 155, 170, 174, 185-186, 204, 213, 250, 282-284, 296-328, 334-365, 377-378, 382-388, 392-408\napp/ml/trainer.py 127 38 70% 52, 97-99, 123, 138, 156-167, 190-240, 255-256, 260, 288-291, 317-335\napp/models/__init__.py 0 0 100%\napp/models/training.py 87 0 100%\napp/schemas/__init__.py 0 0 100%\napp/schemas/training.py 184 6 97% 43-45, 
108-110\napp/services/__init__.py 0 0 100%\napp/services/messaging.py 41 20 51% 24-28, 32-33, 38-46, 54, 71-81, 89-97, 105, 121, 137, 155, 172, 189, 206\napp/services/training_service.py 285 164 42% 54-57, 62, 85-89, 94, 102-161, 170-221, 237, 249-262, 286-293, 298, 315-316, 330-332, 340-341, 358-360, 376-403, 425, 448-475, 484, 487, 498-503, 512, 515, 526-531, 585-588, 593, 601-648, 655-694\n----------------------------------------------------------------\nTOTAL 1544 642 58%\nCoverage HTML written to dir /app/tests/results/coverage_unit_html\nCoverage XML written to file /app/tests/results/coverage_unit.xml\n\n=========================== short test summary info ============================\nFAILED tests/test_api.py::TestTrainingAPI::test_health_check - AttributeError...\nFAILED tests/test_api.py::TestTrainingAPI::test_readiness_check_ready - Attri...\nFAILED tests/test_api.py::TestTrainingAPI::test_readiness_check_not_ready - A...\nFAILED tests/test_api.py::TestTrainingAPI::test_liveness_check_healthy - Attr...\nFAILED tests/test_api.py::TestTrainingAPI::test_liveness_check_unhealthy - At...\nFAILED tests/test_api.py::TestTrainingAPI::test_metrics_endpoint - AttributeE...\nFAILED tests/test_api.py::TestTrainingAPI::test_root_endpoint - AttributeErro...\nFAILED tests/test_api.py::TestTrainingJobsAPI::test_start_training_job_validation_error\nFAILED tests/test_api.py::TestTrainingJobsAPI::test_get_training_status_nonexistent_job\nFAILED tests/test_api.py::TestTrainingJobsAPI::test_cancel_nonexistent_job - ...\nFAILED tests/test_api.py::TestSingleProductTrainingAPI::test_train_single_product_validation_error\nFAILED tests/test_api.py::TestErrorHandling::test_database_error_handling - A...\nFAILED tests/test_api.py::TestErrorHandling::test_missing_tenant_id - Attribu...\nFAILED tests/test_api.py::TestErrorHandling::test_invalid_job_id_format - Att...\nFAILED tests/test_api.py::TestErrorHandling::test_invalid_json_payload - Attr...\nFAILED tests/test_api.py::TestErrorHandling::test_unsupported_content_type - ...\nFAILED tests/test_api.py::TestAuthenticationIntegration::test_endpoints_require_auth\nFAILED tests/test_api.py::TestAPIValidation::test_training_request_validation\nFAILED tests/test_api.py::TestAPIValidation::test_single_product_request_validation\nFAILED tests/test_api.py::TestAPIValidation::test_query_parameter_validation\nFAILED tests/test_api.py::TestAPIPerformance::test_concurrent_requests - Attr...\nFAILED tests/test_api.py::TestAPIPerformance::test_large_payload_handling - A...\nFAILED tests/test_api.py::TestAPIPerformance::test_rapid_successive_requests\nFAILED tests/test_ml.py::TestBakeryDataProcessor::test_prepare_training_data_insufficient_data\nFAILED tests/test_ml.py::TestBakeryProphetManager::test_train_bakery_model_success\nFAILED tests/test_ml.py::TestBakeryMLTrainer::test_train_single_product_success\nFAILED tests/test_ml.py::TestBakeryMLTrainer::test_train_single_product_no_data\nFAILED tests/test_service.py::TestTrainingService::test_create_training_job_success\nFAILED tests/test_service.py::TestTrainingService::test_create_single_product_job_success\nFAILED tests/test_service.py::TestTrainingService::test_cancel_nonexistent_job\nFAILED tests/test_service.py::TestTrainingService::test_validate_training_data_no_data\nFAILED tests/test_service.py::TestTrainingService::test_store_trained_models\nFAILED tests/test_service.py::TestTrainingServiceExecution::test_execute_training_job_failure\nFAILED 
tests/test_service.py::TestTrainingServiceEdgeCases::test_concurrent_job_creation\nFAILED tests/test_service.py::TestTrainingServiceEdgeCases::test_malformed_config_handling\nERROR tests/test_api.py::TestTrainingJobsAPI::test_start_training_job_success\nERROR tests/test_api.py::TestTrainingJobsAPI::test_get_training_status_existing_job\nERROR tests/test_api.py::TestTrainingJobsAPI::test_list_training_jobs - TypeE...\nERROR tests/test_api.py::TestTrainingJobsAPI::test_list_training_jobs_with_status_filter\nERROR tests/test_api.py::TestTrainingJobsAPI::test_cancel_training_job_success\nERROR tests/test_api.py::TestTrainingJobsAPI::test_get_training_logs - TypeEr...\nERROR tests/test_api.py::TestTrainingJobsAPI::test_validate_training_data_valid\nERROR tests/test_api.py::TestSingleProductTrainingAPI::test_train_single_product_success\nERROR tests/test_api.py::TestSingleProductTrainingAPI::test_train_single_product_special_characters\nERROR tests/test_api.py::TestModelsAPI::test_list_models\nERROR tests/test_api.py::TestModelsAPI::test_get_model_details\nERROR tests/test_api.py::TestErrorHandling::test_messaging_failure_handling\nERROR tests/test_api.py::TestAuthenticationIntegration::test_tenant_isolation_in_api\nERROR tests/test_service.py::TestTrainingService::test_get_job_status_existing\nERROR tests/test_service.py::TestTrainingService::test_list_training_jobs - T...\nERROR tests/test_service.py::TestTrainingService::test_list_training_jobs_with_filter\nERROR tests/test_service.py::TestTrainingService::test_cancel_training_job_success\nERROR tests/test_service.py::TestTrainingService::test_validate_training_data_valid\nERROR tests/test_service.py::TestTrainingService::test_update_job_status - Ty...\nERROR tests/test_service.py::TestTrainingService::test_get_training_logs - Ty...\nERROR tests/test_service.py::TestTrainingServiceExecution::test_execute_training_job_success\nERROR tests/test_service.py::TestTrainingServiceExecution::test_execute_single_product_training_success\nERROR tests/test_service.py::TestTrainingServiceEdgeCases::test_malformed_config_handling\n======= 35 failed, 24 passed, 2 skipped, 64 warnings, 23 errors in 5.71s =======\n",
- "stderr": "sys:1: RuntimeWarning: coroutine 'test_db_session' was never awaited\n",
- "timestamp": "2025-07-25T11:22:43.703966"
+ "return_code": 4,
+ "duration": 0.4672102928161621,
+ "stdout": "",
+ "stderr": "/usr/local/lib/python3.11/site-packages/_pytest/config/__init__.py:331: PluggyTeardownRaisedWarning: A plugin raised an exception during an old-style hookwrapper teardown.\nPlugin: helpconfig, Hook: pytest_cmdline_parse\nConftestImportFailure: NameError: name 'pytest_asyncio' is not defined (from /app/tests/conftest.py)\nFor more information see https://pluggy.readthedocs.io/en/stable/api_reference.html#pluggy.PluggyTeardownRaisedWarning\n config = pluginmanager.hook.pytest_cmdline_parse(\nImportError while loading conftest '/app/tests/conftest.py'.\ntests/conftest.py:517: in \n @pytest_asyncio.fixture\nE NameError: name 'pytest_asyncio' is not defined\n",
+ "timestamp": "2025-07-25T12:08:35.411622"
},
"integration": {
"suite": "integration",
"status": "failed",
- "return_code": 5,
- "duration": 0.5286672115325928,
- "stdout": "\n================================================================================\nTRAINING SERVICE TEST SESSION STARTING\n================================================================================\n============================= test session starts ==============================\nplatform linux -- Python 3.11.13, pytest-7.4.3, pluggy-1.6.0 -- /usr/local/bin/python\ncachedir: .pytest_cache\nrootdir: /app\nplugins: anyio-3.7.1, mock-3.12.0, asyncio-0.21.1, cov-4.1.0\nasyncio: mode=Mode.STRICT\ncollecting ... collected 0 items\n\n================================================================================\nTRAINING SERVICE TEST SESSION FINISHED\nExit Status: 5\n================================================================================\n\n--------- generated xml file: /app/tests/results/junit_integration.xml ---------\n\n---------- coverage: platform linux, python 3.11.13-final-0 ----------\nName Stmts Miss Cover Missing\n----------------------------------------------------------------\napp/__init__.py 0 0 100%\napp/api/__init__.py 0 0 100%\napp/api/models.py 18 18 0% 5-33\napp/api/training.py 166 166 0% 4-472\napp/core/__init__.py 0 0 100%\napp/core/config.py 32 32 0% 6-65\napp/core/database.py 115 115 0% 2-251\napp/main.py 89 89 0% 4-224\napp/ml/__init__.py 0 0 100%\napp/ml/data_processor.py 238 238 0% 2-493\napp/ml/prophet_manager.py 162 162 0% 2-408\napp/ml/trainer.py 127 127 0% 2-365\napp/models/__init__.py 0 0 100%\napp/models/training.py 87 87 0% 2-154\napp/schemas/__init__.py 0 0 100%\napp/schemas/training.py 184 184 0% 2-287\napp/services/__init__.py 0 0 100%\napp/services/messaging.py 41 41 0% 2-206\napp/services/training_service.py 285 285 0% 2-694\n----------------------------------------------------------------\nTOTAL 1544 1544 0%\nCoverage HTML written to dir /app/tests/results/coverage_integration_html\nCoverage XML written to file /app/tests/results/coverage_integration.xml\n\n============================ no tests ran in 0.20s =============================\n",
- "stderr": "/usr/local/lib/python3.11/site-packages/coverage/control.py:883: CoverageWarning: No data was collected. (no-data-collected)\n self._warn(\"No data was collected.\", slug=\"no-data-collected\")\n",
- "timestamp": "2025-07-25T11:22:44.232765"
+ "return_code": 4,
+ "duration": 0.5080075263977051,
+ "stdout": "",
+ "stderr": "/usr/local/lib/python3.11/site-packages/_pytest/config/__init__.py:331: PluggyTeardownRaisedWarning: A plugin raised an exception during an old-style hookwrapper teardown.\nPlugin: helpconfig, Hook: pytest_cmdline_parse\nConftestImportFailure: NameError: name 'pytest_asyncio' is not defined (from /app/tests/conftest.py)\nFor more information see https://pluggy.readthedocs.io/en/stable/api_reference.html#pluggy.PluggyTeardownRaisedWarning\n config = pluginmanager.hook.pytest_cmdline_parse(\nImportError while loading conftest '/app/tests/conftest.py'.\ntests/conftest.py:517: in \n @pytest_asyncio.fixture\nE NameError: name 'pytest_asyncio' is not defined\n",
+ "timestamp": "2025-07-25T12:08:35.919747"
},
"performance": {
"suite": "performance",
"status": "failed",
- "return_code": 2,
- "duration": 0.6394450664520264,
- "stdout": "\n================================================================================\nTRAINING SERVICE TEST SESSION STARTING\n================================================================================\n============================= test session starts ==============================\nplatform linux -- Python 3.11.13, pytest-7.4.3, pluggy-1.6.0 -- /usr/local/bin/python\ncachedir: .pytest_cache\nrootdir: /app\nplugins: anyio-3.7.1, mock-3.12.0, asyncio-0.21.1, cov-4.1.0\nasyncio: mode=Mode.STRICT\ncollecting ... collected 0 items / 1 error\n\n================================================================================\nTRAINING SERVICE TEST SESSION FINISHED\nExit Status: 2\n================================================================================\n\n==================================== ERRORS ====================================\n__________________ ERROR collecting tests/test_performance.py __________________\nImportError while importing test module '/app/tests/test_performance.py'.\nHint: make sure your test modules/packages have valid Python names.\nTraceback:\n/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\ntests/test_performance.py:16: in \n import psutil\nE ModuleNotFoundError: No module named 'psutil'\n--------- generated xml file: /app/tests/results/junit_performance.xml ---------\n\n---------- coverage: platform linux, python 3.11.13-final-0 ----------\nName Stmts Miss Cover Missing\n----------------------------------------------------------------\napp/__init__.py 0 0 100%\napp/api/__init__.py 0 0 100%\napp/api/models.py 18 18 0% 5-33\napp/api/training.py 166 166 0% 4-472\napp/core/__init__.py 0 0 100%\napp/core/config.py 32 32 0% 6-65\napp/core/database.py 115 115 0% 2-251\napp/main.py 89 89 0% 4-224\napp/ml/__init__.py 0 0 100%\napp/ml/data_processor.py 238 238 0% 2-493\napp/ml/prophet_manager.py 162 162 0% 2-408\napp/ml/trainer.py 127 127 0% 2-365\napp/models/__init__.py 0 0 100%\napp/models/training.py 87 87 0% 2-154\napp/schemas/__init__.py 0 0 100%\napp/schemas/training.py 184 184 0% 2-287\napp/services/__init__.py 0 0 100%\napp/services/messaging.py 41 41 0% 2-206\napp/services/training_service.py 285 285 0% 2-694\n----------------------------------------------------------------\nTOTAL 1544 1544 0%\nCoverage HTML written to dir /app/tests/results/coverage_performance_html\nCoverage XML written to file /app/tests/results/coverage_performance.xml\n\n=========================== short test summary info ============================\nERROR tests/test_performance.py\n!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!\n=============================== 1 error in 0.23s ===============================\n",
- "stderr": "/usr/local/lib/python3.11/site-packages/coverage/control.py:883: CoverageWarning: No data was collected. (no-data-collected)\n self._warn(\"No data was collected.\", slug=\"no-data-collected\")\n",
- "timestamp": "2025-07-25T11:22:44.872314"
+ "return_code": 4,
+ "duration": 0.5039312839508057,
+ "stdout": "",
+ "stderr": "/usr/local/lib/python3.11/site-packages/_pytest/config/__init__.py:331: PluggyTeardownRaisedWarning: A plugin raised an exception during an old-style hookwrapper teardown.\nPlugin: helpconfig, Hook: pytest_cmdline_parse\nConftestImportFailure: NameError: name 'pytest_asyncio' is not defined (from /app/tests/conftest.py)\nFor more information see https://pluggy.readthedocs.io/en/stable/api_reference.html#pluggy.PluggyTeardownRaisedWarning\n config = pluginmanager.hook.pytest_cmdline_parse(\nImportError while loading conftest '/app/tests/conftest.py'.\ntests/conftest.py:517: in \n @pytest_asyncio.fixture\nE NameError: name 'pytest_asyncio' is not defined\n",
+ "timestamp": "2025-07-25T12:08:36.423826"
},
"end_to_end": {
"suite": "end_to_end",
"status": "failed",
- "return_code": 1,
- "duration": 2.01322340965271,
- "stdout": "\n================================================================================\nTRAINING SERVICE TEST SESSION STARTING\n================================================================================\n============================= test session starts ==============================\nplatform linux -- Python 3.11.13, pytest-7.4.3, pluggy-1.6.0 -- /usr/local/bin/python\ncachedir: .pytest_cache\nrootdir: /app\nplugins: anyio-3.7.1, mock-3.12.0, asyncio-0.21.1, cov-4.1.0\nasyncio: mode=Mode.STRICT\ncollecting ... 2025-07-25 11:22:46 [INFO] shared.monitoring.logging: Logging configured for training-service at level INFO\ncollected 1 item\n\ntests/test_end_to_end.py::TestTrainingServiceEndToEnd::test_complete_training_workflow_api ERROR\ntests/test_end_to_end.py::TestTrainingServiceEndToEnd::test_complete_training_workflow_api ERROR\n================================================================================\nTRAINING SERVICE TEST SESSION FINISHED\nExit Status: 1\n================================================================================\n\n\n==================================== ERRORS ====================================\n_ ERROR at setup of TestTrainingServiceEndToEnd.test_complete_training_workflow_api _\ntests/test_end_to_end.py:75: in real_bakery_data\n temp = 15 + 12 * np.sin((date.timetuple().tm_yday / 365) * 2 * np.pi)\nE UnboundLocalError: cannot access local variable 'np' where it is not associated with a value\n_ ERROR at teardown of TestTrainingServiceEndToEnd.test_complete_training_workflow_api _\ntests/conftest.py:464: in setup_test_environment\n os.environ.pop(var, None)(scope=\"session\")\nE TypeError: 'str' object is not callable\n=============================== warnings summary ===============================\nshared/config/base.py:280\n /app/shared/config/base.py:280: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('JWT_SECRET_KEY')\n\nshared/config/base.py:288\n /app/shared/config/base.py:288: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('LOG_LEVEL')\n\nshared/config/base.py:295\n /app/shared/config/base.py:295: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('ENVIRONMENT')\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_config.py:268\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_config.py:268: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. 
See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning)\n\napp/schemas/training.py:41\n /app/app/schemas/training.py:41: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('seasonality_mode')\n\napp/schemas/training.py:106\n /app/app/schemas/training.py:106: PydanticDeprecatedSince20: Pydantic V1 style `@validator` validators are deprecated. You should migrate to Pydantic V2 style `@field_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.5/migration/\n @validator('min_data_points')\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_id\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_path\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_type\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\n../usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149\n /usr/local/lib/python3.11/site-packages/pydantic/_internal/_fields.py:149: UserWarning: Field \"model_info\" has conflict with protected namespace \"model_\".\n \n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n warnings.warn(\n\napp/models/training.py:12\n /app/app/models/training.py:12: MovedIn20Warning: The ``declarative_base()`` function is now available as sqlalchemy.orm.declarative_base(). 
(deprecated since: 2.0) (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9)\n Base = declarative_base()\n\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\n--------- generated xml file: /app/tests/results/junit_end_to_end.xml ----------\n\n---------- coverage: platform linux, python 3.11.13-final-0 ----------\nName Stmts Miss Cover Missing\n----------------------------------------------------------------\napp/__init__.py 0 0 100%\napp/api/__init__.py 0 0 100%\napp/api/models.py 18 5 72% 29-33\napp/api/training.py 166 132 20% 49-88, 100-124, 134-157, 167-187, 197-232, 244-286, 296-317, 327-347, 358-384, 395-415, 426-472\napp/core/__init__.py 0 0 100%\napp/core/config.py 32 0 100%\napp/core/database.py 115 91 21% 28-36, 45-72, 77-104, 109-165, 175-192, 199-209, 214-233, 238-248\napp/main.py 89 54 39% 39-90, 116-152, 158-168, 193, 203-211, 219-221, 224\napp/ml/__init__.py 0 0 100%\napp/ml/data_processor.py 238 211 11% 44-74, 91-141, 145-167, 171-186, 190-218, 225-285, 292-343, 347-371, 375-385, 389-412, 416-423, 427-444, 448-465, 473-493\napp/ml/prophet_manager.py 162 127 22% 57-116, 133-150, 154-174, 178-215, 219-227, 233-250, 254-284, 296-328, 334-365, 377-378, 382-388, 392-408\napp/ml/trainer.py 127 103 19% 51-99, 122-171, 190-240, 244-260, 268-293, 300-341, 345-365\napp/models/__init__.py 0 0 100%\napp/models/training.py 87 0 100%\napp/schemas/__init__.py 0 0 100%\napp/schemas/training.py 184 6 97% 43-45, 108-110\napp/services/__init__.py 0 0 100%\napp/services/messaging.py 41 20 51% 24-28, 32-33, 38-46, 54, 71-81, 89-97, 105, 121, 137, 155, 172, 189, 206\napp/services/training_service.py 285 249 13% 41-62, 71-94, 102-161, 170-221, 228-241, 249-262, 269-298, 305-360, 376-403, 410-441, 448-475, 479-503, 507-531, 538-593, 601-648, 655-694\n----------------------------------------------------------------\nTOTAL 1544 998 35%\nCoverage HTML written to dir /app/tests/results/coverage_end_to_end_html\nCoverage XML written to file /app/tests/results/coverage_end_to_end.xml\n\n=========================== short test summary info ============================\nERROR tests/test_end_to_end.py::TestTrainingServiceEndToEnd::test_complete_training_workflow_api\nERROR tests/test_end_to_end.py::TestTrainingServiceEndToEnd::test_complete_training_workflow_api\n======================== 11 warnings, 2 errors in 1.45s ========================\n",
- "stderr": "",
- "timestamp": "2025-07-25T11:22:46.885639"
+ "return_code": 4,
+ "duration": 0.5037407875061035,
+ "stdout": "",
+ "stderr": "/usr/local/lib/python3.11/site-packages/_pytest/config/__init__.py:331: PluggyTeardownRaisedWarning: A plugin raised an exception during an old-style hookwrapper teardown.\nPlugin: helpconfig, Hook: pytest_cmdline_parse\nConftestImportFailure: NameError: name 'pytest_asyncio' is not defined (from /app/tests/conftest.py)\nFor more information see https://pluggy.readthedocs.io/en/stable/api_reference.html#pluggy.PluggyTeardownRaisedWarning\n config = pluginmanager.hook.pytest_cmdline_parse(\nImportError while loading conftest '/app/tests/conftest.py'.\ntests/conftest.py:517: in \n @pytest_asyncio.fixture\nE NameError: name 'pytest_asyncio' is not defined\n",
+ "timestamp": "2025-07-25T12:08:36.927703"
}
},
"recommendations": [
diff --git a/services/training/tests/test_api.py b/services/training/tests/test_api.py
index 888f362a..f9b696b6 100644
--- a/services/training/tests/test_api.py
+++ b/services/training/tests/test_api.py
@@ -29,7 +29,8 @@ class TestTrainingAPI:
async def test_readiness_check_ready(self, test_client: AsyncClient):
"""Test readiness check when service is ready"""
# Mock app state as ready
- with patch('app.main.app.state.ready', True):
+ from app.main import app # Add import at top
+ with patch.object(app.state, 'ready', True, create=True):
response = await test_client.get("/health/ready")
assert response.status_code == status.HTTP_200_OK
@@ -117,7 +118,7 @@ class TestTrainingJobsAPI:
"seasonality_mode": "additive"
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_200_OK
@@ -136,7 +137,7 @@ class TestTrainingJobsAPI:
"min_data_points": 5 # Too low
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@@ -150,7 +151,7 @@ class TestTrainingJobsAPI:
"""Test getting status of existing training job"""
job_id = training_job_in_db.job_id
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == status.HTTP_200_OK
@@ -164,7 +165,7 @@ class TestTrainingJobsAPI:
@pytest.mark.asyncio
async def test_get_training_status_nonexistent_job(self, test_client: AsyncClient):
"""Test getting status of non-existent training job"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get("/training/jobs/nonexistent-job/status")
assert response.status_code == status.HTTP_404_NOT_FOUND
@@ -176,7 +177,7 @@ class TestTrainingJobsAPI:
training_job_in_db
):
"""Test listing training jobs"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get("/training/jobs")
assert response.status_code == status.HTTP_200_OK
@@ -198,7 +199,7 @@ class TestTrainingJobsAPI:
training_job_in_db
):
"""Test listing training jobs with status filter"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get("/training/jobs?status=pending")
assert response.status_code == status.HTTP_200_OK
@@ -219,7 +220,7 @@ class TestTrainingJobsAPI:
"""Test cancelling a training job successfully"""
job_id = training_job_in_db.job_id
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(f"/training/jobs/{job_id}/cancel")
assert response.status_code == status.HTTP_200_OK
@@ -230,7 +231,7 @@ class TestTrainingJobsAPI:
@pytest.mark.asyncio
async def test_cancel_nonexistent_job(self, test_client: AsyncClient):
"""Test cancelling a non-existent training job"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs/nonexistent-job/cancel")
assert response.status_code == status.HTTP_404_NOT_FOUND
@@ -244,7 +245,7 @@ class TestTrainingJobsAPI:
"""Test getting training logs"""
job_id = training_job_in_db.job_id
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/logs")
assert response.status_code == status.HTTP_200_OK
@@ -267,7 +268,7 @@ class TestTrainingJobsAPI:
"min_data_points": 30
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/validate", json=request_data)
assert response.status_code == status.HTTP_200_OK
@@ -298,7 +299,7 @@ class TestSingleProductTrainingAPI:
"seasonality_mode": "additive"
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -320,7 +321,7 @@ class TestSingleProductTrainingAPI:
"seasonality_mode": "invalid_mode" # Invalid value
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -343,7 +344,7 @@ class TestSingleProductTrainingAPI:
"seasonality_mode": "additive"
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -409,7 +410,7 @@ class TestErrorHandling:
"min_data_points": 30
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
@@ -434,7 +435,7 @@ class TestErrorHandling:
"""Test handling of invalid job ID format"""
invalid_job_id = "invalid-job-id-with-special-chars@#$"
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{invalid_job_id}/status")
# Should handle gracefully
@@ -454,7 +455,7 @@ class TestErrorHandling:
}
with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")), \
- patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
@@ -466,7 +467,7 @@ class TestErrorHandling:
@pytest.mark.asyncio
async def test_invalid_json_payload(self, test_client: AsyncClient):
"""Test handling of invalid JSON payload"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
"/training/jobs",
content="invalid json {{{",
@@ -478,7 +479,7 @@ class TestErrorHandling:
@pytest.mark.asyncio
async def test_unsupported_content_type(self, test_client: AsyncClient):
"""Test handling of unsupported content type"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
"/training/jobs",
content="some text data",
@@ -552,7 +553,7 @@ class TestAPIValidation:
"yearly_seasonality": True
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=valid_request)
assert response.status_code == status.HTTP_200_OK
@@ -561,7 +562,7 @@ class TestAPIValidation:
invalid_request = valid_request.copy()
invalid_request["seasonality_mode"] = "invalid_mode"
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=invalid_request)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@@ -570,7 +571,7 @@ class TestAPIValidation:
invalid_request = valid_request.copy()
invalid_request["min_data_points"] = 5 # Too low
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=invalid_request)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@@ -588,7 +589,7 @@ class TestAPIValidation:
"seasonality_mode": "multiplicative"
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=valid_request
@@ -597,7 +598,7 @@ class TestAPIValidation:
assert response.status_code == status.HTTP_200_OK
# Test empty product name
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
"/training/products/",
json=valid_request
@@ -609,7 +610,7 @@ class TestAPIValidation:
async def test_query_parameter_validation(self, test_client: AsyncClient):
"""Test query parameter validation"""
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
# Test valid limit parameter
response = await test_client.get("/training/jobs?limit=5")
assert response.status_code == status.HTTP_200_OK
@@ -662,7 +663,7 @@ class TestAPIPerformance:
"large_config": {f"key_{i}": f"value_{i}" for i in range(1000)}
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=large_request)
# Should handle large payload gracefully
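The hunks above (and the test_integration.py hunks that follow) retarget the patches from app.api.training.get_current_tenant_id to shared.auth.decorators.get_current_tenant_id_dep. Patching the module that defines a FastAPI dependency can still miss, because each route holds its own reference to the callable; FastAPI's dependency_overrides mechanism keys the override on that exact callable instead. A sketch of that alternative, assuming the routes declare Depends(get_current_tenant_id_dep) imported from shared.auth.decorators as the new patch targets suggest (shown for comparison only, not the approach taken in these diffs):

# Hypothetical fixture using FastAPI dependency overrides instead of unittest.mock.patch
import pytest
from app.main import app
from shared.auth.decorators import get_current_tenant_id_dep  # assumed location, per the patch targets above

@pytest.fixture
def override_tenant_id():
    # Every route that depends on get_current_tenant_id_dep now resolves to the test tenant.
    app.dependency_overrides[get_current_tenant_id_dep] = lambda: "test-tenant"
    yield "test-tenant"
    # Remove the override so other tests see the real dependency again.
    app.dependency_overrides.pop(get_current_tenant_id_dep, None)

Because the override is looked up by the callable object itself, it applies no matter which module the test imports it from.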
diff --git a/services/training/tests/test_integration.py b/services/training/tests/test_integration.py
index 129aae67..40ea6d98 100644
--- a/services/training/tests/test_integration.py
+++ b/services/training/tests/test_integration.py
@@ -36,7 +36,7 @@ class TestTrainingWorkflowIntegration:
"seasonality_mode": "additive"
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == 200
@@ -44,7 +44,7 @@ class TestTrainingWorkflowIntegration:
job_id = job_data["job_id"]
# Step 2: Check initial status
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -56,7 +56,7 @@ class TestTrainingWorkflowIntegration:
await asyncio.sleep(0.1) # Allow background task to start
# Step 4: Check completion status
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
# The job should exist in database even if not completed yet
@@ -80,7 +80,7 @@ class TestTrainingWorkflowIntegration:
}
# Start single product training
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -92,7 +92,7 @@ class TestTrainingWorkflowIntegration:
assert f"training started for {product_name}" in job_data["message"].lower()
# Check job status
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -114,7 +114,7 @@ class TestTrainingWorkflowIntegration:
}
# Validate training data
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/validate", json=request_data)
assert response.status_code == 200
@@ -127,7 +127,7 @@ class TestTrainingWorkflowIntegration:
# If validation passes, start actual training
if validation_data["is_valid"]:
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == 200
@@ -144,7 +144,7 @@ class TestTrainingWorkflowIntegration:
job_id = training_job_in_db.job_id
# Check initial status
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -152,7 +152,7 @@ class TestTrainingWorkflowIntegration:
assert initial_status["status"] == "pending"
# Cancel the job
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(f"/training/jobs/{job_id}/cancel")
assert response.status_code == 200
@@ -160,7 +160,7 @@ class TestTrainingWorkflowIntegration:
assert "cancelled" in cancel_response["message"].lower()
# Verify cancellation
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -267,7 +267,7 @@ class TestErrorHandlingIntegration:
with patch('httpx.AsyncClient') as mock_client:
mock_client.return_value.__aenter__.return_value.get.side_effect = Exception("Service unavailable")
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Should still create job but might fail during execution
@@ -289,7 +289,7 @@ class TestErrorHandlingIntegration:
# Mock messaging failure
with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")):
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Should still succeed even if messaging fails
@@ -312,7 +312,7 @@ class TestErrorHandlingIntegration:
# Mock ML training failure
with patch('app.ml.trainer.BakeryMLTrainer.train_tenant_models', side_effect=Exception("ML training failed")):
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Job should be created successfully
@@ -394,7 +394,7 @@ class TestPerformanceIntegration:
# Make many rapid status requests
tasks = []
for _ in range(20):
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
task = test_client.get(f"/training/jobs/{job_id}/status")
tasks.append(task)
@@ -439,7 +439,7 @@ class TestSecurityIntegration:
"min_data_points": -5 # Invalid negative value
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=invalid_request)
assert response.status_code == 422 # Validation error
@@ -454,7 +454,7 @@ class TestSecurityIntegration:
# Try SQL injection in job ID
malicious_job_id = "job'; DROP TABLE model_training_logs; --"
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{malicious_job_id}/status")
# Should return 404, not cause database error
@@ -801,7 +801,7 @@ class TestBackwardCompatibility:
"include_weather": True
}
- with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
+ with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=minimal_request)
# Should work with defaults for missing fields
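If the endpoints declare the tenant with FastAPI's Depends(get_current_tenant_id_dep), the framework's dependency_overrides mapping is an alternative to patching the module attribute around every request; a hedged sketch, not code from this repository (the fixture name is illustrative, the app and dependency paths come from the diff and conftest):

```python
# Alternative sketch: override the dependency on the app instead of patching.
# Assumes the endpoints use Depends(get_current_tenant_id_dep); tenant_override
# is an illustrative fixture name, not part of the existing conftest.
import pytest


@pytest.fixture
def tenant_override():
    from app.main import app
    from shared.auth.decorators import get_current_tenant_id_dep

    app.dependency_overrides[get_current_tenant_id_dep] = lambda: 'test-tenant'
    yield 'test-tenant'
    app.dependency_overrides.pop(get_current_tenant_id_dep, None)
```

Scoping the override to a fixture keeps the tenant stubbed for the whole request cycle rather than only inside a with-block around each call.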
diff --git a/services/training/tests/test_ml.py b/services/training/tests/test_ml.py
index b50b99b3..a0be98ef 100644
--- a/services/training/tests/test_ml.py
+++ b/services/training/tests/test_ml.py
@@ -25,13 +25,16 @@ class TestBakeryDataProcessor:
@pytest.fixture
def sample_sales_data(self):
- """Create sample sales data"""
- dates = pd.date_range('2024-01-01', periods=60, freq='D')
- return pd.DataFrame({
- 'date': dates,
- 'product_name': ['Pan Integral'] * 60,
- 'quantity': [45 + np.random.randint(-10, 11) for _ in range(60)]
- })
+ """Provide sufficient data for ML training tests"""
+ dates = pd.date_range('2024-01-01', periods=35, freq='D') # 35 days > 30 minimum
+ data = []
+ for date in dates:
+ data.append({
+ 'date': date,
+ 'product_name': 'Pan Integral', # the data processor expects a product_name column
+ 'quantity': 40 + (5 * np.sin(date.dayofyear / 365 * 2 * np.pi)) # Seasonal pattern
+ })
+ return pd.DataFrame(data)
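For reference, the seasonal series built by the loop above can also be generated vectorised; a standalone sketch that mirrors the values in the diff and checks the assumptions stated in its comments (the 30-row minimum is taken from the fixture's own comment, not verified against the trainer):

```python
# Standalone sketch mirroring the new sample_sales_data fixture.
import numpy as np
import pandas as pd

dates = pd.date_range('2024-01-01', periods=35, freq='D')
sample = pd.DataFrame({
    'date': dates,
    'product_name': 'Pan Integral',
    'quantity': 40 + 5 * np.sin(dates.dayofyear / 365 * 2 * np.pi),
})

assert len(sample) >= 30                                    # enough history
assert {'date', 'product_name', 'quantity'} <= set(sample.columns)
```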
@pytest.fixture
def sample_weather_data(self):