Fix pytest test generation for the training service

Urtzi Alfaro
2025-07-25 14:10:27 +02:00
parent 0dc12f4b93
commit e2b85162f0
14 changed files with 151 additions and 7448 deletions


@@ -61,5 +61,10 @@ class TrainingSettings(BaseServiceSettings):
# Distributed Training (for future scaling)
DISTRIBUTED_TRAINING_ENABLED: bool = os.getenv("DISTRIBUTED_TRAINING_ENABLED", "false").lower() == "true"
TRAINING_WORKER_COUNT: int = int(os.getenv("TRAINING_WORKER_COUNT", "1"))
PROPHET_DAILY_SEASONALITY: bool = True
PROPHET_WEEKLY_SEASONALITY: bool = True
PROPHET_YEARLY_SEASONALITY: bool = True
PROPHET_SEASONALITY_MODE: str = "additive"
settings = TrainingSettings()
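
For reference, the new PROPHET_* values are plain attributes on TrainingSettings, so the trainer can pass them straight into a Prophet model. A minimal sketch, assuming the standard prophet package; the settings import path and the build_prophet_model helper are illustrative, not part of this commit:

from prophet import Prophet  # assumed: the prophet package used by the trainer
from app.core.config import settings  # assumed import path for the settings instance above

def build_prophet_model() -> Prophet:
    # Forward the seasonality defaults introduced in this commit.
    return Prophet(
        daily_seasonality=settings.PROPHET_DAILY_SEASONALITY,
        weekly_seasonality=settings.PROPHET_WEEKLY_SEASONALITY,
        yearly_seasonality=settings.PROPHET_YEARLY_SEASONALITY,
        seasonality_mode=settings.PROPHET_SEASONALITY_MODE,
    )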


@@ -18,6 +18,7 @@ from unittest.mock import Mock, AsyncMock, patch
from typing import Dict, List, Any, Generator
from pathlib import Path
import logging
from app.models.training import ModelTrainingLog, TrainedModel
# Configure pytest-asyncio
pytestmark = pytest.mark.asyncio
@@ -213,16 +214,14 @@ async def test_app():
from app.main import app
return app
@pytest.fixture
async def test_client(test_app):
"""Test client for API testing"""
from httpx import AsyncClient
def test_client(test_app):
"""Create test client for API testing - SYNC VERSION"""
from httpx import Client
async with AsyncClient(app=test_app, base_url="http://test") as client:
with Client(app=test_app, base_url="http://test") as client:
yield client
@pytest.fixture
def auth_headers():
"""Mock authentication headers"""
@@ -452,7 +451,7 @@ def setup_test_environment():
yield
# Cleanup environment
# Cleanup environment - FIXED: removed (scope="session")
test_vars = [
'ENVIRONMENT', 'LOG_LEVEL', 'MODEL_STORAGE_PATH',
'MAX_TRAINING_TIME_MINUTES', 'MIN_TRAINING_DATA_DAYS',
@@ -461,7 +460,8 @@ def setup_test_environment():
]
for var in test_vars:
os.environ.pop(var, None)(scope="session")
os.environ.pop(var, None) # FIXED: removed the erroneous (scope="session")
def event_loop():
"""Create an instance of the default event loop for the test session."""
loop = asyncio.new_event_loop()
@@ -514,41 +514,60 @@ def pytest_collection_modifyitems(config, items):
# TEST DATABASE FIXTURES
# ================================================================
@pytest.fixture
@pytest_asyncio.fixture
async def test_db_session():
"""Mock database session for testing"""
mock_session = AsyncMock()
"""Create async test database session"""
from app.core.database import database_manager
# Mock common database operations
mock_session.add = Mock()
mock_session.commit = AsyncMock()
mock_session.rollback = AsyncMock()
mock_session.refresh = AsyncMock()
mock_session.close = AsyncMock()
mock_session.execute = AsyncMock()
mock_session.scalar = AsyncMock()
return mock_session
async with database_manager.async_session_local() as session:
yield session
@pytest.fixture
def training_job_in_db():
"""Mock training job already in database"""
from app.models.training import ModelTrainingLog
def training_job_in_db(test_db_session):
"""Create a training job in database for testing"""
from app.models.training import ModelTrainingLog # Add this import
from datetime import datetime
job = ModelTrainingLog(
job_id="test_job_123",
tenant_id="test_tenant",
job_id="test-job-123",
tenant_id="test-tenant",
status="running",
progress=50,
current_step="Training model for Pan Integral",
config={"include_weather": True, "include_traffic": True},
started_at=datetime.now(),
logs=["Started training", "Processing data"]
current_step="Training models",
start_time=datetime.now(), # Use start_time, not started_at
config={"include_weather": True},
created_at=datetime.now(),
updated_at=datetime.now()
)
test_db_session.add(job)
test_db_session.commit()
test_db_session.refresh(job)
return job
@pytest.fixture
def trained_model_in_db(test_db_session):
"""Create a trained model in database for testing"""
from app.models.training import TrainedModel # Add this import
from datetime import datetime
model = TrainedModel(
model_id="test-model-123",
tenant_id="test-tenant",
product_name="Pan Integral",
model_type="prophet",
model_path="/tmp/test_model.pkl",
version=1,
training_samples=100,
features=["temperature", "humidity"],
hyperparameters={"seasonality_mode": "additive"},
training_metrics={"mae": 2.5, "mse": 8.3},
is_active=True,
created_at=datetime.now()
)
test_db_session.add(model)
test_db_session.commit()
test_db_session.refresh(model)
return model
# ================================================================
# SAMPLE DATA FIXTURES
@@ -843,6 +862,24 @@ def mock_data_processor():
yield mock_instance
@pytest.fixture
def mock_data_service():
"""Mock data service for testing"""
from unittest.mock import Mock, AsyncMock
mock_service = Mock()
mock_service.get_sales_data = AsyncMock(return_value=[
{"date": "2024-01-01", "product_name": "Pan Integral", "quantity": 45},
{"date": "2024-01-02", "product_name": "Pan Integral", "quantity": 38}
])
mock_service.get_weather_data = AsyncMock(return_value=[
{"date": "2024-01-01", "temperature": 20.5, "humidity": 65}
])
mock_service.get_traffic_data = AsyncMock(return_value=[
{"date": "2024-01-01", "traffic_index": 0.7}
])
return mock_service
@pytest.fixture
def mock_prophet_manager():

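The conftest changes above swap the mocked test_db_session for a real async session and add fixtures that insert rows. With an SQLAlchemy AsyncSession, commit and refresh are coroutines, so such fixtures are usually written as async fixtures that await those calls. A minimal sketch of that pattern, assuming pytest-asyncio and the field names shown in the diff; it is not a verbatim copy of the committed fixture:

import pytest_asyncio
from datetime import datetime

@pytest_asyncio.fixture
async def training_job_in_db(test_db_session):
    """Insert a training job row for tests (sketch based on the diff above)."""
    from app.models.training import ModelTrainingLog

    job = ModelTrainingLog(
        job_id="test-job-123",
        tenant_id="test-tenant",
        status="running",
        progress=50,
        current_step="Training models",
        start_time=datetime.now(),
        config={"include_weather": True},
    )
    test_db_session.add(job)
    await test_db_session.commit()    # AsyncSession: commit/refresh must be awaited
    await test_db_session.refresh(job)
    return job
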
Diffs for 4 files suppressed because they are too large.


@@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="2" failures="0" skipped="0" tests="2" time="1.455" timestamp="2025-07-25T11:22:45.219619" hostname="543df414761a"><testcase classname="tests.test_end_to_end.TestTrainingServiceEndToEnd" name="test_complete_training_workflow_api" time="0.034"><error message="failed on setup with &quot;UnboundLocalError: cannot access local variable 'np' where it is not associated with a value&quot;">tests/test_end_to_end.py:75: in real_bakery_data
temp = 15 + 12 * np.sin((date.timetuple().tm_yday / 365) * 2 * np.pi)
E UnboundLocalError: cannot access local variable 'np' where it is not associated with a value</error><error message="failed on teardown with &quot;TypeError: 'str' object is not callable&quot;">tests/conftest.py:464: in setup_test_environment
os.environ.pop(var, None)(scope="session")
E TypeError: 'str' object is not callable</error></testcase></testsuite></testsuites>
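
The UnboundLocalError above is a standard Python scoping pitfall: if real_bakery_data contains an "import numpy as np" anywhere in its body (for example inside a later branch), np becomes a local name for the whole function and the earlier reference fails even though numpy is imported at module level. A minimal, hypothetical reproduction of the failure mode:

import numpy as np  # module-level import

def real_bakery_data(add_noise: bool = False):
    # Raises UnboundLocalError: the inner import below makes `np` local to
    # the entire function, so this reference runs before the name is bound.
    temp = 15 + 12 * np.sin((1 / 365) * 2 * np.pi)
    if add_noise:
        import numpy as np  # removing this (and relying on the module-level import) fixes it
    return temp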


@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="0" failures="0" skipped="0" tests="0" time="0.204" timestamp="2025-07-25T11:22:43.995108" hostname="543df414761a" /></testsuites>


@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="1" failures="0" skipped="0" tests="1" time="0.238" timestamp="2025-07-25T11:22:44.599099" hostname="543df414761a"><testcase classname="" name="tests.test_performance" time="0.000"><error message="collection failure">ImportError while importing test module '/app/tests/test_performance.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_performance.py:16: in &lt;module&gt;
import psutil
E ModuleNotFoundError: No module named 'psutil'</error></testcase></testsuite></testsuites>
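
The collection error above comes from an unconditional "import psutil" in tests/test_performance.py while psutil is not installed in the image. Besides adding psutil to the test requirements, pytest.importorskip is a common guard that skips the module instead of failing collection; a minimal sketch (test name and threshold are illustrative):

# tests/test_performance.py (sketch)
import pytest

# Skip this whole module cleanly when psutil is not available.
psutil = pytest.importorskip("psutil")

def test_training_memory_stays_bounded():
    rss_mb = psutil.Process().memory_info().rss / (1024 * 1024)
    assert rss_mb < 1024  # illustrative 1 GB ceiling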


@@ -1,649 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="23" failures="35" skipped="2" tests="83" time="5.714" timestamp="2025-07-25T11:22:37.801499" hostname="543df414761a"><testcase classname="tests.test_api.TestTrainingAPI" name="test_health_check" time="0.030"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:20: in test_health_check
response = await test_client.get("/health")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_readiness_check_ready" time="0.069"><failure message="AttributeError: &lt;starlette.datastructures.State object at 0xffff5ae06a10&gt; does not have the attribute 'ready'">tests/test_api.py:32: in test_readiness_check_ready
with patch('app.main.app.state.ready', True):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;starlette.datastructures.State object at 0xffff5ae06a10&gt; does not have the attribute 'ready'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_readiness_check_not_ready" time="0.030"><failure message="AttributeError: &lt;starlette.datastructures.State object at 0xffff5ae06a10&gt; does not have the attribute 'ready'">tests/test_api.py:42: in test_readiness_check_not_ready
with patch('app.main.app.state.ready', False):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;starlette.datastructures.State object at 0xffff5ae06a10&gt; does not have the attribute 'ready'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_liveness_check_healthy" time="0.028"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:53: in test_liveness_check_healthy
response = await test_client.get("/health/live")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_liveness_check_unhealthy" time="0.027"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:63: in test_liveness_check_unhealthy
response = await test_client.get("/health/live")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_metrics_endpoint" time="0.027"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:73: in test_metrics_endpoint
response = await test_client.get("/metrics")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_root_endpoint" time="0.026"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:92: in test_root_endpoint
response = await test_client.get("/")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_start_training_job_success" time="0.029"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 104&#10; @pytest.mark.asyncio&#10; async def test_start_training_job_success(&#10; self,&#10; test_client: AsyncClient,&#10; mock_messaging,&#10; mock_ml_trainer,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test starting a training job successfully&quot;&quot;&quot;&#10; request_data = {&#10; &quot;include_weather&quot;: True,&#10; &quot;include_traffic&quot;: True,&#10; &quot;min_data_points&quot;: 30,&#10; &quot;seasonality_mode&quot;: &quot;additive&quot;&#10; }&#10;&#10; with patch('app.api.training.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10; response = await test_client.post(&quot;/training/jobs&quot;, json=request_data)&#10;&#10; assert response.status_code == status.HTTP_200_OK&#10; data = response.json()&#10;&#10; assert &quot;job_id&quot; in data&#10; assert data[&quot;status&quot;] == &quot;started&quot;&#10; assert data[&quot;tenant_id&quot;] == &quot;test-tenant&quot;&#10; assert &quot;estimated_duration_minutes&quot; in data&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:104&quot;">file /app/tests/test_api.py, line 104
@pytest.mark.asyncio
async def test_start_training_job_success(
self,
test_client: AsyncClient,
mock_messaging,
mock_ml_trainer,
mock_data_service
):
"""Test starting a training job successfully"""
request_data = {
"include_weather": True,
"include_traffic": True,
"min_data_points": 30,
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
assert data["status"] == "started"
assert data["tenant_id"] == "test-tenant"
assert "estimated_duration_minutes" in data
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:104</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_start_training_job_validation_error" time="0.027"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:139: in test_start_training_job_validation_error
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_get_training_status_existing_job" time="0.031"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_get_training_status_nonexistent_job" time="0.027"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:167: in test_get_training_status_nonexistent_job
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_list_training_jobs" time="0.028"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_list_training_jobs_with_status_filter" time="0.028"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_cancel_training_job_success" time="0.031"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_cancel_nonexistent_job" time="0.031"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:233: in test_cancel_nonexistent_job
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_get_training_logs" time="0.032"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_validate_training_data_valid" time="0.028"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 257&#10; @pytest.mark.asyncio&#10; async def test_validate_training_data_valid(&#10; self,&#10; test_client: AsyncClient,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test validating valid training data&quot;&quot;&quot;&#10; request_data = {&#10; &quot;include_weather&quot;: True,&#10; &quot;include_traffic&quot;: True,&#10; &quot;min_data_points&quot;: 30&#10; }&#10;&#10; with patch('app.api.training.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10; response = await test_client.post(&quot;/training/validate&quot;, json=request_data)&#10;&#10; assert response.status_code == status.HTTP_200_OK&#10; data = response.json()&#10;&#10; assert &quot;is_valid&quot; in data&#10; assert &quot;issues&quot; in data&#10; assert &quot;recommendations&quot; in data&#10; assert &quot;estimated_training_time&quot; in data&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:257&quot;">file /app/tests/test_api.py, line 257
@pytest.mark.asyncio
async def test_validate_training_data_valid(
self,
test_client: AsyncClient,
mock_data_service
):
"""Test validating valid training data"""
request_data = {
"include_weather": True,
"include_traffic": True,
"min_data_points": 30
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post("/training/validate", json=request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "is_valid" in data
assert "issues" in data
assert "recommendations" in data
assert "estimated_training_time" in data
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:257</error></testcase><testcase classname="tests.test_api.TestSingleProductTrainingAPI" name="test_train_single_product_success" time="0.033"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 285&#10; @pytest.mark.asyncio&#10; async def test_train_single_product_success(&#10; self,&#10; test_client: AsyncClient,&#10; mock_messaging,&#10; mock_ml_trainer,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test training a single product successfully&quot;&quot;&quot;&#10; product_name = &quot;Pan Integral&quot;&#10; request_data = {&#10; &quot;include_weather&quot;: True,&#10; &quot;include_traffic&quot;: True,&#10; &quot;seasonality_mode&quot;: &quot;additive&quot;&#10; }&#10;&#10; with patch('app.api.training.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10; response = await test_client.post(&#10; f&quot;/training/products/{product_name}&quot;,&#10; json=request_data&#10; )&#10;&#10; assert response.status_code == status.HTTP_200_OK&#10; data = response.json()&#10;&#10; assert &quot;job_id&quot; in data&#10; assert data[&quot;status&quot;] == &quot;started&quot;&#10; assert data[&quot;tenant_id&quot;] == &quot;test-tenant&quot;&#10; assert f&quot;training started for {product_name}&quot; in data[&quot;message&quot;].lower()&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:285&quot;">file /app/tests/test_api.py, line 285
@pytest.mark.asyncio
async def test_train_single_product_success(
self,
test_client: AsyncClient,
mock_messaging,
mock_ml_trainer,
mock_data_service
):
"""Test training a single product successfully"""
product_name = "Pan Integral"
request_data = {
"include_weather": True,
"include_traffic": True,
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
assert data["status"] == "started"
assert data["tenant_id"] == "test-tenant"
assert f"training started for {product_name}" in data["message"].lower()
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:285</error></testcase><testcase classname="tests.test_api.TestSingleProductTrainingAPI" name="test_train_single_product_validation_error" time="0.033"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:323: in test_train_single_product_validation_error
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestSingleProductTrainingAPI" name="test_train_single_product_special_characters" time="0.030"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 331&#10; @pytest.mark.asyncio&#10; async def test_train_single_product_special_characters(&#10; self,&#10; test_client: AsyncClient,&#10; mock_messaging,&#10; mock_ml_trainer,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test training product with special characters in name&quot;&quot;&quot;&#10; product_name = &quot;Pan Francés&quot; # With accent&#10; request_data = {&#10; &quot;include_weather&quot;: True,&#10; &quot;seasonality_mode&quot;: &quot;additive&quot;&#10; }&#10;&#10; with patch('app.api.training.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10; response = await test_client.post(&#10; f&quot;/training/products/{product_name}&quot;,&#10; json=request_data&#10; )&#10;&#10; assert response.status_code == status.HTTP_200_OK&#10; data = response.json()&#10; assert &quot;job_id&quot; in data&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:331&quot;">file /app/tests/test_api.py, line 331
@pytest.mark.asyncio
async def test_train_single_product_special_characters(
self,
test_client: AsyncClient,
mock_messaging,
mock_ml_trainer,
mock_data_service
):
"""Test training product with special characters in name"""
product_name = "Pan Francés" # With accent
request_data = {
"include_weather": True,
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:331</error></testcase><testcase classname="tests.test_api.TestModelsAPI" name="test_list_models" time="0.028"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 360&#10; @pytest.mark.asyncio&#10; async def test_list_models(&#10; self,&#10; test_client: AsyncClient,&#10; trained_model_in_db&#10; ):&#10; &quot;&quot;&quot;Test listing trained models&quot;&quot;&quot;&#10; with patch('app.api.models.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10; response = await test_client.get(&quot;/models&quot;)&#10;&#10; # This endpoint might not exist yet, so we expect either 200 or 404&#10; assert response.status_code in [status.HTTP_200_OK, status.HTTP_404_NOT_FOUND]&#10;&#10; if response.status_code == status.HTTP_200_OK:&#10; data = response.json()&#10; assert isinstance(data, list)&#10;E fixture 'trained_model_in_db' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:360&quot;">file /app/tests/test_api.py, line 360
@pytest.mark.asyncio
async def test_list_models(
self,
test_client: AsyncClient,
trained_model_in_db
):
"""Test listing trained models"""
with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.get("/models")
# This endpoint might not exist yet, so we expect either 200 or 404
assert response.status_code in [status.HTTP_200_OK, status.HTTP_404_NOT_FOUND]
if response.status_code == status.HTTP_200_OK:
data = response.json()
assert isinstance(data, list)
E fixture 'trained_model_in_db' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:360</error></testcase><testcase classname="tests.test_api.TestModelsAPI" name="test_get_model_details" time="0.027"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 377&#10; @pytest.mark.asyncio&#10; async def test_get_model_details(&#10; self,&#10; test_client: AsyncClient,&#10; trained_model_in_db&#10; ):&#10; &quot;&quot;&quot;Test getting model details&quot;&quot;&quot;&#10; model_id = trained_model_in_db.model_id&#10;&#10; with patch('app.api.models.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10; response = await test_client.get(f&quot;/models/{model_id}&quot;)&#10;&#10; # This endpoint might not exist yet&#10; assert response.status_code in [&#10; status.HTTP_200_OK,&#10; status.HTTP_404_NOT_FOUND,&#10; status.HTTP_501_NOT_IMPLEMENTED&#10; ]&#10;E fixture 'trained_model_in_db' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:377&quot;">file /app/tests/test_api.py, line 377
@pytest.mark.asyncio
async def test_get_model_details(
self,
test_client: AsyncClient,
trained_model_in_db
):
"""Test getting model details"""
model_id = trained_model_in_db.model_id
with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.get(f"/models/{model_id}")
# This endpoint might not exist yet
assert response.status_code in [
status.HTTP_200_OK,
status.HTTP_404_NOT_FOUND,
status.HTTP_501_NOT_IMPLEMENTED
]
E fixture 'trained_model_in_db' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:377</error></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_database_error_handling" time="0.032"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:412: in test_database_error_handling
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_missing_tenant_id" time="0.028"><failure message="AttributeError: 'async_generator' object has no attribute 'post'">tests/test_api.py:427: in test_missing_tenant_id
response = await test_client.post("/training/jobs", json=request_data)
E AttributeError: 'async_generator' object has no attribute 'post'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_invalid_job_id_format" time="0.028"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:437: in test_invalid_job_id_format
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_messaging_failure_handling" time="0.026"><error message="failed on setup with &quot;file /app/tests/test_api.py, line 443&#10; @pytest.mark.asyncio&#10; async def test_messaging_failure_handling(&#10; self,&#10; test_client: AsyncClient,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test handling when messaging fails&quot;&quot;&quot;&#10; request_data = {&#10; &quot;include_weather&quot;: True,&#10; &quot;include_traffic&quot;: True,&#10; &quot;min_data_points&quot;: 30&#10; }&#10;&#10; with patch('app.services.messaging.publish_job_started', side_effect=Exception(&quot;Messaging failed&quot;)), \&#10; patch('app.api.training.get_current_tenant_id', return_value=&quot;test-tenant&quot;):&#10;&#10; response = await test_client.post(&quot;/training/jobs&quot;, json=request_data)&#10;&#10; # Should still succeed even if messaging fails&#10; assert response.status_code == status.HTTP_200_OK&#10; data = response.json()&#10; assert &quot;job_id&quot; in data&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_api.py:443&quot;">file /app/tests/test_api.py, line 443
@pytest.mark.asyncio
async def test_messaging_failure_handling(
self,
test_client: AsyncClient,
mock_data_service
):
"""Test handling when messaging fails"""
request_data = {
"include_weather": True,
"include_traffic": True,
"min_data_points": 30
}
with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")), \
patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Should still succeed even if messaging fails
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:443</error></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_invalid_json_payload" time="0.028"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:469: in test_invalid_json_payload
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_unsupported_content_type" time="0.028"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:481: in test_unsupported_content_type
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAuthenticationIntegration" name="test_endpoints_require_auth" time="0.027"><failure message="AttributeError: 'async_generator' object has no attribute 'post'">tests/test_api.py:512: in test_endpoints_require_auth
response = await test_client.post(endpoint, json={})
E AttributeError: 'async_generator' object has no attribute 'post'</failure></testcase><testcase classname="tests.test_api.TestAuthenticationIntegration" name="test_tenant_isolation_in_api" time="0.028"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestAPIValidation" name="test_training_request_validation" time="0.027"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:555: in test_training_request_validation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIValidation" name="test_single_product_request_validation" time="0.038"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:591: in test_single_product_request_validation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIValidation" name="test_query_parameter_validation" time="0.030"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:612: in test_query_parameter_validation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIPerformance" name="test_concurrent_requests" time="0.031"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:643: in test_concurrent_requests
with patch('app.api.training.get_current_tenant_id', return_value=f"tenant-{i}"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIPerformance" name="test_large_payload_handling" time="0.030"><failure message="AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'">tests/test_api.py:665: in test_large_payload_handling
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: &lt;module 'app.api.training' from '/app/app/api/training.py'&gt; does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIPerformance" name="test_rapid_successive_requests" time="0.030"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:681: in test_rapid_successive_requests
response = await test_client.get("/health")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_training_data_basic" time="0.049" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_training_data_empty_weather" time="0.045" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_prediction_features" time="0.034" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_add_temporal_features" time="0.029" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_spanish_holiday_detection" time="0.026" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_training_data_insufficient_data" time="0.037"><failure message="Failed: DID NOT RAISE &lt;class 'Exception'&gt;">tests/test_ml.py:201: in test_prepare_training_data_insufficient_data
with pytest.raises(Exception):
E Failed: DID NOT RAISE &lt;class 'Exception'&gt;</failure></testcase><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_train_bakery_model_success" time="0.031"><failure message="AttributeError: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'">tests/test_ml.py:239: in test_train_bakery_model_success
result = await prophet_manager.train_bakery_model(
app/ml/prophet_manager.py:70: in train_bakery_model
model = self._create_prophet_model(regressor_columns)
app/ml/prophet_manager.py:238: in _create_prophet_model
daily_seasonality=settings.PROPHET_DAILY_SEASONALITY,
/usr/local/lib/python3.11/site-packages/pydantic/main.py:761: in __getattr__
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')
E AttributeError: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'</failure></testcase><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_validate_training_data_valid" time="0.028" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_validate_training_data_insufficient" time="0.027" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_validate_training_data_missing_columns" time="0.027" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_get_spanish_holidays" time="0.029" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_extract_regressor_columns" time="0.028" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_generate_forecast" time="0.028" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_train_tenant_models_success" time="0.048" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_train_single_product_success" time="0.041"><failure message="ValueError: Insufficient training data for Pan Integral: 3 days, minimum required: 30">tests/test_ml.py:414: in test_train_single_product_success
result = await ml_trainer.train_single_product(
app/ml/trainer.py:149: in train_single_product
model_info = await self.prophet_manager.train_bakery_model(
app/ml/prophet_manager.py:61: in train_bakery_model
await self._validate_training_data(df, product_name)
app/ml/prophet_manager.py:158: in _validate_training_data
raise ValueError(
E ValueError: Insufficient training data for Pan Integral: 3 days, minimum required: 30</failure></testcase><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_train_single_product_no_data" time="0.036"><failure message="KeyError: 'product_name'">tests/test_ml.py:438: in test_train_single_product_no_data
await ml_trainer.train_single_product(
app/ml/trainer.py:134: in train_single_product
product_sales = sales_df[sales_df['product_name'] == product_name].copy()
/usr/local/lib/python3.11/site-packages/pandas/core/frame.py:3893: in __getitem__
indexer = self.columns.get_loc(key)
/usr/local/lib/python3.11/site-packages/pandas/core/indexes/range.py:418: in get_loc
raise KeyError(key)
E KeyError: 'product_name'</failure></testcase><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_validate_input_data_valid" time="0.032" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_validate_input_data_empty" time="0.033" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_validate_input_data_missing_columns" time="0.038" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_calculate_training_summary" time="0.032" /><testcase classname="tests.test_ml.TestIntegrationML" name="test_end_to_end_training_flow" time="0.028"><skipped type="pytest.skip" message="Requires actual Prophet dependencies for integration test">/app/tests/test_ml.py:508: Requires actual Prophet dependencies for integration test</skipped></testcase><testcase classname="tests.test_ml.TestIntegrationML" name="test_data_pipeline_integration" time="0.028"><skipped type="pytest.skip" message="Requires actual dependencies for integration test">/app/tests/test_ml.py:513: Requires actual dependencies for integration test</skipped></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_create_training_job_success" time="0.030"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'
During handling of the above exception, another exception occurred:
tests/test_service.py:34: in test_create_training_job_success
result = await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_create_single_product_job_success" time="0.031"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:84: in create_single_product_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'
During handling of the above exception, another exception occurred:
tests/test_service.py:60: in test_create_single_product_job_success
result = await training_service.create_single_product_job(
app/services/training_service.py:93: in create_single_product_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_get_job_status_existing" time="0.035"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_get_job_status_nonexistent" time="0.030" /><testcase classname="tests.test_service.TestTrainingService" name="test_list_training_jobs" time="0.031"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_list_training_jobs_with_filter" time="0.035"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_cancel_training_job_success" time="0.035"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_cancel_nonexistent_job" time="0.031"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:270: in cancel_training_job
result = await db.execute(
E AttributeError: 'coroutine' object has no attribute 'execute'
During handling of the above exception, another exception occurred:
tests/test_service.py:175: in test_cancel_nonexistent_job
result = await training_service.cancel_training_job(
app/services/training_service.py:297: in cancel_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_validate_training_data_valid" time="0.034"><error message="failed on setup with &quot;file /app/tests/test_service.py, line 183&#10; @pytest.mark.asyncio&#10; async def test_validate_training_data_valid(&#10; self,&#10; training_service,&#10; test_db_session,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test validation with valid data&quot;&quot;&quot;&#10; config = {&quot;min_data_points&quot;: 30}&#10;&#10; result = await training_service.validate_training_data(&#10; db=test_db_session,&#10; tenant_id=&quot;test-tenant&quot;,&#10; config=config&#10; )&#10;&#10; assert isinstance(result, dict)&#10; assert &quot;is_valid&quot; in result&#10; assert &quot;issues&quot; in result&#10; assert &quot;recommendations&quot; in result&#10; assert &quot;estimated_time_minutes&quot; in result&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_service.py:183&quot;">file /app/tests/test_service.py, line 183
@pytest.mark.asyncio
async def test_validate_training_data_valid(
self,
training_service,
test_db_session,
mock_data_service
):
"""Test validation with valid data"""
config = {"min_data_points": 30}
result = await training_service.validate_training_data(
db=test_db_session,
tenant_id="test-tenant",
config=config
)
assert isinstance(result, dict)
assert "is_valid" in result
assert "issues" in result
assert "recommendations" in result
assert "estimated_time_minutes" in result
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_service.py:183</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_validate_training_data_no_data" time="0.031"><failure message="assert True is False">tests/test_service.py:221: in test_validate_training_data_no_data
assert result["is_valid"] is False
E assert True is False</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_update_job_status" time="0.035"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_store_trained_models" time="0.032"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:572: in _store_trained_models
await db.execute(
E AttributeError: 'coroutine' object has no attribute 'execute'
During handling of the above exception, another exception occurred:
tests/test_service.py:280: in test_store_trained_models
await training_service._store_trained_models(
app/services/training_service.py:592: in _store_trained_models
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_get_training_logs" time="0.033"><error message="failed on setup with &quot;TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog&quot;">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_sales_data_success" time="0.031" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_sales_data_error" time="0.030" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_weather_data_success" time="0.040" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_traffic_data_success" time="0.033" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_data_with_date_filters" time="0.030" /><testcase classname="tests.test_service.TestTrainingServiceExecution" name="test_execute_training_job_success" time="0.030"><error message="failed on setup with &quot;file /app/tests/test_service.py, line 468&#10; @pytest.mark.asyncio&#10; async def test_execute_training_job_success(&#10; self,&#10; training_service,&#10; test_db_session,&#10; mock_messaging,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test successful training job execution&quot;&quot;&quot;&#10; # Create job first&#10; job_id = &quot;test-execution-job&quot;&#10; training_log = await training_service.create_training_job(&#10; db=test_db_session,&#10; tenant_id=&quot;test-tenant&quot;,&#10; job_id=job_id,&#10; config={&quot;include_weather&quot;: True}&#10; )&#10;&#10; request = TrainingJobRequest(&#10; include_weather=True,&#10; include_traffic=True,&#10; min_data_points=30&#10; )&#10;&#10; with patch('app.services.training_service.TrainingService._fetch_sales_data') as mock_fetch_sales, \&#10; patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \&#10; patch('app.services.training_service.TrainingService._fetch_traffic_data') as mock_fetch_traffic, \&#10; patch('app.services.training_service.TrainingService._store_trained_models') as mock_store:&#10;&#10; mock_fetch_sales.return_value = [{&quot;date&quot;: &quot;2024-01-01&quot;, &quot;product_name&quot;: &quot;Pan Integral&quot;, &quot;quantity&quot;: 45}]&#10; mock_fetch_weather.return_value = []&#10; mock_fetch_traffic.return_value = []&#10; mock_store.return_value = None&#10;&#10; await training_service.execute_training_job(&#10; db=test_db_session,&#10; job_id=job_id,&#10; tenant_id=&quot;test-tenant&quot;,&#10; request=request&#10; )&#10;&#10; # Verify job was completed&#10; updated_job = await training_service.get_job_status(&#10; db=test_db_session,&#10; job_id=job_id,&#10; tenant_id=&quot;test-tenant&quot;&#10; )&#10;&#10; assert updated_job.status == &quot;completed&quot;&#10; assert updated_job.progress == 100&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, 
monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_service.py:468&quot;">file /app/tests/test_service.py, line 468
@pytest.mark.asyncio
async def test_execute_training_job_success(
self,
training_service,
test_db_session,
mock_messaging,
mock_data_service
):
"""Test successful training job execution"""
# Create job first
job_id = "test-execution-job"
training_log = await training_service.create_training_job(
db=test_db_session,
tenant_id="test-tenant",
job_id=job_id,
config={"include_weather": True}
)
request = TrainingJobRequest(
include_weather=True,
include_traffic=True,
min_data_points=30
)
with patch('app.services.training_service.TrainingService._fetch_sales_data') as mock_fetch_sales, \
patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \
patch('app.services.training_service.TrainingService._fetch_traffic_data') as mock_fetch_traffic, \
patch('app.services.training_service.TrainingService._store_trained_models') as mock_store:
mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": "Pan Integral", "quantity": 45}]
mock_fetch_weather.return_value = []
mock_fetch_traffic.return_value = []
mock_store.return_value = None
await training_service.execute_training_job(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant",
request=request
)
# Verify job was completed
updated_job = await training_service.get_job_status(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant"
)
assert updated_job.status == "completed"
assert updated_job.progress == 100
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_service.py:468</error></testcase><testcase classname="tests.test_service.TestTrainingServiceExecution" name="test_execute_training_job_failure" time="0.031"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'
During handling of the above exception, another exception occurred:
tests/test_service.py:529: in test_execute_training_job_failure
await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingServiceExecution" name="test_execute_single_product_training_success" time="0.031"><error message="failed on setup with &quot;file /app/tests/test_service.py, line 559&#10; @pytest.mark.asyncio&#10; async def test_execute_single_product_training_success(&#10; self,&#10; training_service,&#10; test_db_session,&#10; mock_messaging,&#10; mock_data_service&#10; ):&#10; &quot;&quot;&quot;Test successful single product training execution&quot;&quot;&quot;&#10; job_id = &quot;test-single-product-job&quot;&#10; product_name = &quot;Pan Integral&quot;&#10;&#10; await training_service.create_single_product_job(&#10; db=test_db_session,&#10; tenant_id=&quot;test-tenant&quot;,&#10; product_name=product_name,&#10; job_id=job_id,&#10; config={}&#10; )&#10;&#10; request = SingleProductTrainingRequest(&#10; include_weather=True,&#10; include_traffic=False&#10; )&#10;&#10; with patch('app.services.training_service.TrainingService._fetch_product_sales_data') as mock_fetch_sales, \&#10; patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \&#10; patch('app.services.training_service.TrainingService._store_single_trained_model') as mock_store:&#10;&#10; mock_fetch_sales.return_value = [{&quot;date&quot;: &quot;2024-01-01&quot;, &quot;product_name&quot;: product_name, &quot;quantity&quot;: 45}]&#10; mock_fetch_weather.return_value = []&#10; mock_store.return_value = None&#10;&#10; await training_service.execute_single_product_training(&#10; db=test_db_session,&#10; job_id=job_id,&#10; tenant_id=&quot;test-tenant&quot;,&#10; product_name=product_name,&#10; request=request&#10; )&#10;&#10; # Verify job was completed&#10; updated_job = await training_service.get_job_status(&#10; db=test_db_session,&#10; job_id=job_id,&#10; tenant_id=&quot;test-tenant&quot;&#10; )&#10;&#10; assert updated_job.status == &quot;completed&quot;&#10; assert updated_job.progress == 100&#10;E fixture 'mock_data_service' not found&#10;&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, 
unused_udp_port_factory&#10;&gt; use 'pytest --fixtures [testpath]' for help on them.&#10;&#10;/app/tests/test_service.py:559&quot;">file /app/tests/test_service.py, line 559
@pytest.mark.asyncio
async def test_execute_single_product_training_success(
self,
training_service,
test_db_session,
mock_messaging,
mock_data_service
):
"""Test successful single product training execution"""
job_id = "test-single-product-job"
product_name = "Pan Integral"
await training_service.create_single_product_job(
db=test_db_session,
tenant_id="test-tenant",
product_name=product_name,
job_id=job_id,
config={}
)
request = SingleProductTrainingRequest(
include_weather=True,
include_traffic=False
)
with patch('app.services.training_service.TrainingService._fetch_product_sales_data') as mock_fetch_sales, \
patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \
patch('app.services.training_service.TrainingService._store_single_trained_model') as mock_store:
mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": product_name, "quantity": 45}]
mock_fetch_weather.return_value = []
mock_store.return_value = None
await training_service.execute_single_product_training(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant",
product_name=product_name,
request=request
)
# Verify job was completed
updated_job = await training_service.get_job_status(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant"
)
assert updated_job.status == "completed"
assert updated_job.progress == 100
E fixture 'mock_data_service' not found
&gt; available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
&gt; use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_service.py:559</error></testcase><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_database_connection_failure" time="0.029" /><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_external_service_timeout" time="0.030" /><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_concurrent_job_creation" time="0.028"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'
During handling of the above exception, another exception occurred:
tests/test_service.py:660: in test_concurrent_job_creation
job = await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_malformed_config_handling" time="0.001"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'
During handling of the above exception, another exception occurred:
tests/test_service.py:681: in test_malformed_config_handling
job = await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_malformed_config_handling" time="0.029"><error message="failed on teardown with &quot;TypeError: 'str' object is not callable&quot;">tests/conftest.py:464: in setup_test_environment
os.environ.pop(var, None)(scope="session")
E TypeError: 'str' object is not callable</error></testcase></testsuite></testsuites>

File diff suppressed because one or more lines are too long

View File

@@ -29,7 +29,8 @@ class TestTrainingAPI:
async def test_readiness_check_ready(self, test_client: AsyncClient):
"""Test readiness check when service is ready"""
# Mock app state as ready
with patch('app.main.app.state.ready', True):
from app.main import app # Add import at top
with patch.object(app.state, 'ready', True, create=True):
response = await test_client.get("/health/ready")
assert response.status_code == status.HTTP_200_OK
@@ -117,7 +118,7 @@ class TestTrainingJobsAPI:
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_200_OK
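The recurring AttributeError failures in the report ("does not have the attribute 'get_current_tenant_id'") come from patching a name that app.api.training never exposes; the hunks in this file switch the patch target to shared.auth.decorators.get_current_tenant_id_dep instead. If that callable is wired in as a FastAPI dependency, an alternative worth noting is to override it on the app rather than patching the module. This is only a sketch, and the import path is assumed from the patch targets above:

import pytest
from app.main import app
from shared.auth.decorators import get_current_tenant_id_dep  # path assumed from the patch targets

@pytest.fixture
def as_test_tenant():
    """Resolve the tenant dependency to a fixed value for the duration of a test."""
    app.dependency_overrides[get_current_tenant_id_dep] = lambda: "test-tenant"
    yield
    app.dependency_overrides.pop(get_current_tenant_id_dep, None)

Unlike a module-level patch, the override is resolved through FastAPI's dependency injection, so it also covers endpoints that receive the tenant id via Depends(...) rather than by calling the function directly.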
@@ -136,7 +137,7 @@ class TestTrainingJobsAPI:
"min_data_points": 5 # Too low
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@@ -150,7 +151,7 @@ class TestTrainingJobsAPI:
"""Test getting status of existing training job"""
job_id = training_job_in_db.job_id
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == status.HTTP_200_OK
@@ -164,7 +165,7 @@ class TestTrainingJobsAPI:
@pytest.mark.asyncio
async def test_get_training_status_nonexistent_job(self, test_client: AsyncClient):
"""Test getting status of non-existent training job"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get("/training/jobs/nonexistent-job/status")
assert response.status_code == status.HTTP_404_NOT_FOUND
@@ -176,7 +177,7 @@ class TestTrainingJobsAPI:
training_job_in_db
):
"""Test listing training jobs"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get("/training/jobs")
assert response.status_code == status.HTTP_200_OK
@@ -198,7 +199,7 @@ class TestTrainingJobsAPI:
training_job_in_db
):
"""Test listing training jobs with status filter"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get("/training/jobs?status=pending")
assert response.status_code == status.HTTP_200_OK
@@ -219,7 +220,7 @@ class TestTrainingJobsAPI:
"""Test cancelling a training job successfully"""
job_id = training_job_in_db.job_id
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(f"/training/jobs/{job_id}/cancel")
assert response.status_code == status.HTTP_200_OK
@@ -230,7 +231,7 @@ class TestTrainingJobsAPI:
@pytest.mark.asyncio
async def test_cancel_nonexistent_job(self, test_client: AsyncClient):
"""Test cancelling a non-existent training job"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs/nonexistent-job/cancel")
assert response.status_code == status.HTTP_404_NOT_FOUND
@@ -244,7 +245,7 @@ class TestTrainingJobsAPI:
"""Test getting training logs"""
job_id = training_job_in_db.job_id
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/logs")
assert response.status_code == status.HTTP_200_OK
@@ -267,7 +268,7 @@ class TestTrainingJobsAPI:
"min_data_points": 30
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/validate", json=request_data)
assert response.status_code == status.HTTP_200_OK
@@ -298,7 +299,7 @@ class TestSingleProductTrainingAPI:
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -320,7 +321,7 @@ class TestSingleProductTrainingAPI:
"seasonality_mode": "invalid_mode" # Invalid value
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -343,7 +344,7 @@ class TestSingleProductTrainingAPI:
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -409,7 +410,7 @@ class TestErrorHandling:
"min_data_points": 30
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
@@ -434,7 +435,7 @@ class TestErrorHandling:
"""Test handling of invalid job ID format"""
invalid_job_id = "invalid-job-id-with-special-chars@#$"
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{invalid_job_id}/status")
# Should handle gracefully
@@ -454,7 +455,7 @@ class TestErrorHandling:
}
with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")), \
patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
@@ -466,7 +467,7 @@ class TestErrorHandling:
@pytest.mark.asyncio
async def test_invalid_json_payload(self, test_client: AsyncClient):
"""Test handling of invalid JSON payload"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
"/training/jobs",
content="invalid json {{{",
@@ -478,7 +479,7 @@ class TestErrorHandling:
@pytest.mark.asyncio
async def test_unsupported_content_type(self, test_client: AsyncClient):
"""Test handling of unsupported content type"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
"/training/jobs",
content="some text data",
@@ -552,7 +553,7 @@ class TestAPIValidation:
"yearly_seasonality": True
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=valid_request)
assert response.status_code == status.HTTP_200_OK
@@ -561,7 +562,7 @@ class TestAPIValidation:
invalid_request = valid_request.copy()
invalid_request["seasonality_mode"] = "invalid_mode"
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=invalid_request)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@@ -570,7 +571,7 @@ class TestAPIValidation:
invalid_request = valid_request.copy()
invalid_request["min_data_points"] = 5 # Too low
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=invalid_request)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@@ -588,7 +589,7 @@ class TestAPIValidation:
"seasonality_mode": "multiplicative"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=valid_request
@@ -597,7 +598,7 @@ class TestAPIValidation:
assert response.status_code == status.HTTP_200_OK
# Test empty product name
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
"/training/products/",
json=valid_request
@@ -609,7 +610,7 @@ class TestAPIValidation:
async def test_query_parameter_validation(self, test_client: AsyncClient):
"""Test query parameter validation"""
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
# Test valid limit parameter
response = await test_client.get("/training/jobs?limit=5")
assert response.status_code == status.HTTP_200_OK
@@ -662,7 +663,7 @@ class TestAPIPerformance:
"large_config": {f"key_{i}": f"value_{i}" for i in range(1000)}
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=large_request)
# Should handle large payload gracefully

View File

@@ -36,7 +36,7 @@ class TestTrainingWorkflowIntegration:
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == 200
@@ -44,7 +44,7 @@ class TestTrainingWorkflowIntegration:
job_id = job_data["job_id"]
# Step 2: Check initial status
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -56,7 +56,7 @@ class TestTrainingWorkflowIntegration:
await asyncio.sleep(0.1) # Allow background task to start
# Step 4: Check completion status
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
# The job should exist in database even if not completed yet
@@ -80,7 +80,7 @@ class TestTrainingWorkflowIntegration:
}
# Start single product training
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
@@ -92,7 +92,7 @@ class TestTrainingWorkflowIntegration:
assert f"training started for {product_name}" in job_data["message"].lower()
# Check job status
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -114,7 +114,7 @@ class TestTrainingWorkflowIntegration:
}
# Validate training data
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/validate", json=request_data)
assert response.status_code == 200
@@ -127,7 +127,7 @@ class TestTrainingWorkflowIntegration:
# If validation passes, start actual training
if validation_data["is_valid"]:
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == 200
@@ -144,7 +144,7 @@ class TestTrainingWorkflowIntegration:
job_id = training_job_in_db.job_id
# Check initial status
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -152,7 +152,7 @@ class TestTrainingWorkflowIntegration:
assert initial_status["status"] == "pending"
# Cancel the job
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post(f"/training/jobs/{job_id}/cancel")
assert response.status_code == 200
@@ -160,7 +160,7 @@ class TestTrainingWorkflowIntegration:
assert "cancelled" in cancel_response["message"].lower()
# Verify cancellation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{job_id}/status")
assert response.status_code == 200
@@ -267,7 +267,7 @@ class TestErrorHandlingIntegration:
with patch('httpx.AsyncClient') as mock_client:
mock_client.return_value.__aenter__.return_value.get.side_effect = Exception("Service unavailable")
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Should still create job but might fail during execution
@@ -289,7 +289,7 @@ class TestErrorHandlingIntegration:
# Mock messaging failure
with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")):
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Should still succeed even if messaging fails
@@ -312,7 +312,7 @@ class TestErrorHandlingIntegration:
# Mock ML training failure
with patch('app.ml.trainer.BakeryMLTrainer.train_tenant_models', side_effect=Exception("ML training failed")):
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Job should be created successfully
@@ -394,7 +394,7 @@ class TestPerformanceIntegration:
# Make many rapid status requests
tasks = []
for _ in range(20):
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
task = test_client.get(f"/training/jobs/{job_id}/status")
tasks.append(task)
@@ -439,7 +439,7 @@ class TestSecurityIntegration:
"min_data_points": -5 # Invalid negative value
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=invalid_request)
assert response.status_code == 422 # Validation error
@@ -454,7 +454,7 @@ class TestSecurityIntegration:
# Try SQL injection in job ID
malicious_job_id = "job'; DROP TABLE model_training_logs; --"
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.get(f"/training/jobs/{malicious_job_id}/status")
# Should return 404, not cause database error
@@ -801,7 +801,7 @@ class TestBackwardCompatibility:
"include_weather": True
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
with patch('shared.auth.decorators.get_current_tenant_id_dep', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=minimal_request)
# Should work with defaults for missing fields

View File

@@ -25,13 +25,16 @@ class TestBakeryDataProcessor:
@pytest.fixture
def sample_sales_data(self):
"""Create sample sales data"""
dates = pd.date_range('2024-01-01', periods=60, freq='D')
return pd.DataFrame({
'date': dates,
'product_name': ['Pan Integral'] * 60,
'quantity': [45 + np.random.randint(-10, 11) for _ in range(60)]
})
"""Provide sufficient data for ML training tests"""
dates = pd.date_range('2024-01-01', periods=35, freq='D') # 35 days > 30 minimum
data = []
for date in dates:
data.append({
'date': date,
'product_name': 'Pan Integral', # Ensure this column exists
'quantity': 40 + (5 * np.sin(date.dayofyear / 365 * 2 * np.pi)) # Seasonal pattern
})
return pd.DataFrame(data)
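The rewritten fixture above supplies 35 daily rows, which clears the 30-day minimum that produced the "Insufficient training data ... minimum required: 30" failure in the report, and it keeps an explicit product_name column, guarding against the KeyError: 'product_name' seen in the trainer tests.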
@pytest.fixture
def sample_weather_data(self):