Fix pytest generation for the training service
This commit is contained in:
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="2" failures="0" skipped="0" tests="2" time="1.455" timestamp="2025-07-25T11:22:45.219619" hostname="543df414761a"><testcase classname="tests.test_end_to_end.TestTrainingServiceEndToEnd" name="test_complete_training_workflow_api" time="0.034"><error message="failed on setup with "UnboundLocalError: cannot access local variable 'np' where it is not associated with a value"">tests/test_end_to_end.py:75: in real_bakery_data
temp = 15 + 12 * np.sin((date.timetuple().tm_yday / 365) * 2 * np.pi)
E UnboundLocalError: cannot access local variable 'np' where it is not associated with a value</error><error message="failed on teardown with "TypeError: 'str' object is not callable"">tests/conftest.py:464: in setup_test_environment
os.environ.pop(var, None)(scope="session")
E TypeError: 'str' object is not callable</error></testcase></testsuite></testsuites>
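The two setup/teardown errors in this report come from the test fixtures themselves: `np` being unbound at tests/test_end_to_end.py:75 is the classic symptom of numpy being imported further down inside the same function (which makes `np` a local name), and the teardown failure comes from a `(scope="session")` fragment that belongs on the fixture decorator being appended to the `os.environ.pop()` call. A minimal sketch of both fixes, assuming that layout; the environment-variable names below are placeholders, not taken from the original conftest.py:

```python
import os
from datetime import date

import numpy as np  # module-level import, so `np` can never be an unbound local
import pytest


@pytest.fixture
def real_bakery_data():
    """Seasonal demo temperature; `np` now resolves to the module-level import."""
    today = date.today()
    temp = 15 + 12 * np.sin((today.timetuple().tm_yday / 365) * 2 * np.pi)
    return {"temperature": temp}


@pytest.fixture(scope="session", autouse=True)  # scope belongs here, on the decorator
def setup_test_environment():
    yield
    for var in ("TRAINING_DATABASE_URL", "RABBITMQ_URL"):  # placeholder variable names
        os.environ.pop(var, None)  # plain call, no trailing (scope=...) fragment
```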
@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="0" failures="0" skipped="0" tests="0" time="0.204" timestamp="2025-07-25T11:22:43.995108" hostname="543df414761a" /></testsuites>
@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="1" failures="0" skipped="0" tests="1" time="0.238" timestamp="2025-07-25T11:22:44.599099" hostname="543df414761a"><testcase classname="" name="tests.test_performance" time="0.000"><error message="collection failure">ImportError while importing test module '/app/tests/test_performance.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_performance.py:16: in <module>
import psutil
E ModuleNotFoundError: No module named 'psutil'</error></testcase></testsuite></testsuites>
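The collection error above is only a missing test dependency. Adding `psutil` to the test requirements is the direct fix; alternatively the import can be guarded so a missing optional dependency skips the module instead of breaking collection. A small sketch of the guard (the test body is illustrative, not from the original test_performance.py):

```python
# tests/test_performance.py (sketch)
import pytest

psutil = pytest.importorskip("psutil")  # skip this module cleanly if psutil is absent


def test_process_memory_is_reported():
    """Illustrative check that psutil is usable once the import succeeds."""
    rss = psutil.Process().memory_info().rss
    assert rss > 0
```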
@@ -1,649 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="23" failures="35" skipped="2" tests="83" time="5.714" timestamp="2025-07-25T11:22:37.801499" hostname="543df414761a"><testcase classname="tests.test_api.TestTrainingAPI" name="test_health_check" time="0.030"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:20: in test_health_check
response = await test_client.get("/health")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_readiness_check_ready" time="0.069"><failure message="AttributeError: <starlette.datastructures.State object at 0xffff5ae06a10> does not have the attribute 'ready'">tests/test_api.py:32: in test_readiness_check_ready
with patch('app.main.app.state.ready', True):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <starlette.datastructures.State object at 0xffff5ae06a10> does not have the attribute 'ready'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_readiness_check_not_ready" time="0.030"><failure message="AttributeError: <starlette.datastructures.State object at 0xffff5ae06a10> does not have the attribute 'ready'">tests/test_api.py:42: in test_readiness_check_not_ready
with patch('app.main.app.state.ready', False):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <starlette.datastructures.State object at 0xffff5ae06a10> does not have the attribute 'ready'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_liveness_check_healthy" time="0.028"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:53: in test_liveness_check_healthy
response = await test_client.get("/health/live")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_liveness_check_unhealthy" time="0.027"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:63: in test_liveness_check_unhealthy
response = await test_client.get("/health/live")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_metrics_endpoint" time="0.027"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:73: in test_metrics_endpoint
response = await test_client.get("/metrics")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingAPI" name="test_root_endpoint" time="0.026"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:92: in test_root_endpoint
response = await test_client.get("/")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_start_training_job_success" time="0.029"><error message="failed on setup with "file /app/tests/test_api.py, line 104 @pytest.mark.asyncio async def test_start_training_job_success( self, test_client: AsyncClient, mock_messaging, mock_ml_trainer, mock_data_service ): """Test starting a training job successfully""" request_data = { "include_weather": True, "include_traffic": True, "min_data_points": 30, "seasonality_mode": "additive" } with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"): response = await test_client.post("/training/jobs", json=request_data) assert response.status_code == status.HTTP_200_OK data = response.json() assert "job_id" in data assert data["status"] == "started" assert data["tenant_id"] == "test-tenant" assert "estimated_duration_minutes" in data E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:104"">file /app/tests/test_api.py, line 104
@pytest.mark.asyncio
async def test_start_training_job_success(
self,
test_client: AsyncClient,
mock_messaging,
mock_ml_trainer,
mock_data_service
):
"""Test starting a training job successfully"""
request_data = {
"include_weather": True,
"include_traffic": True,
"min_data_points": 30,
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
assert data["status"] == "started"
assert data["tenant_id"] == "test-tenant"
assert "estimated_duration_minutes" in data
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:104</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_start_training_job_validation_error" time="0.027"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:139: in test_start_training_job_validation_error
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_get_training_status_existing_job" time="0.031"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_get_training_status_nonexistent_job" time="0.027"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:167: in test_get_training_status_nonexistent_job
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_list_training_jobs" time="0.028"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_list_training_jobs_with_status_filter" time="0.028"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_cancel_training_job_success" time="0.031"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_cancel_nonexistent_job" time="0.031"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:233: in test_cancel_nonexistent_job
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_get_training_logs" time="0.032"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestTrainingJobsAPI" name="test_validate_training_data_valid" time="0.028"><error message="failed on setup with "file /app/tests/test_api.py, line 257 @pytest.mark.asyncio async def test_validate_training_data_valid( self, test_client: AsyncClient, mock_data_service ): """Test validating valid training data""" request_data = { "include_weather": True, "include_traffic": True, "min_data_points": 30 } with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"): response = await test_client.post("/training/validate", json=request_data) assert response.status_code == status.HTTP_200_OK data = response.json() assert "is_valid" in data assert "issues" in data assert "recommendations" in data assert "estimated_training_time" in data E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:257"">file /app/tests/test_api.py, line 257
@pytest.mark.asyncio
async def test_validate_training_data_valid(
self,
test_client: AsyncClient,
mock_data_service
):
"""Test validating valid training data"""
request_data = {
"include_weather": True,
"include_traffic": True,
"min_data_points": 30
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post("/training/validate", json=request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "is_valid" in data
assert "issues" in data
assert "recommendations" in data
assert "estimated_training_time" in data
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:257</error></testcase><testcase classname="tests.test_api.TestSingleProductTrainingAPI" name="test_train_single_product_success" time="0.033"><error message="failed on setup with "file /app/tests/test_api.py, line 285 @pytest.mark.asyncio async def test_train_single_product_success( self, test_client: AsyncClient, mock_messaging, mock_ml_trainer, mock_data_service ): """Test training a single product successfully""" product_name = "Pan Integral" request_data = { "include_weather": True, "include_traffic": True, "seasonality_mode": "additive" } with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"): response = await test_client.post( f"/training/products/{product_name}", json=request_data ) assert response.status_code == status.HTTP_200_OK data = response.json() assert "job_id" in data assert data["status"] == "started" assert data["tenant_id"] == "test-tenant" assert f"training started for {product_name}" in data["message"].lower() E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:285"">file /app/tests/test_api.py, line 285
@pytest.mark.asyncio
async def test_train_single_product_success(
self,
test_client: AsyncClient,
mock_messaging,
mock_ml_trainer,
mock_data_service
):
"""Test training a single product successfully"""
product_name = "Pan Integral"
request_data = {
"include_weather": True,
"include_traffic": True,
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
assert data["status"] == "started"
assert data["tenant_id"] == "test-tenant"
assert f"training started for {product_name}" in data["message"].lower()
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:285</error></testcase><testcase classname="tests.test_api.TestSingleProductTrainingAPI" name="test_train_single_product_validation_error" time="0.033"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:323: in test_train_single_product_validation_error
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestSingleProductTrainingAPI" name="test_train_single_product_special_characters" time="0.030"><error message="failed on setup with "file /app/tests/test_api.py, line 331 @pytest.mark.asyncio async def test_train_single_product_special_characters( self, test_client: AsyncClient, mock_messaging, mock_ml_trainer, mock_data_service ): """Test training product with special characters in name""" product_name = "Pan Francés" # With accent request_data = { "include_weather": True, "seasonality_mode": "additive" } with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"): response = await test_client.post( f"/training/products/{product_name}", json=request_data ) assert response.status_code == status.HTTP_200_OK data = response.json() assert "job_id" in data E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:331"">file /app/tests/test_api.py, line 331
@pytest.mark.asyncio
async def test_train_single_product_special_characters(
self,
test_client: AsyncClient,
mock_messaging,
mock_ml_trainer,
mock_data_service
):
"""Test training product with special characters in name"""
product_name = "Pan Francés" # With accent
request_data = {
"include_weather": True,
"seasonality_mode": "additive"
}
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post(
f"/training/products/{product_name}",
json=request_data
)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:331</error></testcase><testcase classname="tests.test_api.TestModelsAPI" name="test_list_models" time="0.028"><error message="failed on setup with "file /app/tests/test_api.py, line 360 @pytest.mark.asyncio async def test_list_models( self, test_client: AsyncClient, trained_model_in_db ): """Test listing trained models""" with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"): response = await test_client.get("/models") # This endpoint might not exist yet, so we expect either 200 or 404 assert response.status_code in [status.HTTP_200_OK, status.HTTP_404_NOT_FOUND] if response.status_code == status.HTTP_200_OK: data = response.json() assert isinstance(data, list) E fixture 'trained_model_in_db' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:360"">file /app/tests/test_api.py, line 360
@pytest.mark.asyncio
async def test_list_models(
self,
test_client: AsyncClient,
trained_model_in_db
):
"""Test listing trained models"""
with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.get("/models")
# This endpoint might not exist yet, so we expect either 200 or 404
assert response.status_code in [status.HTTP_200_OK, status.HTTP_404_NOT_FOUND]
if response.status_code == status.HTTP_200_OK:
data = response.json()
assert isinstance(data, list)
E fixture 'trained_model_in_db' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:360</error></testcase><testcase classname="tests.test_api.TestModelsAPI" name="test_get_model_details" time="0.027"><error message="failed on setup with "file /app/tests/test_api.py, line 377 @pytest.mark.asyncio async def test_get_model_details( self, test_client: AsyncClient, trained_model_in_db ): """Test getting model details""" model_id = trained_model_in_db.model_id with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"): response = await test_client.get(f"/models/{model_id}") # This endpoint might not exist yet assert response.status_code in [ status.HTTP_200_OK, status.HTTP_404_NOT_FOUND, status.HTTP_501_NOT_IMPLEMENTED ] E fixture 'trained_model_in_db' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:377"">file /app/tests/test_api.py, line 377
@pytest.mark.asyncio
async def test_get_model_details(
self,
test_client: AsyncClient,
trained_model_in_db
):
"""Test getting model details"""
model_id = trained_model_in_db.model_id
with patch('app.api.models.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.get(f"/models/{model_id}")
# This endpoint might not exist yet
assert response.status_code in [
status.HTTP_200_OK,
status.HTTP_404_NOT_FOUND,
status.HTTP_501_NOT_IMPLEMENTED
]
E fixture 'trained_model_in_db' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:377</error></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_database_error_handling" time="0.032"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:412: in test_database_error_handling
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_missing_tenant_id" time="0.028"><failure message="AttributeError: 'async_generator' object has no attribute 'post'">tests/test_api.py:427: in test_missing_tenant_id
response = await test_client.post("/training/jobs", json=request_data)
E AttributeError: 'async_generator' object has no attribute 'post'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_invalid_job_id_format" time="0.028"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:437: in test_invalid_job_id_format
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_messaging_failure_handling" time="0.026"><error message="failed on setup with "file /app/tests/test_api.py, line 443 @pytest.mark.asyncio async def test_messaging_failure_handling( self, test_client: AsyncClient, mock_data_service ): """Test handling when messaging fails""" request_data = { "include_weather": True, "include_traffic": True, "min_data_points": 30 } with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")), \ patch('app.api.training.get_current_tenant_id', return_value="test-tenant"): response = await test_client.post("/training/jobs", json=request_data) # Should still succeed even if messaging fails assert response.status_code == status.HTTP_200_OK data = response.json() assert "job_id" in data E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_api.py:443"">file /app/tests/test_api.py, line 443
@pytest.mark.asyncio
async def test_messaging_failure_handling(
self,
test_client: AsyncClient,
mock_data_service
):
"""Test handling when messaging fails"""
request_data = {
"include_weather": True,
"include_traffic": True,
"min_data_points": 30
}
with patch('app.services.messaging.publish_job_started', side_effect=Exception("Messaging failed")), \
patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
response = await test_client.post("/training/jobs", json=request_data)
# Should still succeed even if messaging fails
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "job_id" in data
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_api.py:443</error></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_invalid_json_payload" time="0.028"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:469: in test_invalid_json_payload
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestErrorHandling" name="test_unsupported_content_type" time="0.028"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:481: in test_unsupported_content_type
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAuthenticationIntegration" name="test_endpoints_require_auth" time="0.027"><failure message="AttributeError: 'async_generator' object has no attribute 'post'">tests/test_api.py:512: in test_endpoints_require_auth
response = await test_client.post(endpoint, json={})
E AttributeError: 'async_generator' object has no attribute 'post'</failure></testcase><testcase classname="tests.test_api.TestAuthenticationIntegration" name="test_tenant_isolation_in_api" time="0.028"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_api.TestAPIValidation" name="test_training_request_validation" time="0.027"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:555: in test_training_request_validation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIValidation" name="test_single_product_request_validation" time="0.038"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:591: in test_single_product_request_validation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIValidation" name="test_query_parameter_validation" time="0.030"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:612: in test_query_parameter_validation
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIPerformance" name="test_concurrent_requests" time="0.031"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:643: in test_concurrent_requests
with patch('app.api.training.get_current_tenant_id', return_value=f"tenant-{i}"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIPerformance" name="test_large_payload_handling" time="0.030"><failure message="AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'">tests/test_api.py:665: in test_large_payload_handling
with patch('app.api.training.get_current_tenant_id', return_value="test-tenant"):
/usr/local/lib/python3.11/unittest/mock.py:1446: in __enter__
original, local = self.get_original()
/usr/local/lib/python3.11/unittest/mock.py:1419: in get_original
raise AttributeError(
E AttributeError: <module 'app.api.training' from '/app/app/api/training.py'> does not have the attribute 'get_current_tenant_id'</failure></testcase><testcase classname="tests.test_api.TestAPIPerformance" name="test_rapid_successive_requests" time="0.030"><failure message="AttributeError: 'async_generator' object has no attribute 'get'">tests/test_api.py:681: in test_rapid_successive_requests
response = await test_client.get("/health")
E AttributeError: 'async_generator' object has no attribute 'get'</failure></testcase><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_training_data_basic" time="0.049" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_training_data_empty_weather" time="0.045" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_prediction_features" time="0.034" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_add_temporal_features" time="0.029" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_spanish_holiday_detection" time="0.026" /><testcase classname="tests.test_ml.TestBakeryDataProcessor" name="test_prepare_training_data_insufficient_data" time="0.037"><failure message="Failed: DID NOT RAISE <class 'Exception'>">tests/test_ml.py:201: in test_prepare_training_data_insufficient_data
with pytest.raises(Exception):
E Failed: DID NOT RAISE <class 'Exception'></failure></testcase><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_train_bakery_model_success" time="0.031"><failure message="AttributeError: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'">tests/test_ml.py:239: in test_train_bakery_model_success
result = await prophet_manager.train_bakery_model(
app/ml/prophet_manager.py:70: in train_bakery_model
model = self._create_prophet_model(regressor_columns)
app/ml/prophet_manager.py:238: in _create_prophet_model
daily_seasonality=settings.PROPHET_DAILY_SEASONALITY,
/usr/local/lib/python3.11/site-packages/pydantic/main.py:761: in __getattr__
raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')
E AttributeError: 'TrainingSettings' object has no attribute 'PROPHET_DAILY_SEASONALITY'</failure></testcase><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_validate_training_data_valid" time="0.028" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_validate_training_data_insufficient" time="0.027" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_validate_training_data_missing_columns" time="0.027" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_get_spanish_holidays" time="0.029" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_extract_regressor_columns" time="0.028" /><testcase classname="tests.test_ml.TestBakeryProphetManager" name="test_generate_forecast" time="0.028" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_train_tenant_models_success" time="0.048" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_train_single_product_success" time="0.041"><failure message="ValueError: Insufficient training data for Pan Integral: 3 days, minimum required: 30">tests/test_ml.py:414: in test_train_single_product_success
result = await ml_trainer.train_single_product(
app/ml/trainer.py:149: in train_single_product
model_info = await self.prophet_manager.train_bakery_model(
app/ml/prophet_manager.py:61: in train_bakery_model
await self._validate_training_data(df, product_name)
app/ml/prophet_manager.py:158: in _validate_training_data
raise ValueError(
E ValueError: Insufficient training data for Pan Integral: 3 days, minimum required: 30</failure></testcase><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_train_single_product_no_data" time="0.036"><failure message="KeyError: 'product_name'">tests/test_ml.py:438: in test_train_single_product_no_data
await ml_trainer.train_single_product(
app/ml/trainer.py:134: in train_single_product
product_sales = sales_df[sales_df['product_name'] == product_name].copy()
/usr/local/lib/python3.11/site-packages/pandas/core/frame.py:3893: in __getitem__
indexer = self.columns.get_loc(key)
/usr/local/lib/python3.11/site-packages/pandas/core/indexes/range.py:418: in get_loc
raise KeyError(key)
E KeyError: 'product_name'</failure></testcase><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_validate_input_data_valid" time="0.032" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_validate_input_data_empty" time="0.033" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_validate_input_data_missing_columns" time="0.038" /><testcase classname="tests.test_ml.TestBakeryMLTrainer" name="test_calculate_training_summary" time="0.032" /><testcase classname="tests.test_ml.TestIntegrationML" name="test_end_to_end_training_flow" time="0.028"><skipped type="pytest.skip" message="Requires actual Prophet dependencies for integration test">/app/tests/test_ml.py:508: Requires actual Prophet dependencies for integration test</skipped></testcase><testcase classname="tests.test_ml.TestIntegrationML" name="test_data_pipeline_integration" time="0.028"><skipped type="pytest.skip" message="Requires actual dependencies for integration test">/app/tests/test_ml.py:513: Requires actual dependencies for integration test</skipped></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_create_training_job_success" time="0.030"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'

During handling of the above exception, another exception occurred:
tests/test_service.py:34: in test_create_training_job_success
result = await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_create_single_product_job_success" time="0.031"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:84: in create_single_product_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'

During handling of the above exception, another exception occurred:
tests/test_service.py:60: in test_create_single_product_job_success
result = await training_service.create_single_product_job(
app/services/training_service.py:93: in create_single_product_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_get_job_status_existing" time="0.035"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_get_job_status_nonexistent" time="0.030" /><testcase classname="tests.test_service.TestTrainingService" name="test_list_training_jobs" time="0.031"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_list_training_jobs_with_filter" time="0.035"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_cancel_training_job_success" time="0.035"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_cancel_nonexistent_job" time="0.031"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:270: in cancel_training_job
result = await db.execute(
E AttributeError: 'coroutine' object has no attribute 'execute'

During handling of the above exception, another exception occurred:
tests/test_service.py:175: in test_cancel_nonexistent_job
result = await training_service.cancel_training_job(
app/services/training_service.py:297: in cancel_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_validate_training_data_valid" time="0.034"><error message="failed on setup with "file /app/tests/test_service.py, line 183 @pytest.mark.asyncio async def test_validate_training_data_valid( self, training_service, test_db_session, mock_data_service ): """Test validation with valid data""" config = {"min_data_points": 30} result = await training_service.validate_training_data( db=test_db_session, tenant_id="test-tenant", config=config ) assert isinstance(result, dict) assert "is_valid" in result assert "issues" in result assert "recommendations" in result assert "estimated_time_minutes" in result E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_service.py:183"">file /app/tests/test_service.py, line 183
@pytest.mark.asyncio
async def test_validate_training_data_valid(
self,
training_service,
test_db_session,
mock_data_service
):
"""Test validation with valid data"""
config = {"min_data_points": 30}

result = await training_service.validate_training_data(
db=test_db_session,
tenant_id="test-tenant",
config=config
)

assert isinstance(result, dict)
assert "is_valid" in result
assert "issues" in result
assert "recommendations" in result
assert "estimated_time_minutes" in result
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_service.py:183</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_validate_training_data_no_data" time="0.031"><failure message="assert True is False">tests/test_service.py:221: in test_validate_training_data_no_data
assert result["is_valid"] is False
E assert True is False</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_update_job_status" time="0.035"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_store_trained_models" time="0.032"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:572: in _store_trained_models
await db.execute(
E AttributeError: 'coroutine' object has no attribute 'execute'

During handling of the above exception, another exception occurred:
tests/test_service.py:280: in test_store_trained_models
await training_service._store_trained_models(
app/services/training_service.py:592: in _store_trained_models
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingService" name="test_get_training_logs" time="0.033"><error message="failed on setup with "TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog"">tests/conftest.py:539: in training_job_in_db
job = ModelTrainingLog(
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:566: in _initialize_instance
with util.safe_reraise():
/usr/local/lib/python3.11/site-packages/sqlalchemy/util/langhelpers.py:146: in __exit__
raise exc_value.with_traceback(exc_tb)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/state.py:564: in _initialize_instance
manager.original_init(*mixed[1:], **kwargs)
/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/decl_base.py:2142: in _declarative_constructor
raise TypeError(
E TypeError: 'started_at' is an invalid keyword argument for ModelTrainingLog</error></testcase><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_sales_data_success" time="0.031" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_sales_data_error" time="0.030" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_weather_data_success" time="0.040" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_traffic_data_success" time="0.033" /><testcase classname="tests.test_service.TestTrainingServiceDataFetching" name="test_fetch_data_with_date_filters" time="0.030" /><testcase classname="tests.test_service.TestTrainingServiceExecution" name="test_execute_training_job_success" time="0.030"><error message="failed on setup with "file /app/tests/test_service.py, line 468 @pytest.mark.asyncio async def test_execute_training_job_success( self, training_service, test_db_session, mock_messaging, mock_data_service ): """Test successful training job execution""" # Create job first job_id = "test-execution-job" training_log = await training_service.create_training_job( db=test_db_session, tenant_id="test-tenant", job_id=job_id, config={"include_weather": True} ) request = TrainingJobRequest( include_weather=True, include_traffic=True, min_data_points=30 ) with patch('app.services.training_service.TrainingService._fetch_sales_data') as mock_fetch_sales, \ patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \ patch('app.services.training_service.TrainingService._fetch_traffic_data') as mock_fetch_traffic, \ patch('app.services.training_service.TrainingService._store_trained_models') as mock_store: mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": "Pan Integral", "quantity": 45}] mock_fetch_weather.return_value = [] mock_fetch_traffic.return_value = [] mock_store.return_value = None await training_service.execute_training_job( db=test_db_session, job_id=job_id, tenant_id="test-tenant", request=request ) # Verify job was completed updated_job = await training_service.get_job_status( db=test_db_session, job_id=job_id, tenant_id="test-tenant" ) assert updated_job.status == "completed" assert updated_job.progress == 100 E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, 
temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_service.py:468"">file /app/tests/test_service.py, line 468
@pytest.mark.asyncio
async def test_execute_training_job_success(
self,
training_service,
test_db_session,
mock_messaging,
mock_data_service
):
"""Test successful training job execution"""
# Create job first
job_id = "test-execution-job"
training_log = await training_service.create_training_job(
db=test_db_session,
tenant_id="test-tenant",
job_id=job_id,
config={"include_weather": True}
)

request = TrainingJobRequest(
include_weather=True,
include_traffic=True,
min_data_points=30
)

with patch('app.services.training_service.TrainingService._fetch_sales_data') as mock_fetch_sales, \
patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \
patch('app.services.training_service.TrainingService._fetch_traffic_data') as mock_fetch_traffic, \
patch('app.services.training_service.TrainingService._store_trained_models') as mock_store:

mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": "Pan Integral", "quantity": 45}]
mock_fetch_weather.return_value = []
mock_fetch_traffic.return_value = []
mock_store.return_value = None

await training_service.execute_training_job(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant",
request=request
)

# Verify job was completed
updated_job = await training_service.get_job_status(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant"
)

assert updated_job.status == "completed"
assert updated_job.progress == 100
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_service.py:468</error></testcase><testcase classname="tests.test_service.TestTrainingServiceExecution" name="test_execute_training_job_failure" time="0.031"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'

During handling of the above exception, another exception occurred:
tests/test_service.py:529: in test_execute_training_job_failure
await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingServiceExecution" name="test_execute_single_product_training_success" time="0.031"><error message="failed on setup with "file /app/tests/test_service.py, line 559 @pytest.mark.asyncio async def test_execute_single_product_training_success( self, training_service, test_db_session, mock_messaging, mock_data_service ): """Test successful single product training execution""" job_id = "test-single-product-job" product_name = "Pan Integral" await training_service.create_single_product_job( db=test_db_session, tenant_id="test-tenant", product_name=product_name, job_id=job_id, config={} ) request = SingleProductTrainingRequest( include_weather=True, include_traffic=False ) with patch('app.services.training_service.TrainingService._fetch_product_sales_data') as mock_fetch_sales, \ patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \ patch('app.services.training_service.TrainingService._store_single_trained_model') as mock_store: mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": product_name, "quantity": 45}] mock_fetch_weather.return_value = [] mock_store.return_value = None await training_service.execute_single_product_training( db=test_db_session, job_id=job_id, tenant_id="test-tenant", product_name=product_name, request=request ) # Verify job was completed updated_job = await training_service.get_job_status( db=test_db_session, job_id=job_id, tenant_id="test-tenant" ) assert updated_job.status == "completed" assert updated_job.progress == 100 E fixture 'mock_data_service' not found > available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory > use 'pytest --fixtures [testpath]' for help on them. /app/tests/test_service.py:559"">file /app/tests/test_service.py, line 559
@pytest.mark.asyncio
async def test_execute_single_product_training_success(
self,
training_service,
test_db_session,
mock_messaging,
mock_data_service
):
"""Test successful single product training execution"""
job_id = "test-single-product-job"
product_name = "Pan Integral"

await training_service.create_single_product_job(
db=test_db_session,
tenant_id="test-tenant",
product_name=product_name,
job_id=job_id,
config={}
)

request = SingleProductTrainingRequest(
include_weather=True,
include_traffic=False
)

with patch('app.services.training_service.TrainingService._fetch_product_sales_data') as mock_fetch_sales, \
patch('app.services.training_service.TrainingService._fetch_weather_data') as mock_fetch_weather, \
patch('app.services.training_service.TrainingService._store_single_trained_model') as mock_store:

mock_fetch_sales.return_value = [{"date": "2024-01-01", "product_name": product_name, "quantity": 45}]
mock_fetch_weather.return_value = []
mock_store.return_value = None

await training_service.execute_single_product_training(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant",
product_name=product_name,
request=request
)

# Verify job was completed
updated_job = await training_service.get_job_status(
db=test_db_session,
job_id=job_id,
tenant_id="test-tenant"
)

assert updated_job.status == "completed"
assert updated_job.progress == 100
E fixture 'mock_data_service' not found
> available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, api_test_scenarios, auth_headers, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, class_mocker, cleanup_after_test, configure_test_logging, corrupted_sales_data, cov, data_quality_test_cases, doctest_namespace, error_scenarios, event_loop, failing_external_services, insufficient_sales_data, integration_test_dependencies, integration_test_setup, large_dataset_for_performance, load_test_configuration, memory_monitor, mock_aemet_client, mock_data_processor, mock_external_services, mock_job_scheduler, mock_madrid_client, mock_messaging, mock_ml_trainer, mock_model_storage, mock_notification_system, mock_prophet_manager, mocker, module_mocker, monkeypatch, no_cover, package_mocker, performance_benchmarks, pytestconfig, real_world_scenarios, record_property, record_testsuite_property, record_xml_attribute, recwarn, sample_bakery_sales_data, sample_model_metadata, sample_single_product_request, sample_traffic_data, sample_training_request, sample_weather_data, seasonal_product_data, session_mocker, setup_test_environment, spanish_holidays_2023, temp_model_storage, test_app, test_client, test_config, test_data_validator, test_db_session, test_metrics_collector, timing_monitor, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory, training_job_in_db, training_progress_states, training_service, unused_tcp_port, unused_tcp_port_factory, unused_udp_port, unused_udp_port_factory
> use 'pytest --fixtures [testpath]' for help on them.
/app/tests/test_service.py:559</error></testcase><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_database_connection_failure" time="0.029" /><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_external_service_timeout" time="0.030" /><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_concurrent_job_creation" time="0.028"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'

During handling of the above exception, another exception occurred:
tests/test_service.py:660: in test_concurrent_job_creation
job = await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_malformed_config_handling" time="0.001"><failure message="AttributeError: 'coroutine' object has no attribute 'rollback'">app/services/training_service.py:52: in create_training_job
db.add(training_log)
E AttributeError: 'coroutine' object has no attribute 'add'

During handling of the above exception, another exception occurred:
tests/test_service.py:681: in test_malformed_config_handling
job = await training_service.create_training_job(
app/services/training_service.py:61: in create_training_job
await db.rollback()
E AttributeError: 'coroutine' object has no attribute 'rollback'</failure></testcase><testcase classname="tests.test_service.TestTrainingServiceEdgeCases" name="test_malformed_config_handling" time="0.029"><error message="failed on teardown with "TypeError: 'str' object is not callable"">tests/conftest.py:464: in setup_test_environment
os.environ.pop(var, None)(scope="session")
E TypeError: 'str' object is not callable</error></testcase></testsuite></testsuites>
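Note on the failures above: most of the API- and service-layer errors trace back to two fixture problems visible in the report, namely the test client arriving as an un-consumed async generator ('async_generator' object has no attribute 'get') and the DB session arriving as an un-awaited coroutine ('coroutine' object has no attribute 'add'/'execute'/'rollback'). The snippet below is only an illustrative sketch of the usual pytest-asyncio pattern that avoids both symptoms; the names app and async_session_factory are assumptions, not this project's actual identifiers.

import pytest_asyncio
from httpx import ASGITransport, AsyncClient

from app.main import app  # assumed application import path


@pytest_asyncio.fixture
async def test_client():
    # Yield a live client; with a plain generator fixture the tests would
    # receive the async generator object itself, hence the missing .get().
    async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
        yield client


@pytest_asyncio.fixture
async def test_db_session(async_session_factory):  # async_session_factory is a hypothetical fixture name
    # Enter the session context so tests get an AsyncSession that supports
    # .add()/.execute()/.rollback(), not an un-awaited coroutine.
    async with async_session_factory() as session:
        yield session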
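The recurring teardown error (TypeError: 'str' object is not callable at tests/conftest.py:464) suggests that a (scope="session") call was appended to os.environ.pop() instead of being passed to the fixture decorator. A minimal sketch of the corrected fixture, with placeholder variable names rather than the project's real list, might look like:

import os
import pytest

TEST_ENV_VARS = ["DATABASE_URL", "MESSAGING_URL"]  # placeholder names


@pytest.fixture(scope="session", autouse=True)
def setup_test_environment():
    # Scope belongs on the decorator; teardown simply pops the variables.
    yield
    for var in TEST_ENV_VARS:
        os.environ.pop(var, None)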
File diff suppressed because one or more lines are too long