# services/training/app/models/training.py
"""
Database models for the training service.

SQLAlchemy declarative models covering the training-job lifecycle:
execution logs, performance metrics, the job queue, stored artifacts,
and the registry of trained models.
"""
from datetime import datetime, timezone
import uuid

from sqlalchemy import Column, Integer, String, DateTime, Text, Boolean, JSON, Float
from sqlalchemy.dialects.postgresql import UUID, ARRAY

from shared.database.base import Base
class ModelTrainingLog(Base):
    """
    Table to track training job execution and status.

    One row per training job; replaces the old Celery task tracking.
    """
    __tablename__ = "model_training_logs"

    id = Column(Integer, primary_key=True, index=True)
    # External job identifier referenced by the API / queue layers.
    job_id = Column(String(255), unique=True, index=True, nullable=False)
    tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)

    status = Column(String(50), nullable=False, default="pending")  # pending, running, completed, failed, cancelled
    progress = Column(Integer, default=0)  # 0-100 percentage
    current_step = Column(String(500), default="")  # human-readable description of the current phase

    # Timestamps
    # NOTE(review): datetime.now yields naive *local* time, while TrainedModel
    # below uses naive UTC — confirm which timezone consumers expect.
    start_time = Column(DateTime, default=datetime.now)
    end_time = Column(DateTime, nullable=True)

    # Configuration and results
    config = Column(JSON, nullable=True)  # Training job configuration
    results = Column(JSON, nullable=True)  # Training results
    error_message = Column(Text, nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.now)
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
class ModelPerformanceMetric(Base):
    """
    Table to track model performance over time.

    One row per evaluation of a model on a product's data.
    """
    __tablename__ = "model_performance_metrics"

    id = Column(Integer, primary_key=True, index=True)
    model_id = Column(String(255), index=True, nullable=False)
    tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)
    product_name = Column(String(255), index=True, nullable=False)

    # Performance metrics — all nullable: an evaluation may compute only a subset.
    mae = Column(Float, nullable=True)  # Mean Absolute Error
    mse = Column(Float, nullable=True)  # Mean Squared Error
    rmse = Column(Float, nullable=True)  # Root Mean Squared Error
    mape = Column(Float, nullable=True)  # Mean Absolute Percentage Error
    r2_score = Column(Float, nullable=True)  # R-squared score

    # Additional metrics
    accuracy_percentage = Column(Float, nullable=True)
    prediction_confidence = Column(Float, nullable=True)

    # Evaluation information (window and sample count the metrics were computed on)
    evaluation_period_start = Column(DateTime, nullable=True)
    evaluation_period_end = Column(DateTime, nullable=True)
    evaluation_samples = Column(Integer, nullable=True)

    # Metadata
    measured_at = Column(DateTime, default=datetime.now)  # when the metrics were computed
    created_at = Column(DateTime, default=datetime.now)
class TrainingJobQueue(Base):
    """
    Table to manage training job queue and scheduling.

    Tracks priority, scheduling times, and retry state for each queued job.
    """
    __tablename__ = "training_job_queue"

    id = Column(Integer, primary_key=True, index=True)
    job_id = Column(String(255), unique=True, index=True, nullable=False)
    tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)

    # Job configuration
    job_type = Column(String(50), nullable=False)  # full_training, single_product, evaluation
    priority = Column(Integer, default=1)  # Higher number = higher priority
    config = Column(JSON, nullable=True)

    # Scheduling information
    scheduled_at = Column(DateTime, nullable=True)
    started_at = Column(DateTime, nullable=True)
    estimated_duration_minutes = Column(Integer, nullable=True)

    # Status
    status = Column(String(50), nullable=False, default="queued")  # queued, running, completed, failed
    retry_count = Column(Integer, default=0)
    max_retries = Column(Integer, default=3)

    # Metadata
    created_at = Column(DateTime, default=datetime.now)
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
    # NOTE(review): unbounded String, unlike the String(255) identifiers above —
    # presumably a user identifier; confirm the intended length/convention.
    cancelled_by = Column(String, nullable=True)
class ModelArtifact(Base):
    """
    Table to track model files and artifacts.

    One row per stored file (model binary, metadata, training data, ...).
    """
    __tablename__ = "model_artifacts"

    id = Column(Integer, primary_key=True, index=True)
    model_id = Column(String(255), index=True, nullable=False)
    tenant_id = Column(UUID(as_uuid=True), nullable=False, index=True)

    # Artifact information
    artifact_type = Column(String(50), nullable=False)  # model_file, metadata, training_data, etc.
    file_path = Column(String(1000), nullable=False)
    file_size_bytes = Column(Integer, nullable=True)
    checksum = Column(String(255), nullable=True)  # For file integrity

    # Storage information
    storage_location = Column(String(100), nullable=False, default="local")  # local, s3, gcs, etc.
    compression = Column(String(50), nullable=True)  # gzip, lz4, etc.

    # Metadata
    created_at = Column(DateTime, default=datetime.now)
    expires_at = Column(DateTime, nullable=True)  # For automatic cleanup
class TrainedModel(Base):
    """
    Registry of trained forecasting models.

    One row per trained model instance for a (tenant, product) pair, holding
    file locations, training metrics, hyperparameters, and lifecycle flags.
    """
    __tablename__ = "trained_models"

    # Primary identification
    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    tenant_id = Column(String, nullable=False, index=True)
    product_name = Column(String, nullable=False, index=True)

    # Model information
    model_type = Column(String, default="prophet_optimized")
    model_version = Column(String, default="1.0")
    job_id = Column(String, nullable=False)  # training job that produced this model

    # File storage
    model_path = Column(String, nullable=False)  # Path to the .pkl file
    metadata_path = Column(String)  # Path to metadata JSON

    # Training metrics
    mape = Column(Float)
    mae = Column(Float)
    rmse = Column(Float)
    r2_score = Column(Float)
    training_samples = Column(Integer)

    # Hyperparameters and features
    hyperparameters = Column(JSON)  # Store optimized parameters
    features_used = Column(JSON)  # List of regressor columns

    # Model status
    is_active = Column(Boolean, default=True)
    is_production = Column(Boolean, default=False)

    # Timestamps — naive UTC, same value datetime.utcnow produced
    # (datetime.utcnow is deprecated since Python 3.12).
    created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc).replace(tzinfo=None))
    last_used_at = Column(DateTime)

    # Training data info
    training_start_date = Column(DateTime)
    training_end_date = Column(DateTime)
    data_quality_score = Column(Float)

    # Additional metadata
    notes = Column(Text)
    created_by = Column(String)  # User who triggered training

    @staticmethod
    def _iso(dt):
        """Return *dt* as an ISO-8601 string, or None when unset."""
        return dt.isoformat() if dt else None

    def to_dict(self):
        """
        Serialize this row to a JSON-friendly dict.

        Returns:
            dict: column values with datetimes rendered as ISO-8601 strings
            (None when unset). "model_id" is an alias of "id" kept for API
            backward compatibility.
        """
        return {
            "id": self.id,
            "model_id": self.id,  # alias retained for older API clients
            "tenant_id": self.tenant_id,
            "product_name": self.product_name,
            "model_type": self.model_type,
            "model_version": self.model_version,
            "job_id": self.job_id,  # added: previously omitted despite being non-nullable
            "model_path": self.model_path,
            "metadata_path": self.metadata_path,  # added alongside model_path
            "mape": self.mape,
            "mae": self.mae,
            "rmse": self.rmse,
            "r2_score": self.r2_score,
            "training_samples": self.training_samples,
            "hyperparameters": self.hyperparameters,
            "features_used": self.features_used,
            "is_active": self.is_active,
            "is_production": self.is_production,
            "created_at": self._iso(self.created_at),
            "last_used_at": self._iso(self.last_used_at),
            "training_start_date": self._iso(self.training_start_date),
            "training_end_date": self._iso(self.training_end_date),
            "data_quality_score": self.data_quality_score,
        }