Initial commit - production deployment
services/production/migrations/env.py (new file, 141 lines)
@@ -0,0 +1,141 @@
"""Alembic environment configuration for production service"""

import asyncio
import os
import sys
from logging.config import fileConfig

from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config

from alembic import context

# Add the service directory to the Python path
service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if service_path not in sys.path:
    sys.path.insert(0, service_path)

# Add shared modules to path
shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared"))
if shared_path not in sys.path:
    sys.path.insert(0, shared_path)

try:
    from app.core.config import settings
    from shared.database.base import Base

    # Import all models to ensure they are registered with Base.metadata
    from app.models import *  # noqa: F401, F403
except ImportError as e:
    print(f"Import error in migrations env.py: {e}")
    print(f"Current Python path: {sys.path}")
    raise

# this is the Alembic Config object
config = context.config

# Determine service name from file path
service_name = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
service_name_upper = service_name.upper().replace('-', '_')

# Set database URL from environment variables with multiple fallback strategies
database_url = (
    os.getenv(f'{service_name_upper}_DATABASE_URL') or  # Service-specific
    os.getenv('DATABASE_URL')  # Generic fallback
)

# If DATABASE_URL is not set, construct from individual components
if not database_url:
    # Try generic PostgreSQL environment variables first
    postgres_host = os.getenv('POSTGRES_HOST')
    postgres_port = os.getenv('POSTGRES_PORT', '5432')
    postgres_db = os.getenv('POSTGRES_DB')
    postgres_user = os.getenv('POSTGRES_USER')
    postgres_password = os.getenv('POSTGRES_PASSWORD')

    if all([postgres_host, postgres_db, postgres_user, postgres_password]):
        database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}"
    else:
        # Try service-specific environment variables
        db_host = os.getenv(f'{service_name_upper}_DB_HOST', f'{service_name}-db-service')
        db_port = os.getenv(f'{service_name_upper}_DB_PORT', '5432')
        db_name = os.getenv(f'{service_name_upper}_DB_NAME', f'{service_name.replace("-", "_")}_db')
        db_user = os.getenv(f'{service_name_upper}_DB_USER', f'{service_name.replace("-", "_")}_user')
        db_password = os.getenv(f'{service_name_upper}_DB_PASSWORD')

        if db_password:
            database_url = f"postgresql+asyncpg://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}"
        else:
            # Final fallback: try to get from settings object
            try:
                database_url = getattr(settings, 'DATABASE_URL', None)
            except Exception:
                pass

if not database_url:
    error_msg = f"ERROR: No database URL configured for {service_name} service"
    print(error_msg)
    raise Exception(error_msg)

config.set_main_option("sqlalchemy.url", database_url)

# Interpret the config file for Python logging
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Set target metadata
target_metadata = Base.metadata


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode."""
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        compare_type=True,
        compare_server_default=True,
    )

    with context.begin_transaction():
        context.run_migrations()


def do_run_migrations(connection: Connection) -> None:
    """Execute migrations with the given connection."""
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        compare_type=True,
        compare_server_default=True,
    )

    with context.begin_transaction():
        context.run_migrations()


async def run_async_migrations() -> None:
    """Run migrations in 'online' mode with async support."""
    connectable = async_engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)

    await connectable.dispose()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode."""
    asyncio.run(run_async_migrations())


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
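For reference, a minimal sketch (not part of the commit) of the resolution order this env.py implements. It assumes the service directory is named "production", so the env-var prefix becomes PRODUCTION_; the URL value is made up:

import os

# Assumed example value; only the precedence order mirrors env.py above.
os.environ["PRODUCTION_DATABASE_URL"] = (
    "postgresql+asyncpg://prod_user:secret@production-db-service:5432/production_db"
)

database_url = (
    os.getenv("PRODUCTION_DATABASE_URL")  # 1. service-specific URL
    or os.getenv("DATABASE_URL")          # 2. generic URL
)
# 3./4. env.py then falls back to POSTGRES_* or PRODUCTION_DB_* parts,
# 5. and finally to settings.DATABASE_URL.
print(database_url)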
services/production/migrations/script.py.mako (new file, 26 lines)
@@ -0,0 +1,26 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}


def upgrade() -> None:
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    ${downgrades if downgrades else "pass"}
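When Alembic renders this template (for example via "alembic revision -m 'add example table'"), the generated file starts like the sketch below; the revision identifiers are illustrative placeholders, not values from this repository:

"""add example table

Revision ID: a1b2c3d4e5f6
Revises: 001_unified_initial_schema
Create Date: 2025-11-07 12:00:00.000000

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision: str = 'a1b2c3d4e5f6'
down_revision: Union[str, None] = '001_unified_initial_schema'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    pass


def downgrade() -> None:
    pass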
@@ -0,0 +1,336 @@
"""unified initial production schema

Revision ID: 001_unified_initial_schema
Revises:
Create Date: 2025-11-07

Complete production service schema including:
- Production batches (with reasoning_data for i18n JTBD dashboard and waste tracking)
- Production schedules
- Production capacity
- Equipment
- Quality checks and templates
- Audit logs
"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision: str = '001_unified_initial_schema'
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Create audit_logs table
    op.create_table('audit_logs',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('user_id', sa.UUID(), nullable=False),
        sa.Column('action', sa.String(length=100), nullable=False),
        sa.Column('resource_type', sa.String(length=100), nullable=False),
        sa.Column('resource_id', sa.String(length=255), nullable=True),
        sa.Column('severity', sa.String(length=20), nullable=False),
        sa.Column('service_name', sa.String(length=100), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('changes', postgresql.JSON(astext_type=sa.Text()), nullable=True),
        sa.Column('audit_metadata', postgresql.JSON(astext_type=sa.Text()), nullable=True),
        sa.Column('ip_address', sa.String(length=45), nullable=True),
        sa.Column('user_agent', sa.Text(), nullable=True),
        sa.Column('endpoint', sa.String(length=255), nullable=True),
        sa.Column('method', sa.String(length=10), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_audit_resource_type_action', 'audit_logs', ['resource_type', 'action'], unique=False)
    op.create_index('idx_audit_service_created', 'audit_logs', ['service_name', 'created_at'], unique=False)
    op.create_index('idx_audit_severity_created', 'audit_logs', ['severity', 'created_at'], unique=False)
    op.create_index('idx_audit_tenant_created', 'audit_logs', ['tenant_id', 'created_at'], unique=False)
    op.create_index('idx_audit_user_created', 'audit_logs', ['user_id', 'created_at'], unique=False)
    op.create_index(op.f('ix_audit_logs_action'), 'audit_logs', ['action'], unique=False)
    op.create_index(op.f('ix_audit_logs_created_at'), 'audit_logs', ['created_at'], unique=False)
    op.create_index(op.f('ix_audit_logs_resource_id'), 'audit_logs', ['resource_id'], unique=False)
    op.create_index(op.f('ix_audit_logs_resource_type'), 'audit_logs', ['resource_type'], unique=False)
    op.create_index(op.f('ix_audit_logs_service_name'), 'audit_logs', ['service_name'], unique=False)
    op.create_index(op.f('ix_audit_logs_severity'), 'audit_logs', ['severity'], unique=False)
    op.create_index(op.f('ix_audit_logs_tenant_id'), 'audit_logs', ['tenant_id'], unique=False)
    op.create_index(op.f('ix_audit_logs_user_id'), 'audit_logs', ['user_id'], unique=False)

    # Create equipment table
    op.create_table('equipment',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('type', sa.Enum('OVEN', 'MIXER', 'PROOFER', 'FREEZER', 'PACKAGING', 'OTHER', name='equipmenttype'), nullable=False),
        sa.Column('model', sa.String(length=100), nullable=True),
        sa.Column('serial_number', sa.String(length=100), nullable=True),
        sa.Column('location', sa.String(length=255), nullable=True),
        sa.Column('status', sa.Enum('OPERATIONAL', 'MAINTENANCE', 'DOWN', 'WARNING', name='equipmentstatus'), nullable=False),
        sa.Column('install_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('last_maintenance_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('next_maintenance_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('maintenance_interval_days', sa.Integer(), nullable=True),
        sa.Column('efficiency_percentage', sa.Float(), nullable=True),
        sa.Column('uptime_percentage', sa.Float(), nullable=True),
        sa.Column('energy_usage_kwh', sa.Float(), nullable=True),
        sa.Column('power_kw', sa.Float(), nullable=True),
        sa.Column('capacity', sa.Float(), nullable=True),
        sa.Column('weight_kg', sa.Float(), nullable=True),
        sa.Column('current_temperature', sa.Float(), nullable=True),
        sa.Column('target_temperature', sa.Float(), nullable=True),
        sa.Column('is_active', sa.Boolean(), nullable=True),
        sa.Column('notes', sa.Text(), nullable=True),
        sa.Column('support_contact', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_equipment_tenant_id'), 'equipment', ['tenant_id'], unique=False)

    # Create production_batches table (with reasoning_data for i18n and waste tracking)
    op.create_table('production_batches',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('batch_number', sa.String(length=50), nullable=False),
        sa.Column('product_id', sa.UUID(), nullable=False),
        sa.Column('product_name', sa.String(length=255), nullable=False),
        sa.Column('recipe_id', sa.UUID(), nullable=True),
        sa.Column('planned_start_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('planned_end_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('planned_quantity', sa.Float(), nullable=False),
        sa.Column('planned_duration_minutes', sa.Integer(), nullable=False),
        sa.Column('actual_start_time', sa.DateTime(timezone=True), nullable=True),
        sa.Column('actual_end_time', sa.DateTime(timezone=True), nullable=True),
        sa.Column('actual_quantity', sa.Float(), nullable=True),
        sa.Column('actual_duration_minutes', sa.Integer(), nullable=True),
        sa.Column('status', sa.Enum('PENDING', 'IN_PROGRESS', 'COMPLETED', 'CANCELLED', 'ON_HOLD', 'QUALITY_CHECK', 'FAILED', name='productionstatus'), nullable=False),
        sa.Column('priority', sa.Enum('LOW', 'MEDIUM', 'HIGH', 'URGENT', name='productionpriority'), nullable=False),
        sa.Column('current_process_stage', sa.Enum('MIXING', 'PROOFING', 'SHAPING', 'BAKING', 'COOLING', 'PACKAGING', 'FINISHING', name='processstage'), nullable=True),
        sa.Column('process_stage_history', sa.JSON(), nullable=True),
        sa.Column('pending_quality_checks', sa.JSON(), nullable=True),
        sa.Column('completed_quality_checks', sa.JSON(), nullable=True),
        sa.Column('estimated_cost', sa.Float(), nullable=True),
        sa.Column('actual_cost', sa.Float(), nullable=True),
        sa.Column('labor_cost', sa.Float(), nullable=True),
        sa.Column('material_cost', sa.Float(), nullable=True),
        sa.Column('overhead_cost', sa.Float(), nullable=True),
        sa.Column('yield_percentage', sa.Float(), nullable=True),
        sa.Column('quality_score', sa.Float(), nullable=True),
        sa.Column('waste_quantity', sa.Float(), nullable=True),
        sa.Column('defect_quantity', sa.Float(), nullable=True),
        # Waste tracking fields (from 20251023_0900 migration)
        sa.Column('waste_defect_type', sa.String(length=100), nullable=True),
        sa.Column('is_ai_assisted', sa.Boolean(), nullable=False, server_default='false'),
        sa.Column('equipment_used', sa.JSON(), nullable=True),
        sa.Column('staff_assigned', sa.JSON(), nullable=True),
        sa.Column('station_id', sa.String(length=50), nullable=True),
        sa.Column('order_id', sa.UUID(), nullable=True),
        sa.Column('forecast_id', sa.UUID(), nullable=True),
        sa.Column('is_rush_order', sa.Boolean(), nullable=True),
        sa.Column('is_special_recipe', sa.Boolean(), nullable=True),
        sa.Column('production_notes', sa.Text(), nullable=True),
        sa.Column('quality_notes', sa.Text(), nullable=True),
        sa.Column('delay_reason', sa.String(length=255), nullable=True),
        sa.Column('cancellation_reason', sa.String(length=255), nullable=True),
        # JTBD Dashboard: Structured reasoning for i18n support
        sa.Column('reasoning_data', sa.JSON(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('completed_at', sa.DateTime(timezone=True), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_production_batches_batch_number'), 'production_batches', ['batch_number'], unique=True)
    op.create_index(op.f('ix_production_batches_current_process_stage'), 'production_batches', ['current_process_stage'], unique=False)
    op.create_index(op.f('ix_production_batches_product_id'), 'production_batches', ['product_id'], unique=False)
    op.create_index(op.f('ix_production_batches_status'), 'production_batches', ['status'], unique=False)
    op.create_index(op.f('ix_production_batches_tenant_id'), 'production_batches', ['tenant_id'], unique=False)
    op.create_index('ix_production_batches_is_ai_assisted', 'production_batches', ['is_ai_assisted'], unique=False)

    # Create production_capacity table
    op.create_table('production_capacity',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('resource_type', sa.String(length=50), nullable=False),
        sa.Column('resource_id', sa.String(length=100), nullable=False),
        sa.Column('resource_name', sa.String(length=255), nullable=False),
        sa.Column('date', sa.DateTime(timezone=True), nullable=False),
        sa.Column('start_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('end_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('total_capacity_units', sa.Float(), nullable=False),
        sa.Column('allocated_capacity_units', sa.Float(), nullable=False),
        sa.Column('remaining_capacity_units', sa.Float(), nullable=False),
        sa.Column('is_available', sa.Boolean(), nullable=True),
        sa.Column('is_maintenance', sa.Boolean(), nullable=True),
        sa.Column('is_reserved', sa.Boolean(), nullable=True),
        sa.Column('equipment_type', sa.String(length=100), nullable=True),
        sa.Column('max_batch_size', sa.Float(), nullable=True),
        sa.Column('min_batch_size', sa.Float(), nullable=True),
        sa.Column('setup_time_minutes', sa.Integer(), nullable=True),
        sa.Column('cleanup_time_minutes', sa.Integer(), nullable=True),
        sa.Column('efficiency_rating', sa.Float(), nullable=True),
        sa.Column('maintenance_status', sa.String(length=50), nullable=True),
        sa.Column('last_maintenance_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('notes', sa.Text(), nullable=True),
        sa.Column('restrictions', sa.JSON(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_production_capacity_date'), 'production_capacity', ['date'], unique=False)
    op.create_index(op.f('ix_production_capacity_tenant_id'), 'production_capacity', ['tenant_id'], unique=False)

    # Create production_schedules table
    op.create_table('production_schedules',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('schedule_date', sa.DateTime(timezone=True), nullable=False),
        sa.Column('shift_start', sa.DateTime(timezone=True), nullable=False),
        sa.Column('shift_end', sa.DateTime(timezone=True), nullable=False),
        sa.Column('total_capacity_hours', sa.Float(), nullable=False),
        sa.Column('planned_capacity_hours', sa.Float(), nullable=False),
        sa.Column('actual_capacity_hours', sa.Float(), nullable=True),
        sa.Column('overtime_hours', sa.Float(), nullable=True),
        sa.Column('staff_count', sa.Integer(), nullable=False),
        sa.Column('equipment_capacity', sa.JSON(), nullable=True),
        sa.Column('station_assignments', sa.JSON(), nullable=True),
        sa.Column('total_batches_planned', sa.Integer(), nullable=False),
        sa.Column('total_batches_completed', sa.Integer(), nullable=True),
        sa.Column('total_quantity_planned', sa.Float(), nullable=False),
        sa.Column('total_quantity_produced', sa.Float(), nullable=True),
        sa.Column('is_finalized', sa.Boolean(), nullable=True),
        sa.Column('is_active', sa.Boolean(), nullable=True),
        sa.Column('efficiency_percentage', sa.Float(), nullable=True),
        sa.Column('utilization_percentage', sa.Float(), nullable=True),
        sa.Column('on_time_completion_rate', sa.Float(), nullable=True),
        sa.Column('schedule_notes', sa.Text(), nullable=True),
        sa.Column('schedule_adjustments', sa.JSON(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('finalized_at', sa.DateTime(timezone=True), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_production_schedules_schedule_date'), 'production_schedules', ['schedule_date'], unique=False)
    op.create_index(op.f('ix_production_schedules_tenant_id'), 'production_schedules', ['tenant_id'], unique=False)

    # Create quality_check_templates table
    op.create_table('quality_check_templates',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('template_code', sa.String(length=100), nullable=True),
        sa.Column('check_type', sa.String(length=50), nullable=False),
        sa.Column('category', sa.String(length=100), nullable=True),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('instructions', sa.Text(), nullable=True),
        sa.Column('parameters', sa.JSON(), nullable=True),
        sa.Column('thresholds', sa.JSON(), nullable=True),
        sa.Column('scoring_criteria', sa.JSON(), nullable=True),
        sa.Column('is_active', sa.Boolean(), nullable=True),
        sa.Column('is_required', sa.Boolean(), nullable=True),
        sa.Column('is_critical', sa.Boolean(), nullable=True),
        sa.Column('weight', sa.Float(), nullable=True),
        sa.Column('min_value', sa.Float(), nullable=True),
        sa.Column('max_value', sa.Float(), nullable=True),
        sa.Column('target_value', sa.Float(), nullable=True),
        sa.Column('unit', sa.String(length=20), nullable=True),
        sa.Column('tolerance_percentage', sa.Float(), nullable=True),
        sa.Column('applicable_stages', sa.JSON(), nullable=True),
        sa.Column('created_by', sa.UUID(), nullable=False),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_quality_check_templates_template_code'), 'quality_check_templates', ['template_code'], unique=False)
    op.create_index(op.f('ix_quality_check_templates_tenant_id'), 'quality_check_templates', ['tenant_id'], unique=False)

    # Create quality_checks table
    op.create_table('quality_checks',
        sa.Column('id', sa.UUID(), nullable=False),
        sa.Column('tenant_id', sa.UUID(), nullable=False),
        sa.Column('batch_id', sa.UUID(), nullable=False),
        sa.Column('template_id', sa.UUID(), nullable=True),
        sa.Column('check_type', sa.String(length=50), nullable=False),
        sa.Column('process_stage', sa.Enum('MIXING', 'PROOFING', 'SHAPING', 'BAKING', 'COOLING', 'PACKAGING', 'FINISHING', name='processstage'), nullable=True),
        sa.Column('check_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('checker_id', sa.String(length=100), nullable=True),
        sa.Column('quality_score', sa.Float(), nullable=False),
        sa.Column('pass_fail', sa.Boolean(), nullable=False),
        sa.Column('defect_count', sa.Integer(), nullable=False),
        sa.Column('defect_types', sa.JSON(), nullable=True),
        sa.Column('measured_weight', sa.Float(), nullable=True),
        sa.Column('measured_temperature', sa.Float(), nullable=True),
        sa.Column('measured_moisture', sa.Float(), nullable=True),
        sa.Column('measured_dimensions', sa.JSON(), nullable=True),
        sa.Column('stage_specific_data', sa.JSON(), nullable=True),
        sa.Column('target_weight', sa.Float(), nullable=True),
        sa.Column('target_temperature', sa.Float(), nullable=True),
        sa.Column('target_moisture', sa.Float(), nullable=True),
        sa.Column('tolerance_percentage', sa.Float(), nullable=True),
        sa.Column('within_tolerance', sa.Boolean(), nullable=True),
        sa.Column('corrective_action_needed', sa.Boolean(), nullable=True),
        sa.Column('corrective_actions', sa.JSON(), nullable=True),
        sa.Column('template_results', sa.JSON(), nullable=True),
        sa.Column('criteria_scores', sa.JSON(), nullable=True),
        sa.Column('check_notes', sa.Text(), nullable=True),
        sa.Column('photos_urls', sa.JSON(), nullable=True),
        sa.Column('certificate_url', sa.String(length=500), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_quality_checks_batch_id'), 'quality_checks', ['batch_id'], unique=False)
    op.create_index(op.f('ix_quality_checks_process_stage'), 'quality_checks', ['process_stage'], unique=False)
    op.create_index(op.f('ix_quality_checks_template_id'), 'quality_checks', ['template_id'], unique=False)
    op.create_index(op.f('ix_quality_checks_tenant_id'), 'quality_checks', ['tenant_id'], unique=False)


def downgrade() -> None:
    # Drop tables in reverse order of creation
    op.drop_index(op.f('ix_quality_checks_tenant_id'), table_name='quality_checks')
    op.drop_index(op.f('ix_quality_checks_template_id'), table_name='quality_checks')
    op.drop_index(op.f('ix_quality_checks_process_stage'), table_name='quality_checks')
    op.drop_index(op.f('ix_quality_checks_batch_id'), table_name='quality_checks')
    op.drop_table('quality_checks')
    op.drop_index(op.f('ix_quality_check_templates_tenant_id'), table_name='quality_check_templates')
    op.drop_index(op.f('ix_quality_check_templates_template_code'), table_name='quality_check_templates')
    op.drop_table('quality_check_templates')
    op.drop_index(op.f('ix_production_schedules_tenant_id'), table_name='production_schedules')
    op.drop_index(op.f('ix_production_schedules_schedule_date'), table_name='production_schedules')
    op.drop_table('production_schedules')
    op.drop_index(op.f('ix_production_capacity_tenant_id'), table_name='production_capacity')
    op.drop_index(op.f('ix_production_capacity_date'), table_name='production_capacity')
    op.drop_table('production_capacity')
    op.drop_index('ix_production_batches_is_ai_assisted', table_name='production_batches')
    op.drop_index(op.f('ix_production_batches_tenant_id'), table_name='production_batches')
    op.drop_index(op.f('ix_production_batches_status'), table_name='production_batches')
    op.drop_index(op.f('ix_production_batches_product_id'), table_name='production_batches')
    op.drop_index(op.f('ix_production_batches_current_process_stage'), table_name='production_batches')
    op.drop_index(op.f('ix_production_batches_batch_number'), table_name='production_batches')
    op.drop_table('production_batches')
    op.drop_index(op.f('ix_equipment_tenant_id'), table_name='equipment')
    op.drop_table('equipment')
    op.drop_index(op.f('ix_audit_logs_user_id'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_tenant_id'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_severity'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_service_name'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_resource_type'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_resource_id'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_created_at'), table_name='audit_logs')
    op.drop_index(op.f('ix_audit_logs_action'), table_name='audit_logs')
    op.drop_index('idx_audit_user_created', table_name='audit_logs')
    op.drop_index('idx_audit_tenant_created', table_name='audit_logs')
    op.drop_index('idx_audit_severity_created', table_name='audit_logs')
    op.drop_index('idx_audit_service_created', table_name='audit_logs')
    op.drop_index('idx_audit_resource_type_action', table_name='audit_logs')
    op.drop_table('audit_logs')

    # Drop enum types
    op.execute("DROP TYPE IF EXISTS equipmenttype")
    op.execute("DROP TYPE IF EXISTS equipmentstatus")
    op.execute("DROP TYPE IF EXISTS productionstatus")
    op.execute("DROP TYPE IF EXISTS productionpriority")
    op.execute("DROP TYPE IF EXISTS processstage")
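One detail worth calling out in the downgrade above: op.drop_table removes the tables, but PostgreSQL keeps the enum types that sa.Enum created implicitly during op.create_table, which is why the explicit DROP TYPE statements are needed. A minimal sketch of the same pattern, using a hypothetical single-table migration rather than anything from this commit:

import sqlalchemy as sa
from alembic import op


# Hypothetical illustration of the enum lifecycle handled above.
def upgrade() -> None:
    op.create_table(
        'demo_batches',
        sa.Column('id', sa.Integer(), primary_key=True),
        # create_table implicitly issues CREATE TYPE demostatus AS ENUM (...)
        sa.Column('status', sa.Enum('PENDING', 'DONE', name='demostatus'), nullable=False),
    )


def downgrade() -> None:
    op.drop_table('demo_batches')  # drops the table, but not the enum type
    op.execute("DROP TYPE IF EXISTS demostatus")  # so drop the type by hand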
@@ -0,0 +1,241 @@
"""Add IoT equipment support

Revision ID: 002_add_iot_equipment_support
Revises: 001_unified_initial_schema
Create Date: 2025-01-12 10:00:00.000000

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = '002_add_iot_equipment_support'
down_revision = '001_unified_initial_schema'
branch_labels = None
depends_on = None


def upgrade():
    """Add IoT connectivity fields to equipment and create sensor data tables"""

    # Add IoT connectivity fields to equipment table
    op.add_column('equipment', sa.Column('iot_enabled', sa.Boolean(), nullable=False, server_default='false'))
    op.add_column('equipment', sa.Column('iot_protocol', sa.String(50), nullable=True))
    op.add_column('equipment', sa.Column('iot_endpoint', sa.String(500), nullable=True))
    op.add_column('equipment', sa.Column('iot_port', sa.Integer(), nullable=True))
    op.add_column('equipment', sa.Column('iot_credentials', postgresql.JSON(astext_type=sa.Text()), nullable=True))
    op.add_column('equipment', sa.Column('iot_connection_status', sa.String(50), nullable=True))
    op.add_column('equipment', sa.Column('iot_last_connected', sa.DateTime(timezone=True), nullable=True))
    op.add_column('equipment', sa.Column('iot_config', postgresql.JSON(astext_type=sa.Text()), nullable=True))
    op.add_column('equipment', sa.Column('manufacturer', sa.String(100), nullable=True))
    op.add_column('equipment', sa.Column('firmware_version', sa.String(50), nullable=True))

    # Add real-time monitoring fields
    op.add_column('equipment', sa.Column('supports_realtime', sa.Boolean(), nullable=False, server_default='false'))
    op.add_column('equipment', sa.Column('poll_interval_seconds', sa.Integer(), nullable=True))

    # Add sensor capability fields
    op.add_column('equipment', sa.Column('temperature_zones', sa.Integer(), nullable=True))
    op.add_column('equipment', sa.Column('supports_humidity', sa.Boolean(), nullable=False, server_default='false'))
    op.add_column('equipment', sa.Column('supports_energy_monitoring', sa.Boolean(), nullable=False, server_default='false'))
    op.add_column('equipment', sa.Column('supports_remote_control', sa.Boolean(), nullable=False, server_default='false'))

    # Create equipment_sensor_readings table for time-series data
    op.create_table(
        'equipment_sensor_readings',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column('tenant_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),
        sa.Column('equipment_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),
        sa.Column('batch_id', postgresql.UUID(as_uuid=True), nullable=True, index=True),

        # Timestamp
        sa.Column('reading_time', sa.DateTime(timezone=True), nullable=False, index=True),

        # Temperature readings (support multiple zones)
        sa.Column('temperature', sa.Float(), nullable=True),
        sa.Column('temperature_zones', postgresql.JSON(astext_type=sa.Text()), nullable=True),
        sa.Column('target_temperature', sa.Float(), nullable=True),

        # Humidity
        sa.Column('humidity', sa.Float(), nullable=True),
        sa.Column('target_humidity', sa.Float(), nullable=True),

        # Energy monitoring
        sa.Column('energy_consumption_kwh', sa.Float(), nullable=True),
        sa.Column('power_current_kw', sa.Float(), nullable=True),

        # Equipment status
        sa.Column('operational_status', sa.String(50), nullable=True),
        sa.Column('cycle_stage', sa.String(100), nullable=True),
        sa.Column('cycle_progress_percentage', sa.Float(), nullable=True),
        sa.Column('time_remaining_minutes', sa.Integer(), nullable=True),

        # Process parameters
        sa.Column('motor_speed_rpm', sa.Float(), nullable=True),
        sa.Column('door_status', sa.String(20), nullable=True),
        sa.Column('steam_level', sa.Float(), nullable=True),

        # Quality indicators
        sa.Column('product_weight_kg', sa.Float(), nullable=True),
        sa.Column('moisture_content', sa.Float(), nullable=True),

        # Additional sensor data (flexible JSON for manufacturer-specific metrics)
        sa.Column('additional_sensors', postgresql.JSON(astext_type=sa.Text()), nullable=True),

        # Data quality
        sa.Column('data_quality_score', sa.Float(), nullable=True),
        sa.Column('is_anomaly', sa.Boolean(), nullable=False, server_default='false'),

        # Timestamps
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now()),

        # Foreign key constraints
        sa.ForeignKeyConstraint(['equipment_id'], ['equipment.id'], ondelete='CASCADE'),
    )

    # Create indexes for time-series queries
    op.create_index(
        'idx_sensor_readings_equipment_time',
        'equipment_sensor_readings',
        ['equipment_id', 'reading_time'],
    )
    op.create_index(
        'idx_sensor_readings_batch',
        'equipment_sensor_readings',
        ['batch_id', 'reading_time'],
    )
    op.create_index(
        'idx_sensor_readings_tenant_time',
        'equipment_sensor_readings',
        ['tenant_id', 'reading_time'],
    )

    # Create equipment_connection_logs table for tracking connectivity
    op.create_table(
        'equipment_connection_logs',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column('tenant_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),
        sa.Column('equipment_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),

        # Connection event
        sa.Column('event_type', sa.String(50), nullable=False),  # connected, disconnected, error, timeout
        sa.Column('event_time', sa.DateTime(timezone=True), nullable=False, index=True),

        # Connection details
        sa.Column('connection_status', sa.String(50), nullable=False),
        sa.Column('protocol_used', sa.String(50), nullable=True),
        sa.Column('endpoint', sa.String(500), nullable=True),

        # Error tracking
        sa.Column('error_message', sa.Text(), nullable=True),
        sa.Column('error_code', sa.String(50), nullable=True),

        # Performance metrics
        sa.Column('response_time_ms', sa.Integer(), nullable=True),
        sa.Column('data_points_received', sa.Integer(), nullable=True),

        # Additional details
        sa.Column('additional_data', postgresql.JSON(astext_type=sa.Text()), nullable=True),

        # Timestamps
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now()),

        # Foreign key constraints
        sa.ForeignKeyConstraint(['equipment_id'], ['equipment.id'], ondelete='CASCADE'),
    )

    # Create index for connection logs
    op.create_index(
        'idx_connection_logs_equipment_time',
        'equipment_connection_logs',
        ['equipment_id', 'event_time'],
    )
    # Create equipment_iot_alerts table for IoT-based alerts
    op.create_table(
        'equipment_iot_alerts',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column('tenant_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),
        sa.Column('equipment_id', postgresql.UUID(as_uuid=True), nullable=False, index=True),
        sa.Column('batch_id', postgresql.UUID(as_uuid=True), nullable=True, index=True),

        # Alert information
        sa.Column('alert_type', sa.String(50), nullable=False),  # temperature_deviation, connection_lost, equipment_error
        sa.Column('severity', sa.String(20), nullable=False),  # info, warning, critical
        sa.Column('alert_time', sa.DateTime(timezone=True), nullable=False, index=True),

        # Alert details
        sa.Column('title', sa.String(255), nullable=False),
        sa.Column('message', sa.Text(), nullable=False),
        sa.Column('sensor_reading_id', postgresql.UUID(as_uuid=True), nullable=True),

        # Threshold information
        sa.Column('threshold_value', sa.Float(), nullable=True),
        sa.Column('actual_value', sa.Float(), nullable=True),
        sa.Column('deviation_percentage', sa.Float(), nullable=True),

        # Status tracking
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default='true'),
        sa.Column('is_acknowledged', sa.Boolean(), nullable=False, server_default='false'),
        sa.Column('acknowledged_by', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('acknowledged_at', sa.DateTime(timezone=True), nullable=True),

        sa.Column('is_resolved', sa.Boolean(), nullable=False, server_default='false'),
        sa.Column('resolved_by', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('resolved_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('resolution_notes', sa.Text(), nullable=True),

        # Automated response
        sa.Column('auto_resolved', sa.Boolean(), nullable=False, server_default='false'),
        sa.Column('corrective_action_taken', sa.String(255), nullable=True),

        # Additional data
        sa.Column('additional_data', postgresql.JSON(astext_type=sa.Text()), nullable=True),

        # Timestamps
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now()),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.func.now(), onupdate=sa.func.now()),

        # Foreign key constraints
        sa.ForeignKeyConstraint(['equipment_id'], ['equipment.id'], ondelete='CASCADE'),
    )

    # Create indexes for alerts
    op.create_index(
        'idx_iot_alerts_equipment_time',
        'equipment_iot_alerts',
        ['equipment_id', 'alert_time'],
    )
    op.create_index(
        'idx_iot_alerts_active',
        'equipment_iot_alerts',
        ['is_active', 'is_resolved'],
    )


def downgrade():
    """Remove IoT equipment support"""

    # Drop tables
    op.drop_table('equipment_iot_alerts')
    op.drop_table('equipment_connection_logs')
    op.drop_table('equipment_sensor_readings')

    # Remove columns from equipment table
    op.drop_column('equipment', 'supports_remote_control')
    op.drop_column('equipment', 'supports_energy_monitoring')
    op.drop_column('equipment', 'supports_humidity')
    op.drop_column('equipment', 'temperature_zones')
    op.drop_column('equipment', 'poll_interval_seconds')
    op.drop_column('equipment', 'supports_realtime')
    op.drop_column('equipment', 'firmware_version')
    op.drop_column('equipment', 'manufacturer')
    op.drop_column('equipment', 'iot_config')
    op.drop_column('equipment', 'iot_last_connected')
    op.drop_column('equipment', 'iot_connection_status')
    op.drop_column('equipment', 'iot_credentials')
    op.drop_column('equipment', 'iot_port')
    op.drop_column('equipment', 'iot_endpoint')
    op.drop_column('equipment', 'iot_protocol')
    op.drop_column('equipment', 'iot_enabled')
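The composite indexes created above (equipment_id plus reading_time, and so on) are shaped for the usual time-series access pattern: the latest N readings for one piece of equipment. A hedged sketch of such a query with SQLAlchemy Core; the lightweight table definition below only names the columns the query touches and is not how the application necessarily maps this table:

import sqlalchemy as sa

# Lightweight references to the migration's table and columns (names from above).
readings = sa.table(
    'equipment_sensor_readings',
    sa.column('equipment_id'),
    sa.column('reading_time'),
    sa.column('temperature'),
)

stmt = (
    sa.select(readings.c.reading_time, readings.c.temperature)
    .where(readings.c.equipment_id == sa.bindparam('eq_id'))
    .order_by(readings.c.reading_time.desc())  # served by idx_sensor_readings_equipment_time
    .limit(100)
)
print(stmt)  # compiles to SQL without needing a live database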
@@ -0,0 +1,61 @@
"""Rename metadata to additional_data

Revision ID: 003_rename_metadata
Revises: 002_add_iot_equipment_support
Create Date: 2025-01-12 21:05:00.000000

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '003_rename_metadata'
down_revision = '002_add_iot_equipment_support'
branch_labels = None
depends_on = None


def upgrade():
    """Rename metadata columns to additional_data to avoid SQLAlchemy reserved attribute conflict"""

    # Check whether the columns still need renaming (migration 002 may already
    # have created them as additional_data)
    from sqlalchemy import inspect

    connection = op.get_bind()
    inspector = inspect(connection)

    # Check equipment_connection_logs table
    if 'equipment_connection_logs' in inspector.get_table_names():
        columns = [col['name'] for col in inspector.get_columns('equipment_connection_logs')]
        if 'metadata' in columns and 'additional_data' not in columns:
            op.execute('ALTER TABLE equipment_connection_logs RENAME COLUMN metadata TO additional_data')

    # Check equipment_iot_alerts table
    if 'equipment_iot_alerts' in inspector.get_table_names():
        columns = [col['name'] for col in inspector.get_columns('equipment_iot_alerts')]
        if 'metadata' in columns and 'additional_data' not in columns:
            op.execute('ALTER TABLE equipment_iot_alerts RENAME COLUMN metadata TO additional_data')


def downgrade():
    """Revert column names back to metadata"""

    # Check if columns need to be renamed back
    from sqlalchemy import inspect

    connection = op.get_bind()
    inspector = inspect(connection)

    # Check equipment_iot_alerts table
    if 'equipment_iot_alerts' in inspector.get_table_names():
        columns = [col['name'] for col in inspector.get_columns('equipment_iot_alerts')]
        if 'additional_data' in columns and 'metadata' not in columns:
            op.execute('ALTER TABLE equipment_iot_alerts RENAME COLUMN additional_data TO metadata')

    # Check equipment_connection_logs table
    if 'equipment_connection_logs' in inspector.get_table_names():
        columns = [col['name'] for col in inspector.get_columns('equipment_connection_logs')]
        if 'additional_data' in columns and 'metadata' not in columns:
            op.execute('ALTER TABLE equipment_connection_logs RENAME COLUMN additional_data TO metadata')
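The rename exists because declarative SQLAlchemy models reserve the attribute name "metadata" for the MetaData collection, so a mapped column cannot use it. A minimal reproduction of the conflict; the model below is a throwaway example, not code from this commit:

import sqlalchemy as sa
from sqlalchemy.exc import InvalidRequestError
from sqlalchemy.orm import declarative_base

Base = declarative_base()

try:
    class ConnectionLogDemo(Base):
        __tablename__ = 'connection_log_demo'
        id = sa.Column(sa.Integer, primary_key=True)
        metadata = sa.Column(sa.JSON)  # clashes with Base.metadata
except InvalidRequestError as exc:
    print(f"Rejected as expected: {exc}")  # hence additional_data in 002/003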