diff --git a/bakery-ia-ca.crt b/bakery-ia-ca.crt deleted file mode 100644 index 10cb2c52..00000000 --- a/bakery-ia-ca.crt +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB9jCCAZ2gAwIBAgIRALeFt7uyrRUtqT8VC8AyOqAwCgYIKoZIzj0EAwIwWzEL -MAkGA1UEBhMCVVMxEjAQBgNVBAoTCUJha2VyeSBJQTEbMBkGA1UECxMSQmFrZXJ5 -IElBIExvY2FsIENBMRswGQYDVQQDExJiYWtlcnktaWEtbG9jYWwtY2EwHhcNMjUw -OTI4MTYzMzAxWhcNMjYwOTI4MTYzMzAxWjBbMQswCQYDVQQGEwJVUzESMBAGA1UE -ChMJQmFrZXJ5IElBMRswGQYDVQQLExJCYWtlcnkgSUEgTG9jYWwgQ0ExGzAZBgNV -BAMTEmJha2VyeS1pYS1sb2NhbC1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IA -BMvQUfoPOJxF4JWwFX+YoolhrMKMBJ7pN5roI6/puxXa3UKRuQSF17lQGqdI9MFy -oYaQJlQ9PqI5RwqZn6uAIT6jQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8E -BTADAQH/MB0GA1UdDgQWBBS5waYyMCV5bG55I8YGZSIJCioRdjAKBggqhkjOPQQD -AgNHADBEAiAckCO8A4ZHLQg0wYi8q67lLB83OVXpyJ4Y3csjKI3WogIgNtuWgJ48 -uOcW+pgMS55qTRkhZfAZXdAlhq/M2d/C6QA= ------END CERTIFICATE----- diff --git a/docs/DATABASE_INITIALIZATION.md b/docs/DATABASE_INITIALIZATION.md new file mode 100644 index 00000000..70d1eb5d --- /dev/null +++ b/docs/DATABASE_INITIALIZATION.md @@ -0,0 +1,309 @@ +# Database Initialization System + +This document explains the automatic database initialization system for the Bakery-IA microservices architecture. + +## Overview + +The system handles two main scenarios: + +1. **Production/First-time Deployment**: Automatically creates tables from SQLAlchemy models and sets up Alembic version tracking +2. **Development Workflow**: Provides easy reset capabilities to start with a clean slate + +## Key Features + +- ✅ **Automatic Table Creation**: Creates tables from SQLAlchemy models when database is empty +- ✅ **Alembic Integration**: Properly manages migration versions and history +- ✅ **Development Reset**: Easy clean-slate restart for development +- ✅ **Production Ready**: Safe for production deployments +- ✅ **All 14 Services**: Works across all microservices + +## How It Works + +### 1. Automatic Detection + +The system automatically detects the database state: + +- **Empty Database**: Creates tables from models and initializes Alembic +- **Existing Database with Alembic**: Runs pending migrations +- **Existing Database without Alembic**: Initializes Alembic on existing schema +- **Force Recreate Mode**: Drops everything and recreates (development only) + +### 2. Integration Points + +#### Service Startup +```python +# In your service main.py +class AuthService(StandardFastAPIService): + # Migration verification happens automatically during startup + pass +``` + +#### Kubernetes Migration Jobs +```yaml +# Enhanced migration jobs handle automatic table creation +containers: +- name: migrate + image: bakery/auth-service:${IMAGE_TAG} + command: ["python", "/app/scripts/run_migrations.py", "auth"] +``` + +#### Environment Variables +```bash +# Control behavior via environment variables +DB_FORCE_RECREATE=true # Force recreate tables (development) +DEVELOPMENT_MODE=true # Enable development features +``` + +## Usage Scenarios + +### 1. First-Time Production Deployment + +**What happens:** +1. Migration job detects empty database +2. Creates all tables from SQLAlchemy models +3. Stamps Alembic with the latest migration version +4. Service starts and verifies migration state + +**No manual intervention required!** + +### 2. 
Development - Clean Slate Reset + +**Option A: Using the Development Script** +```bash +# Reset specific service +./scripts/dev-reset-database.sh --service auth + +# Reset all services +./scripts/dev-reset-database.sh --all + +# Reset with auto-confirmation +./scripts/dev-reset-database.sh --service auth --yes +``` + +**Option B: Using the Workflow Script** +```bash +# Clean start with dev profile +./scripts/dev-workflow.sh clean --profile dev + +# Reset specific service and restart +./scripts/dev-workflow.sh reset --service auth +``` + +**Option C: Manual Environment Variable** +```bash +# Set force recreate mode +kubectl patch configmap development-config -n bakery-ia \ + --patch='{"data":{"DB_FORCE_RECREATE":"true"}}' + +# Run migration job +kubectl apply -f infrastructure/kubernetes/base/migrations/auth-migration-job.yaml +``` + +### 3. Regular Development Workflow + +```bash +# Start development environment +./scripts/dev-workflow.sh start --profile minimal + +# Check status +./scripts/dev-workflow.sh status + +# View logs for specific service +./scripts/dev-workflow.sh logs --service auth + +# Run migrations only +./scripts/dev-workflow.sh migrate --service auth +``` + +## Configuration + +### Skaffold Profiles + +The system supports different deployment profiles: + +```yaml +# skaffold.yaml profiles +profiles: + - name: minimal # Only auth and inventory + - name: full # All services + infrastructure + - name: single # Template for single service + - name: dev # Full development environment +``` + +### Environment Variables + +| Variable | Description | Default | +|----------|-------------|---------| +| `DB_FORCE_RECREATE` | Force recreate tables | `false` | +| `DEVELOPMENT_MODE` | Enable development features | `false` | +| `DEBUG_LOGGING` | Enable debug logging | `false` | +| `SKIP_MIGRATION_VERSION_CHECK` | Skip version verification | `false` | + +### Service Configuration + +Each service automatically detects its configuration: + +- **Models Module**: `services.{service}.app.models` +- **Alembic Config**: `services/{service}/alembic.ini` +- **Migration Scripts**: `services/{service}/migrations/versions/` + +## Development Workflows + +### Quick Start +```bash +# 1. Start minimal environment +./scripts/dev-workflow.sh start --profile minimal + +# 2. Reset specific service when needed +./scripts/dev-workflow.sh reset --service auth + +# 3. Clean restart when you want fresh start +./scripts/dev-workflow.sh clean --profile dev +``` + +### Database Reset Workflows + +#### Scenario 1: "I want to reset auth service only" +```bash +./scripts/dev-reset-database.sh --service auth +``` + +#### Scenario 2: "I want to start completely fresh" +```bash +./scripts/dev-reset-database.sh --all +# or +./scripts/dev-workflow.sh clean --profile dev +``` + +#### Scenario 3: "I want to reset and restart in one command" +```bash +./scripts/dev-workflow.sh reset --service auth +``` + +## Technical Details + +### Database Initialization Manager + +The core logic is in [`shared/database/init_manager.py`](shared/database/init_manager.py): + +```python +# Main initialization method +async def initialize_database(self) -> Dict[str, Any]: + # Check current database state + db_state = await self._check_database_state() + + # Handle different scenarios + if self.force_recreate: + result = await self._handle_force_recreate() + elif db_state["is_empty"]: + result = await self._handle_first_time_deployment() + # ... 
etc +``` + +### Migration Job Enhancement + +Migration jobs now use the enhanced runner: + +```yaml +containers: +- name: migrate + command: ["python", "/app/scripts/run_migrations.py", "auth"] + env: + - name: AUTH_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: AUTH_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: development-config + key: DB_FORCE_RECREATE +``` + +### Service Integration + +Services automatically handle table initialization during startup: + +```python +async def _handle_database_tables(self): + # Check if we're in force recreate mode + force_recreate = os.getenv("DB_FORCE_RECREATE", "false").lower() == "true" + + # Initialize database with automatic table creation + result = await initialize_service_database( + database_manager=self.database_manager, + service_name=self.service_name, + force_recreate=force_recreate + ) +``` + +## Troubleshooting + +### Common Issues + +#### 1. Migration Job Fails +```bash +# Check job logs +kubectl logs -l job-name=auth-migration -n bakery-ia + +# Check database connectivity +kubectl exec auth-db-pod -n bakery-ia -- pg_isready +``` + +#### 2. Service Won't Start +```bash +# Check service logs +kubectl logs -l app.kubernetes.io/name=auth -n bakery-ia + +# Check database state +./scripts/dev-workflow.sh status +``` + +#### 3. Tables Not Created +```bash +# Force recreate mode +./scripts/dev-reset-database.sh --service auth --yes + +# Check migration job status +kubectl get jobs -n bakery-ia +``` + +### Debugging Commands + +```bash +# Check all components +./scripts/dev-workflow.sh status + +# View specific service logs +./scripts/dev-workflow.sh logs --service auth + +# Check migration jobs +kubectl get jobs -l app.kubernetes.io/component=migration -n bakery-ia + +# Check ConfigMaps +kubectl get configmaps -n bakery-ia + +# View database pods +kubectl get pods -l app.kubernetes.io/component=database -n bakery-ia +``` + +## Benefits + +1. **Zero Manual Setup**: Tables are created automatically on first deployment +2. **Development Friendly**: Easy reset capabilities for clean development +3. **Production Safe**: Handles existing databases gracefully +4. **Alembic Compatible**: Maintains proper migration history and versioning +5. **Service Agnostic**: Works identically across all 14 microservices +6. **Kubernetes Native**: Integrates seamlessly with Kubernetes workflows + +## Migration from TODO State + +If you have existing services with TODO migrations: + +1. **Keep existing models**: Your SQLAlchemy models are the source of truth +2. **Deploy normally**: The system will create tables from models automatically +3. **Alembic versions**: Will be stamped with the latest migration version +4. **No data loss**: Existing data is preserved in production deployments + +The system eliminates the need to manually fill in TODO migration files while maintaining proper Alembic version tracking. 
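+
+### Verifying a First-Time Initialization
+
+As a quick sanity check after the system has created tables and stamped Alembic, you can read back the stamped revision. This is a minimal sketch (shown for the auth service), reusing the same secret and ConfigMap lookups as the backup scripts under `infrastructure/scripts/`; `alembic_version` is the standard table Alembic uses to record the current revision:
+
+```bash
+# Look up credentials the same way the backup scripts do
+DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.AUTH_DB_USER}' | base64 -d)
+DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.AUTH_DB_NAME}')
+POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=auth-db -o jsonpath='{.items[0].metadata.name}')
+
+# Print the revision the database is stamped with
+kubectl exec "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" -c "SELECT version_num FROM alembic_version;"
+```
+
+If the query returns the head revision of the service's `migrations/versions/` directory, the stamp succeeded.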
\ No newline at end of file diff --git a/infrastructure/kubernetes/base/components/databases/alert-processor-db.yaml b/infrastructure/kubernetes/base/components/databases/alert-processor-db.yaml index cf5ab3aa..76e91cb8 100644 --- a/infrastructure/kubernetes/base/components/databases/alert-processor-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/alert-processor-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: alert-processor-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/alert-processor-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: alert-processor-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: alert-processor-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/auth-db.yaml b/infrastructure/kubernetes/base/components/databases/auth-db.yaml index 3f2d3f24..8a1fe5b4 100644 --- a/infrastructure/kubernetes/base/components/databases/auth-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/auth-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: auth-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/auth-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: auth-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: auth-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/external-db.yaml b/infrastructure/kubernetes/base/components/databases/external-db.yaml index 641ab1c8..be1ac23a 100644 --- a/infrastructure/kubernetes/base/components/databases/external-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/external-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: external-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/external-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + 
mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: external-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: external-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/forecasting-db.yaml b/infrastructure/kubernetes/base/components/databases/forecasting-db.yaml index 1d002f4f..af511fba 100644 --- a/infrastructure/kubernetes/base/components/databases/forecasting-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/forecasting-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: forecasting-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/forecasting-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: forecasting-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: forecasting-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/inventory-db.yaml b/infrastructure/kubernetes/base/components/databases/inventory-db.yaml index 1054f51b..06eb141b 100644 --- a/infrastructure/kubernetes/base/components/databases/inventory-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/inventory-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: inventory-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/inventory-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: inventory-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: inventory-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/notification-db.yaml b/infrastructure/kubernetes/base/components/databases/notification-db.yaml index 5f4776cf..ce04ca04 100644 --- a/infrastructure/kubernetes/base/components/databases/notification-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/notification-db.yaml 
@@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: notification-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/notification-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: notification-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: notification-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/orders-db.yaml b/infrastructure/kubernetes/base/components/databases/orders-db.yaml index 61f65c6c..b31e2035 100644 --- a/infrastructure/kubernetes/base/components/databases/orders-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/orders-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: orders-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/orders-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: orders-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: orders-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/pos-db.yaml b/infrastructure/kubernetes/base/components/databases/pos-db.yaml index 26ab44c2..ad5fa00d 100644 --- a/infrastructure/kubernetes/base/components/databases/pos-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/pos-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pos-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/pos-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: pos-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: pos-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce 
resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/production-db.yaml b/infrastructure/kubernetes/base/components/databases/production-db.yaml index 1434f668..a5c1eef9 100644 --- a/infrastructure/kubernetes/base/components/databases/production-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/production-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: production-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/production-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: production-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: production-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/recipes-db.yaml b/infrastructure/kubernetes/base/components/databases/recipes-db.yaml index d3509223..ca7a9914 100644 --- a/infrastructure/kubernetes/base/components/databases/recipes-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/recipes-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: recipes-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/recipes-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: recipes-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: recipes-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/sales-db.yaml b/infrastructure/kubernetes/base/components/databases/sales-db.yaml index 20f521fc..25bb846b 100644 --- a/infrastructure/kubernetes/base/components/databases/sales-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/sales-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: sales-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/sales-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - 
name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: sales-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: sales-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/suppliers-db.yaml b/infrastructure/kubernetes/base/components/databases/suppliers-db.yaml index 8a448a02..ba7f3ae4 100644 --- a/infrastructure/kubernetes/base/components/databases/suppliers-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/suppliers-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: suppliers-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/suppliers-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: suppliers-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: suppliers-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/tenant-db.yaml b/infrastructure/kubernetes/base/components/databases/tenant-db.yaml index 6f9285ae..88ad0eba 100644 --- a/infrastructure/kubernetes/base/components/databases/tenant-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/tenant-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: tenant-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/tenant-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: tenant-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: tenant-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/components/databases/training-db.yaml b/infrastructure/kubernetes/base/components/databases/training-db.yaml index 71d4e61f..abd82a53 100644 --- a/infrastructure/kubernetes/base/components/databases/training-db.yaml +++ b/infrastructure/kubernetes/base/components/databases/training-db.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: 
PersistentVolume +metadata: + name: training-db-pv + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/opt/bakery-data/training-db" +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -51,6 +67,8 @@ spec: volumeMounts: - name: postgres-data mountPath: /var/lib/postgresql/data + - name: init-scripts + mountPath: /docker-entrypoint-initdb.d resources: requests: memory: "256Mi" @@ -86,6 +104,9 @@ spec: - name: postgres-data persistentVolumeClaim: claimName: training-db-pvc + - name: init-scripts + configMap: + name: postgres-init-config --- apiVersion: v1 @@ -117,8 +138,9 @@ metadata: app.kubernetes.io/name: training-db app.kubernetes.io/component: database spec: + storageClassName: manual accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi \ No newline at end of file + storage: 5Gi diff --git a/infrastructure/kubernetes/base/configmap.yaml b/infrastructure/kubernetes/base/configmap.yaml index 00bfdae9..cde348dd 100644 --- a/infrastructure/kubernetes/base/configmap.yaml +++ b/infrastructure/kubernetes/base/configmap.yaml @@ -13,6 +13,9 @@ data: ENVIRONMENT: "production" DEBUG: "false" LOG_LEVEL: "INFO" + + # Database initialization settings + DB_FORCE_RECREATE: "false" BUILD_DATE: "2024-01-20T10:00:00Z" VCS_REF: "latest" IMAGE_TAG: "latest" diff --git a/infrastructure/kubernetes/base/configs/development-config.yaml b/infrastructure/kubernetes/base/configs/development-config.yaml new file mode 100644 index 00000000..2424511e --- /dev/null +++ b/infrastructure/kubernetes/base/configs/development-config.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: development-config + namespace: bakery-ia + labels: + app.kubernetes.io/component: config + app.kubernetes.io/part-of: bakery-ia + environment: development +data: + # Set to "true" to force recreate all tables from scratch (development mode) + # This will drop all existing tables and recreate them from SQLAlchemy models + DB_FORCE_RECREATE: "false" + + # Development mode flag + DEVELOPMENT_MODE: "true" + + # Enable debug logging in development + DEBUG_LOGGING: "true" + + # Skip migration version checking in development + SKIP_MIGRATION_VERSION_CHECK: "false" \ No newline at end of file diff --git a/infrastructure/kubernetes/base/configs/postgres-init-config.yaml b/infrastructure/kubernetes/base/configs/postgres-init-config.yaml new file mode 100644 index 00000000..d6693d2c --- /dev/null +++ b/infrastructure/kubernetes/base/configs/postgres-init-config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-init-config + namespace: bakery-ia + labels: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: bakery-ia +data: + init.sql: | + CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; + CREATE EXTENSION IF NOT EXISTS "pg_stat_statements"; \ No newline at end of file diff --git a/infrastructure/kubernetes/base/kustomization.yaml b/infrastructure/kubernetes/base/kustomization.yaml index 8c930da4..8eb7f337 100644 --- a/infrastructure/kubernetes/base/kustomization.yaml +++ b/infrastructure/kubernetes/base/kustomization.yaml @@ -11,6 +11,26 @@ resources: - secrets.yaml - ingress-https.yaml + # Additional configs + - configs/postgres-init-config.yaml + - configs/development-config.yaml + + # Migration jobs + - migrations/auth-migration-job.yaml + - migrations/tenant-migration-job.yaml + - migrations/training-migration-job.yaml + 
- migrations/forecasting-migration-job.yaml + - migrations/sales-migration-job.yaml + - migrations/external-migration-job.yaml + - migrations/notification-migration-job.yaml + - migrations/inventory-migration-job.yaml + - migrations/recipes-migration-job.yaml + - migrations/suppliers-migration-job.yaml + - migrations/pos-migration-job.yaml + - migrations/orders-migration-job.yaml + - migrations/production-migration-job.yaml + - migrations/alert-processor-migration-job.yaml + # Infrastructure components - components/databases/redis.yaml - components/databases/rabbitmq.yaml diff --git a/infrastructure/kubernetes/base/migrations/alert-processor-migration-job.yaml b/infrastructure/kubernetes/base/migrations/alert-processor-migration-job.yaml new file mode 100644 index 00000000..497e98cd --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/alert-processor-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for alert-processor service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: alert-processor-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: alert-processor-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: alert-processor-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h alert-processor-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/alert-processor:dev + command: ["python", "/app/scripts/run_migrations.py", "alert_processor"] + env: + - name: ALERT_PROCESSOR_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: ALERT_PROCESSOR_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/auth-migration-job.yaml b/infrastructure/kubernetes/base/migrations/auth-migration-job.yaml new file mode 100644 index 00000000..a018a07e --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/auth-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for auth service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: auth-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: auth-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: auth-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h auth-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/auth-service:dev + command: ["python", "/app/scripts/run_migrations.py", "auth"] + env: + - name: AUTH_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: AUTH_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: 
DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/external-migration-job.yaml b/infrastructure/kubernetes/base/migrations/external-migration-job.yaml new file mode 100644 index 00000000..33ca382a --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/external-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for external service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: external-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: external-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: external-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h external-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/external-service:dev + command: ["python", "/app/scripts/run_migrations.py", "external"] + env: + - name: EXTERNAL_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: EXTERNAL_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/forecasting-migration-job.yaml b/infrastructure/kubernetes/base/migrations/forecasting-migration-job.yaml new file mode 100644 index 00000000..64ce40ee --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/forecasting-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for forecasting service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: forecasting-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: forecasting-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: forecasting-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h forecasting-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/forecasting-service:dev + command: ["python", "/app/scripts/run_migrations.py", "forecasting"] + env: + - name: FORECASTING_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: FORECASTING_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/inventory-migration-job.yaml b/infrastructure/kubernetes/base/migrations/inventory-migration-job.yaml new file mode 100644 
index 00000000..06598d56 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/inventory-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for inventory service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: inventory-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: inventory-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: inventory-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h inventory-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/inventory-service:dev + command: ["python", "/app/scripts/run_migrations.py", "inventory"] + env: + - name: INVENTORY_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: INVENTORY_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/notification-migration-job.yaml b/infrastructure/kubernetes/base/migrations/notification-migration-job.yaml new file mode 100644 index 00000000..e3328f42 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/notification-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for notification service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: notification-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: notification-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: notification-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h notification-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/notification-service:dev + command: ["python", "/app/scripts/run_migrations.py", "notification"] + env: + - name: NOTIFICATION_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: NOTIFICATION_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/orders-migration-job.yaml b/infrastructure/kubernetes/base/migrations/orders-migration-job.yaml new file mode 100644 index 00000000..8e1a6ed9 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/orders-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for orders service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: orders-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: orders-migration + 
app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: orders-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h orders-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/orders-service:dev + command: ["python", "/app/scripts/run_migrations.py", "orders"] + env: + - name: ORDERS_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: ORDERS_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/pos-migration-job.yaml b/infrastructure/kubernetes/base/migrations/pos-migration-job.yaml new file mode 100644 index 00000000..20865c1c --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/pos-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for pos service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: pos-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: pos-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: pos-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h pos-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/pos-service:dev + command: ["python", "/app/scripts/run_migrations.py", "pos"] + env: + - name: POS_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: POS_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/production-migration-job.yaml b/infrastructure/kubernetes/base/migrations/production-migration-job.yaml new file mode 100644 index 00000000..048e5264 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/production-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for production service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: production-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: production-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: production-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h production-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: 
"128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/production-service:dev + command: ["python", "/app/scripts/run_migrations.py", "production"] + env: + - name: PRODUCTION_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: PRODUCTION_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/recipes-migration-job.yaml b/infrastructure/kubernetes/base/migrations/recipes-migration-job.yaml new file mode 100644 index 00000000..ac36f2a5 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/recipes-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for recipes service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: recipes-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: recipes-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: recipes-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h recipes-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/recipes-service:dev + command: ["python", "/app/scripts/run_migrations.py", "recipes"] + env: + - name: RECIPES_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: RECIPES_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/sales-migration-job.yaml b/infrastructure/kubernetes/base/migrations/sales-migration-job.yaml new file mode 100644 index 00000000..43e77e96 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/sales-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for sales service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: sales-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: sales-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: sales-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h sales-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/sales-service:dev + command: ["python", "/app/scripts/run_migrations.py", "sales"] + env: + - name: SALES_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: SALES_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: 
+ requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/suppliers-migration-job.yaml b/infrastructure/kubernetes/base/migrations/suppliers-migration-job.yaml new file mode 100644 index 00000000..34503d73 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/suppliers-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for suppliers service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: suppliers-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: suppliers-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: suppliers-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h suppliers-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/suppliers-service:dev + command: ["python", "/app/scripts/run_migrations.py", "suppliers"] + env: + - name: SUPPLIERS_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: SUPPLIERS_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/tenant-migration-job.yaml b/infrastructure/kubernetes/base/migrations/tenant-migration-job.yaml new file mode 100644 index 00000000..e7029d6d --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/tenant-migration-job.yaml @@ -0,0 +1,55 @@ +# Enhanced migration job for tenant service with automatic table creation +apiVersion: batch/v1 +kind: Job +metadata: + name: tenant-migration + namespace: bakery-ia + labels: + app.kubernetes.io/name: tenant-migration + app.kubernetes.io/component: migration + app.kubernetes.io/part-of: bakery-ia +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: tenant-migration + app.kubernetes.io/component: migration + spec: + initContainers: + - name: wait-for-db + image: postgres:15-alpine + command: ["sh", "-c", "until pg_isready -h tenant-db-service -p 5432; do sleep 2; done"] + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + containers: + - name: migrate + image: bakery/tenant-service:dev + command: ["python", "/app/scripts/run_migrations.py", "tenant"] + env: + - name: TENANT_DATABASE_URL + valueFrom: + secretKeyRef: + name: database-secrets + key: TENANT_DATABASE_URL + - name: DB_FORCE_RECREATE + valueFrom: + configMapKeyRef: + name: bakery-config + key: DB_FORCE_RECREATE + optional: true + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + restartPolicy: OnFailure diff --git a/infrastructure/kubernetes/base/migrations/training-migration-job.yaml b/infrastructure/kubernetes/base/migrations/training-migration-job.yaml new file mode 100644 index 00000000..3ce549b1 --- /dev/null +++ b/infrastructure/kubernetes/base/migrations/training-migration-job.yaml @@ -0,0 +1,55 @@ +# 
Enhanced migration job for training service with automatic table creation
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: training-migration
+  namespace: bakery-ia
+  labels:
+    app.kubernetes.io/name: training-migration
+    app.kubernetes.io/component: migration
+    app.kubernetes.io/part-of: bakery-ia
+spec:
+  backoffLimit: 3
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: training-migration
+        app.kubernetes.io/component: migration
+    spec:
+      initContainers:
+      - name: wait-for-db
+        image: postgres:15-alpine
+        command: ["sh", "-c", "until pg_isready -h training-db-service -p 5432; do sleep 2; done"]
+        resources:
+          requests:
+            memory: "64Mi"
+            cpu: "50m"
+          limits:
+            memory: "128Mi"
+            cpu: "100m"
+      containers:
+      - name: migrate
+        image: bakery/training-service:dev
+        command: ["python", "/app/scripts/run_migrations.py", "training"]
+        env:
+        - name: TRAINING_DATABASE_URL
+          valueFrom:
+            secretKeyRef:
+              name: database-secrets
+              key: TRAINING_DATABASE_URL
+        - name: DB_FORCE_RECREATE
+          valueFrom:
+            configMapKeyRef:
+              name: bakery-config
+              key: DB_FORCE_RECREATE
+              optional: true
+        - name: LOG_LEVEL
+          value: "INFO"
+        resources:
+          requests:
+            memory: "256Mi"
+            cpu: "100m"
+          limits:
+            memory: "512Mi"
+            cpu: "500m"
+      restartPolicy: OnFailure
diff --git a/infrastructure/scripts/alert-processor_backup.sh b/infrastructure/scripts/alert-processor_backup.sh
new file mode 100755
index 00000000..5ad4d068
--- /dev/null
+++ b/infrastructure/scripts/alert-processor_backup.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Backup script for alert-processor database
+set -e
+
+SERVICE_NAME="alert-processor"
+BACKUP_DIR="${BACKUP_DIR:-./backups}"
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql"
+
+# Create backup directory if it doesn't exist
+mkdir -p "$BACKUP_DIR"
+
+echo "Starting backup for $SERVICE_NAME database..."
+
+# Get database credentials from Kubernetes secrets
+DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.ALERT_PROCESSOR_DB_USER}' | base64 -d)
+DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.ALERT_PROCESSOR_DB_NAME}')
+
+# Get the pod name
+POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=alert-processor-db -o jsonpath='{.items[0].metadata.name}')
+
+if [ -z "$POD_NAME" ]; then
+    echo "Error: Could not find alert-processor database pod"
+    exit 1
+fi
+
+echo "Backing up to: $BACKUP_FILE"
+kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE"
+
+if [ $? -eq 0 ]; then
+    echo "Backup completed successfully: $BACKUP_FILE"
+    # Compress the backup
+    gzip "$BACKUP_FILE"
+    echo "Backup compressed: ${BACKUP_FILE}.gz"
+else
+    echo "Backup failed"
+    exit 1
+fi
diff --git a/infrastructure/scripts/alert-processor_restore.sh b/infrastructure/scripts/alert-processor_restore.sh
new file mode 100755
index 00000000..61436385
--- /dev/null
+++ b/infrastructure/scripts/alert-processor_restore.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Restore script for alert-processor database
+set -e
+
+SERVICE_NAME="alert-processor"
+BACKUP_FILE="$1"
+
+if [ -z "$BACKUP_FILE" ]; then
+    echo "Usage: $0 <backup_file>"
+    echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql"
+    exit 1
+fi
+
+if [ !
-f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.ALERT_PROCESSOR_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.ALERT_PROCESSOR_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=alert-processor-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find alert-processor database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/alert-processor_seed.sh b/infrastructure/scripts/alert-processor_seed.sh new file mode 100755 index 00000000..ad2a29ae --- /dev/null +++ b/infrastructure/scripts/alert-processor_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for alert-processor database +set -e + +SERVICE_NAME="alert-processor" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.ALERT_PROCESSOR_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.ALERT_PROCESSOR_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=alert-processor-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find alert-processor database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for alert-processor service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your alert-processor service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? 
-eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/auth_backup.sh b/infrastructure/scripts/auth_backup.sh new file mode 100755 index 00000000..2e0719b7 --- /dev/null +++ b/infrastructure/scripts/auth_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for auth database +set -e + +SERVICE_NAME="auth" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.AUTH_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.AUTH_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=auth-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find auth database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/auth_restore.sh b/infrastructure/scripts/auth_restore.sh new file mode 100755 index 00000000..96773fdb --- /dev/null +++ b/infrastructure/scripts/auth_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for auth database +set -e + +SERVICE_NAME="auth" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.AUTH_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.AUTH_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=auth-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find auth database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/auth_seed.sh b/infrastructure/scripts/auth_seed.sh new file mode 100755 index 00000000..b250c88e --- /dev/null +++ b/infrastructure/scripts/auth_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for auth database +set -e + +SERVICE_NAME="auth" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..."
+ +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.AUTH_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.AUTH_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=auth-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find auth database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for auth service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your auth service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/external_backup.sh b/infrastructure/scripts/external_backup.sh new file mode 100755 index 00000000..d0788fc8 --- /dev/null +++ b/infrastructure/scripts/external_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for external database +set -e + +SERVICE_NAME="external" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.EXTERNAL_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.EXTERNAL_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=external-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find external database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/external_restore.sh b/infrastructure/scripts/external_restore.sh new file mode 100755 index 00000000..b05edeb1 --- /dev/null +++ b/infrastructure/scripts/external_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for external database +set -e + +SERVICE_NAME="external" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ !
-f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.EXTERNAL_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.EXTERNAL_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=external-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find external database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/external_seed.sh b/infrastructure/scripts/external_seed.sh new file mode 100755 index 00000000..b27a0767 --- /dev/null +++ b/infrastructure/scripts/external_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for external database +set -e + +SERVICE_NAME="external" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.EXTERNAL_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.EXTERNAL_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=external-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find external database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for external service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your external service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? 
-eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/forecasting_backup.sh b/infrastructure/scripts/forecasting_backup.sh new file mode 100755 index 00000000..19edf3d7 --- /dev/null +++ b/infrastructure/scripts/forecasting_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for forecasting database +set -e + +SERVICE_NAME="forecasting" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.FORECASTING_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.FORECASTING_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=forecasting-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find forecasting database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/forecasting_restore.sh b/infrastructure/scripts/forecasting_restore.sh new file mode 100755 index 00000000..db7f1ed3 --- /dev/null +++ b/infrastructure/scripts/forecasting_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for forecasting database +set -e + +SERVICE_NAME="forecasting" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.FORECASTING_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.FORECASTING_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=forecasting-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find forecasting database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $?
-eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/forecasting_seed.sh b/infrastructure/scripts/forecasting_seed.sh new file mode 100755 index 00000000..681c2a63 --- /dev/null +++ b/infrastructure/scripts/forecasting_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for forecasting database +set -e + +SERVICE_NAME="forecasting" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.FORECASTING_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.FORECASTING_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=forecasting-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find forecasting database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for forecasting service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your forecasting service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/inventory_backup.sh b/infrastructure/scripts/inventory_backup.sh new file mode 100755 index 00000000..b74614c6 --- /dev/null +++ b/infrastructure/scripts/inventory_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for inventory database +set -e + +SERVICE_NAME="inventory" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.INVENTORY_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.INVENTORY_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=inventory-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find inventory database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? 
-eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/inventory_restore.sh b/infrastructure/scripts/inventory_restore.sh new file mode 100755 index 00000000..847e395a --- /dev/null +++ b/infrastructure/scripts/inventory_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for inventory database +set -e + +SERVICE_NAME="inventory" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.INVENTORY_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.INVENTORY_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=inventory-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find inventory database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/inventory_seed.sh b/infrastructure/scripts/inventory_seed.sh new file mode 100755 index 00000000..b5005747 --- /dev/null +++ b/infrastructure/scripts/inventory_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for inventory database +set -e + +SERVICE_NAME="inventory" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.INVENTORY_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.INVENTORY_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=inventory-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find inventory database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for inventory service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your inventory service +SELECT 'Seed file created. Please add your seed data.'
as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/notification_backup.sh b/infrastructure/scripts/notification_backup.sh new file mode 100755 index 00000000..e07ed5c2 --- /dev/null +++ b/infrastructure/scripts/notification_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for notification database +set -e + +SERVICE_NAME="notification" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.NOTIFICATION_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.NOTIFICATION_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=notification-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find notification database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/notification_restore.sh b/infrastructure/scripts/notification_restore.sh new file mode 100755 index 00000000..9d497fb3 --- /dev/null +++ b/infrastructure/scripts/notification_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for notification database +set -e + +SERVICE_NAME="notification" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.NOTIFICATION_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.NOTIFICATION_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=notification-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find notification database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $?
-eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/notification_seed.sh b/infrastructure/scripts/notification_seed.sh new file mode 100755 index 00000000..2d4cc13a --- /dev/null +++ b/infrastructure/scripts/notification_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for notification database +set -e + +SERVICE_NAME="notification" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.NOTIFICATION_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.NOTIFICATION_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=notification-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find notification database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for notification service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your notification service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/orders_backup.sh b/infrastructure/scripts/orders_backup.sh new file mode 100755 index 00000000..02400e42 --- /dev/null +++ b/infrastructure/scripts/orders_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for orders database +set -e + +SERVICE_NAME="orders" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.ORDERS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.ORDERS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=orders-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find orders database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? 
-eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/orders_restore.sh b/infrastructure/scripts/orders_restore.sh new file mode 100755 index 00000000..c9329e1b --- /dev/null +++ b/infrastructure/scripts/orders_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for orders database +set -e + +SERVICE_NAME="orders" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.ORDERS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.ORDERS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=orders-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find orders database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/orders_seed.sh b/infrastructure/scripts/orders_seed.sh new file mode 100755 index 00000000..9892827d --- /dev/null +++ b/infrastructure/scripts/orders_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for orders database +set -e + +SERVICE_NAME="orders" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.ORDERS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.ORDERS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=orders-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find orders database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for orders service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your orders service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $?
-eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/pos_backup.sh b/infrastructure/scripts/pos_backup.sh new file mode 100755 index 00000000..2995701d --- /dev/null +++ b/infrastructure/scripts/pos_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for pos database +set -e + +SERVICE_NAME="pos" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.POS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.POS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=pos-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find pos database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/pos_restore.sh b/infrastructure/scripts/pos_restore.sh new file mode 100755 index 00000000..62e84d08 --- /dev/null +++ b/infrastructure/scripts/pos_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for pos database +set -e + +SERVICE_NAME="pos" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.POS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.POS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=pos-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find pos database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/pos_seed.sh b/infrastructure/scripts/pos_seed.sh new file mode 100755 index 00000000..1f528b5f --- /dev/null +++ b/infrastructure/scripts/pos_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for pos database +set -e + +SERVICE_NAME="pos" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..."
+ +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.POS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.POS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=pos-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find pos database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for pos service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your pos service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/production_backup.sh b/infrastructure/scripts/production_backup.sh new file mode 100755 index 00000000..e0b889b0 --- /dev/null +++ b/infrastructure/scripts/production_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for production database +set -e + +SERVICE_NAME="production" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.PRODUCTION_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.PRODUCTION_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=production-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find production database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/production_restore.sh b/infrastructure/scripts/production_restore.sh new file mode 100755 index 00000000..115675ad --- /dev/null +++ b/infrastructure/scripts/production_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for production database +set -e + +SERVICE_NAME="production" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ !
-f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.PRODUCTION_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.PRODUCTION_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=production-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find production database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/production_seed.sh b/infrastructure/scripts/production_seed.sh new file mode 100755 index 00000000..b8923bfe --- /dev/null +++ b/infrastructure/scripts/production_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for production database +set -e + +SERVICE_NAME="production" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.PRODUCTION_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.PRODUCTION_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=production-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find production database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for production service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your production service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? 
-eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/recipes_backup.sh b/infrastructure/scripts/recipes_backup.sh new file mode 100755 index 00000000..ac8f2bf1 --- /dev/null +++ b/infrastructure/scripts/recipes_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for recipes database +set -e + +SERVICE_NAME="recipes" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.RECIPES_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.RECIPES_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=recipes-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find recipes database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/recipes_restore.sh b/infrastructure/scripts/recipes_restore.sh new file mode 100755 index 00000000..19fe3c74 --- /dev/null +++ b/infrastructure/scripts/recipes_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for recipes database +set -e + +SERVICE_NAME="recipes" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.RECIPES_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.RECIPES_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=recipes-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find recipes database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/recipes_seed.sh b/infrastructure/scripts/recipes_seed.sh new file mode 100755 index 00000000..b253eec9 --- /dev/null +++ b/infrastructure/scripts/recipes_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for recipes database +set -e + +SERVICE_NAME="recipes" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..."
+ +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.RECIPES_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.RECIPES_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=recipes-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find recipes database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for recipes service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your recipes service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/sales_backup.sh b/infrastructure/scripts/sales_backup.sh new file mode 100755 index 00000000..82411d01 --- /dev/null +++ b/infrastructure/scripts/sales_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for sales database +set -e + +SERVICE_NAME="sales" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.SALES_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.SALES_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=sales-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find sales database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/sales_restore.sh b/infrastructure/scripts/sales_restore.sh new file mode 100755 index 00000000..a40a9e0a --- /dev/null +++ b/infrastructure/scripts/sales_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for sales database +set -e + +SERVICE_NAME="sales" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ !
-f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.SALES_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.SALES_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=sales-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find sales database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/sales_seed.sh b/infrastructure/scripts/sales_seed.sh new file mode 100755 index 00000000..ba488fa5 --- /dev/null +++ b/infrastructure/scripts/sales_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for sales database +set -e + +SERVICE_NAME="sales" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.SALES_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.SALES_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=sales-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find sales database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for sales service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your sales service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/suppliers_backup.sh b/infrastructure/scripts/suppliers_backup.sh new file mode 100755 index 00000000..0626553b --- /dev/null +++ b/infrastructure/scripts/suppliers_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for suppliers database +set -e + +SERVICE_NAME="suppliers" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." 
+ +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.SUPPLIERS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.SUPPLIERS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=suppliers-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find suppliers database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/suppliers_restore.sh b/infrastructure/scripts/suppliers_restore.sh new file mode 100755 index 00000000..148f7d1d --- /dev/null +++ b/infrastructure/scripts/suppliers_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for suppliers database +set -e + +SERVICE_NAME="suppliers" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.SUPPLIERS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.SUPPLIERS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=suppliers-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find suppliers database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/suppliers_seed.sh b/infrastructure/scripts/suppliers_seed.sh new file mode 100755 index 00000000..9df347b8 --- /dev/null +++ b/infrastructure/scripts/suppliers_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for suppliers database +set -e + +SERVICE_NAME="suppliers" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.SUPPLIERS_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.SUPPLIERS_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=suppliers-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find suppliers database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..."
+ + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for suppliers service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your suppliers service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/tenant_backup.sh b/infrastructure/scripts/tenant_backup.sh new file mode 100755 index 00000000..3323cf04 --- /dev/null +++ b/infrastructure/scripts/tenant_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for tenant database +set -e + +SERVICE_NAME="tenant" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.TENANT_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.TENANT_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=tenant-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find tenant database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? -eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/tenant_restore.sh b/infrastructure/scripts/tenant_restore.sh new file mode 100755 index 00000000..8427eadc --- /dev/null +++ b/infrastructure/scripts/tenant_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for tenant database +set -e + +SERVICE_NAME="tenant" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.TENANT_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.TENANT_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=tenant-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find tenant database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..."
+ zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/tenant_seed.sh b/infrastructure/scripts/tenant_seed.sh new file mode 100755 index 00000000..c6e36b3a --- /dev/null +++ b/infrastructure/scripts/tenant_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for tenant database +set -e + +SERVICE_NAME="tenant" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.TENANT_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.TENANT_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=tenant-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find tenant database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for tenant service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your tenant service +SELECT 'Seed file created. Please add your seed data.' as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/infrastructure/scripts/training_backup.sh b/infrastructure/scripts/training_backup.sh new file mode 100755 index 00000000..e3bf37e7 --- /dev/null +++ b/infrastructure/scripts/training_backup.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Backup script for training database +set -e + +SERVICE_NAME="training" +BACKUP_DIR="${BACKUP_DIR:-./backups}" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +BACKUP_FILE="${BACKUP_DIR}/${SERVICE_NAME}_backup_${TIMESTAMP}.sql" + +# Create backup directory if it doesn't exist +mkdir -p "$BACKUP_DIR" + +echo "Starting backup for $SERVICE_NAME database..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.TRAINING_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.TRAINING_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=training-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find training database pod" + exit 1 +fi + +echo "Backing up to: $BACKUP_FILE" +kubectl exec "$POD_NAME" -n bakery-ia -- pg_dump -U "$DB_USER" "$DB_NAME" > "$BACKUP_FILE" + +if [ $? 
-eq 0 ]; then + echo "Backup completed successfully: $BACKUP_FILE" + # Compress the backup + gzip "$BACKUP_FILE" + echo "Backup compressed: ${BACKUP_FILE}.gz" +else + echo "Backup failed" + exit 1 +fi diff --git a/infrastructure/scripts/training_restore.sh b/infrastructure/scripts/training_restore.sh new file mode 100755 index 00000000..3d771141 --- /dev/null +++ b/infrastructure/scripts/training_restore.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Restore script for training database +set -e + +SERVICE_NAME="training" +BACKUP_FILE="$1" + +if [ -z "$BACKUP_FILE" ]; then + echo "Usage: $0 <backup_file>" + echo "Example: $0 ./backups/${SERVICE_NAME}_backup_20240101_120000.sql" + exit 1 +fi + +if [ ! -f "$BACKUP_FILE" ]; then + echo "Error: Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "Starting restore for $SERVICE_NAME database from: $BACKUP_FILE" + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.TRAINING_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.TRAINING_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=training-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find training database pod" + exit 1 +fi + +# Check if file is compressed +if [[ "$BACKUP_FILE" == *.gz ]]; then + echo "Decompressing backup file..." + zcat "$BACKUP_FILE" | kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" +else + kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$BACKUP_FILE" +fi + +if [ $? -eq 0 ]; then + echo "Restore completed successfully" +else + echo "Restore failed" + exit 1 +fi diff --git a/infrastructure/scripts/training_seed.sh b/infrastructure/scripts/training_seed.sh new file mode 100755 index 00000000..cbc0eabe --- /dev/null +++ b/infrastructure/scripts/training_seed.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Seeding script for training database +set -e + +SERVICE_NAME="training" +SEED_FILE="${SEED_FILE:-infrastructure/scripts/seeds/${SERVICE_NAME}_seed.sql}" + +echo "Starting database seeding for $SERVICE_NAME..." + +# Get database credentials from Kubernetes secrets +DB_USER=$(kubectl get secret database-secrets -n bakery-ia -o jsonpath='{.data.TRAINING_DB_USER}' | base64 -d) +DB_NAME=$(kubectl get configmap bakery-config -n bakery-ia -o jsonpath='{.data.TRAINING_DB_NAME}') + +# Get the pod name +POD_NAME=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/name=training-db -o jsonpath='{.items[0].metadata.name}') + +if [ -z "$POD_NAME" ]; then + echo "Error: Could not find training database pod" + exit 1 +fi + +# Check if seed file exists +if [ ! -f "$SEED_FILE" ]; then + echo "Warning: Seed file not found: $SEED_FILE" + echo "Creating sample seed file..." + + mkdir -p "infrastructure/scripts/seeds" + cat > "$SEED_FILE" << 'SEED_EOF' +-- Sample seed data for training service +-- Add your seed data here + +-- Example: +-- INSERT INTO sample_table (name, created_at) VALUES +-- ('Sample Data 1', NOW()), +-- ('Sample Data 2', NOW()); + +-- Note: Replace with actual seed data for your training service +SELECT 'Seed file created. Please add your seed data.'
as message; +SEED_EOF + + echo "Sample seed file created at: $SEED_FILE" + echo "Please edit this file to add your actual seed data" + exit 0 +fi + +echo "Applying seed data from: $SEED_FILE" +kubectl exec -i "$POD_NAME" -n bakery-ia -- psql -U "$DB_USER" "$DB_NAME" < "$SEED_FILE" + +if [ $? -eq 0 ]; then + echo "Seeding completed successfully" +else + echo "Seeding failed" + exit 1 +fi diff --git a/scripts/dev-reset-database.sh b/scripts/dev-reset-database.sh new file mode 100755 index 00000000..83ee5394 --- /dev/null +++ b/scripts/dev-reset-database.sh @@ -0,0 +1,231 @@ +#!/bin/bash + +# Development Database Reset Script +# +# This script helps developers reset their databases to a clean slate. +# It can reset individual services or all services at once. + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +NAMESPACE="bakery-ia" +SERVICES=("alert-processor" "auth" "external" "forecasting" "inventory" "notification" "orders" "pos" "production" "recipes" "sales" "suppliers" "tenant" "training") + +print_banner() { + echo -e "${BLUE}" + echo "╔═══════════════════════════════════════════════════════════════╗" + echo "║ Bakery-IA Development Database Reset ║" + echo "║ ║" + echo "║ This script will reset database(s) to a clean slate ║" + echo "║ WARNING: This will delete all existing data! ║" + echo "╚═══════════════════════════════════════════════════════════════╝" + echo -e "${NC}" +} + +show_usage() { + echo "Usage: $0 [OPTIONS] [SERVICE]" + echo "" + echo "Options:" + echo " -a, --all Reset all services" + echo " -s, --service NAME Reset specific service" + echo " -l, --list List available services" + echo " -y, --yes Skip confirmation prompts" + echo " -h, --help Show this help" + echo "" + echo "Examples:" + echo " $0 --service auth # Reset only auth service" + echo " $0 --all # Reset all services" + echo " $0 auth # Reset auth service (short form)" +} + +list_services() { + echo -e "${YELLOW}Available services:${NC}" + for service in "${SERVICES[@]}"; do + echo " - $service" + done +} + +confirm_action() { + local service="$1" + local message="${2:-Are you sure you want to reset}" + + if [[ "$SKIP_CONFIRM" == "true" ]]; then + return 0 + fi + + echo -e "${YELLOW}$message the database for service: ${RED}$service${YELLOW}?${NC}" + echo -e "${RED}This will delete ALL existing data!${NC}" + read -p "Type 'yes' to continue: " confirmation + + if [[ "$confirmation" != "yes" ]]; then + echo -e "${YELLOW}Operation cancelled.${NC}" + return 1 + fi + return 0 +} + +enable_force_recreate() { + echo -e "${BLUE}Enabling force recreate mode...${NC}" + + # Update the development config + kubectl patch configmap development-config -n "$NAMESPACE" \ + --patch='{"data":{"DB_FORCE_RECREATE":"true"}}' 2>/dev/null || \ + kubectl create configmap development-config -n "$NAMESPACE" \ + --from-literal=DB_FORCE_RECREATE=true \ + --from-literal=DEVELOPMENT_MODE=true \ + --from-literal=DEBUG_LOGGING=true || true +} + +disable_force_recreate() { + echo -e "${BLUE}Disabling force recreate mode...${NC}" + + kubectl patch configmap development-config -n "$NAMESPACE" \ + --patch='{"data":{"DB_FORCE_RECREATE":"false"}}' 2>/dev/null || true +} + +reset_service() { + local service="$1" + echo -e "${BLUE}Resetting database for service: $service${NC}" + + # Delete existing migration job if it exists + kubectl delete job "${service}-migration" -n "$NAMESPACE" 2>/dev/null || true + + # Wait a moment for cleanup + sleep 2 + 
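+ # A completed Kubernetes Job cannot simply be re-applied, which is why the old job is deleted above (rather than patched) before a fresh one is created from the manifest below.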
+ # Create new migration job + echo -e "${YELLOW}Creating migration job for $service...${NC}" + kubectl apply -f "infrastructure/kubernetes/base/migrations/${service}-migration-job.yaml" + + # Wait for job to complete + echo -e "${YELLOW}Waiting for migration to complete...${NC}" + kubectl wait --for=condition=complete job/"${service}-migration" -n "$NAMESPACE" --timeout=300s + + # Check job status + if kubectl get job "${service}-migration" -n "$NAMESPACE" -o jsonpath='{.status.succeeded}' | grep -q "1"; then + echo -e "${GREEN}✓ Database reset completed successfully for $service${NC}" + else + echo -e "${RED}✗ Database reset failed for $service${NC}" + echo "Check logs with: kubectl logs -l job-name=${service}-migration -n $NAMESPACE" + return 1 + fi +} + +reset_all_services() { + echo -e "${BLUE}Resetting databases for all services...${NC}" + + local failed_services=() + + for service in "${SERVICES[@]}"; do + echo -e "\n${BLUE}Processing $service...${NC}" + if ! reset_service "$service"; then + failed_services+=("$service") + fi + done + + if [[ ${#failed_services[@]} -eq 0 ]]; then + echo -e "\n${GREEN}✓ All services reset successfully!${NC}" + else + echo -e "\n${RED}✗ Some services failed to reset:${NC}" + for service in "${failed_services[@]}"; do + echo -e " ${RED}- $service${NC}" + done + return 1 + fi +} + +cleanup_migration_jobs() { + echo -e "${BLUE}Cleaning up migration jobs...${NC}" + kubectl delete jobs -l app.kubernetes.io/component=migration -n "$NAMESPACE" 2>/dev/null || true +} + +main() { + local action="" + local target_service="" + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + -a|--all) + action="all" + shift + ;; + -s|--service) + action="service" + target_service="$2" + shift 2 + ;; + -l|--list) + list_services + exit 0 + ;; + -y|--yes) + SKIP_CONFIRM="true" + shift + ;; + -h|--help) + show_usage + exit 0 + ;; + *) + if [[ -z "$action" && -z "$target_service" ]]; then + action="service" + target_service="$1" + fi + shift + ;; + esac + done + + print_banner + + # Validate arguments + if [[ -z "$action" ]]; then + echo -e "${RED}Error: No action specified${NC}" + show_usage + exit 1 + fi + + if [[ "$action" == "service" && -z "$target_service" ]]; then + echo -e "${RED}Error: Service name required${NC}" + show_usage + exit 1 + fi + + if [[ "$action" == "service" ]]; then + # Validate service name + if [[ ! " ${SERVICES[*]} " =~ " ${target_service} " ]]; then + echo -e "${RED}Error: Invalid service name: $target_service${NC}" + list_services + exit 1 + fi + fi + + # Execute action + case "$action" in + "all") + if confirm_action "ALL SERVICES" "Are you sure you want to reset ALL databases? 
This will affect"; then + enable_force_recreate + trap disable_force_recreate EXIT + reset_all_services + fi + ;; + "service") + if confirm_action "$target_service"; then + enable_force_recreate + trap disable_force_recreate EXIT + reset_service "$target_service" + fi + ;; + esac +} + +# Run main function +main "$@" \ No newline at end of file diff --git a/scripts/dev-workflow.sh b/scripts/dev-workflow.sh new file mode 100755 index 00000000..208edca8 --- /dev/null +++ b/scripts/dev-workflow.sh @@ -0,0 +1,235 @@ +#!/bin/bash + +# Development Workflow Script for Bakery-IA +# +# This script provides common development workflows with database management + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +show_usage() { + echo "Development Workflow Script for Bakery-IA" + echo "" + echo "Usage: $0 [COMMAND] [OPTIONS]" + echo "" + echo "Commands:" + echo " start Start development environment" + echo " reset Reset database(s) and restart" + echo " clean Clean start (drop all data)" + echo " migrate Run migrations only" + echo " logs Show service logs" + echo " status Show deployment status" + echo "" + echo "Options:" + echo " --service NAME Target specific service (default: all)" + echo " --profile NAME Use specific Skaffold profile (minimal, full, dev)" + echo " --clean-slate Force recreate all tables" + echo " --help Show this help" + echo "" + echo "Examples:" + echo " $0 start --profile minimal # Start with minimal services" + echo " $0 reset --service auth # Reset auth service only" + echo " $0 clean --profile dev # Clean start with dev profile" +} + +start_development() { + local profile="${1:-dev}" + local clean_slate="${2:-false}" + + echo -e "${BLUE}Starting development environment with profile: $profile${NC}" + + if [[ "$clean_slate" == "true" ]]; then + echo -e "${YELLOW}Enabling clean slate mode...${NC}" + kubectl create configmap development-config --dry-run=client -o yaml \ + --from-literal=DB_FORCE_RECREATE=true \ + --from-literal=DEVELOPMENT_MODE=true \ + --from-literal=DEBUG_LOGGING=true | \ + kubectl apply -f - + fi + + # Start with Skaffold + echo -e "${BLUE}Starting Skaffold with profile: $profile${NC}" + skaffold dev --profile="$profile" --port-forward +} + +reset_service_and_restart() { + local service="$1" + local profile="${2:-dev}" + + echo -e "${BLUE}Resetting service: $service${NC}" + + # Reset the database + ./scripts/dev-reset-database.sh --service "$service" --yes + + # Restart the deployment + kubectl rollout restart deployment "${service}-service" -n bakery-ia 2>/dev/null || \ + kubectl rollout restart deployment "$service" -n bakery-ia 2>/dev/null || true + + echo -e "${GREEN}Service $service reset and restarted${NC}" +} + +clean_start() { + local profile="${1:-dev}" + + echo -e "${YELLOW}Performing clean start...${NC}" + + # Stop existing Skaffold process + pkill -f "skaffold" || true + + # Clean up all deployments + kubectl delete jobs -l app.kubernetes.io/component=migration -n bakery-ia 2>/dev/null || true + + # Wait a moment + sleep 2 + + # Start with clean slate + start_development "$profile" "true" +} + +run_migrations() { + local service="$1" + + if [[ -n "$service" ]]; then + echo -e "${BLUE}Running migration for service: $service${NC}" + kubectl delete job "${service}-migration" -n bakery-ia 2>/dev/null || true + kubectl apply -f "infrastructure/kubernetes/base/migrations/${service}-migration-job.yaml" + kubectl wait --for=condition=complete job/"${service}-migration" 
-n bakery-ia --timeout=300s + else + echo -e "${BLUE}Running migrations for all services${NC}" + kubectl delete jobs -l app.kubernetes.io/component=migration -n bakery-ia 2>/dev/null || true + kubectl apply -f infrastructure/kubernetes/base/migrations/ + # Wait for all migration jobs + for job in $(kubectl get jobs -l app.kubernetes.io/component=migration -n bakery-ia -o name); do + kubectl wait --for=condition=complete "$job" -n bakery-ia --timeout=300s + done + fi + + echo -e "${GREEN}Migrations completed${NC}" +} + +show_logs() { + local service="$1" + + if [[ -n "$service" ]]; then + echo -e "${BLUE}Showing logs for service: $service${NC}" + kubectl logs -l app.kubernetes.io/name="${service}" -n bakery-ia --tail=100 -f + else + echo -e "${BLUE}Available services for logs:${NC}" + kubectl get deployments -n bakery-ia -o custom-columns="NAME:.metadata.name" + fi +} + +show_status() { + echo -e "${BLUE}Deployment Status:${NC}" + echo "" + + echo -e "${YELLOW}Pods:${NC}" + kubectl get pods -n bakery-ia + + echo "" + echo -e "${YELLOW}Services:${NC}" + kubectl get services -n bakery-ia + + echo "" + echo -e "${YELLOW}Jobs:${NC}" + kubectl get jobs -n bakery-ia + + echo "" + echo -e "${YELLOW}ConfigMaps:${NC}" + kubectl get configmaps -n bakery-ia +} + +main() { + local command="" + local service="" + local profile="dev" + local clean_slate="false" + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + start|reset|clean|migrate|logs|status) + command="$1" + shift + ;; + --service) + service="$2" + shift 2 + ;; + --profile) + profile="$2" + shift 2 + ;; + --clean-slate) + clean_slate="true" + shift + ;; + --help) + show_usage + exit 0 + ;; + *) + if [[ -z "$command" ]]; then + command="$1" + fi + shift + ;; + esac + done + + if [[ -z "$command" ]]; then + show_usage + exit 1 + fi + + case "$command" in + "start") + start_development "$profile" "$clean_slate" + ;; + "reset") + if [[ -n "$service" ]]; then + reset_service_and_restart "$service" "$profile" + else + echo -e "${RED}Error: --service required for reset command${NC}" + exit 1 + fi + ;; + "clean") + clean_start "$profile" + ;; + "migrate") + run_migrations "$service" + ;; + "logs") + show_logs "$service" + ;; + "status") + show_status + ;; + *) + echo -e "${RED}Error: Unknown command: $command${NC}" + show_usage + exit 1 + ;; + esac +} + +# Check if kubectl and skaffold are available +if ! command -v kubectl &> /dev/null; then + echo -e "${RED}Error: kubectl is not installed or not in PATH${NC}" + exit 1 +fi + +if ! command -v skaffold &> /dev/null; then + echo -e "${RED}Error: skaffold is not installed or not in PATH${NC}" + exit 1 +fi + +# Run main function +main "$@" \ No newline at end of file diff --git a/scripts/run_migrations.py b/scripts/run_migrations.py new file mode 100755 index 00000000..ae24296c --- /dev/null +++ b/scripts/run_migrations.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +""" +Enhanced Migration Runner + +Handles automatic table creation and Alembic migrations for Kubernetes deployments. +Supports both first-time deployments and incremental migrations. 
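+ +Typical invocations (mirroring the argparse flags defined below): + python scripts/run_migrations.py auth + python scripts/run_migrations.py inventory --force-recreate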
+""" + +import os +import sys +import asyncio +import argparse +import structlog +from pathlib import Path + +# Add the project root to the Python path +project_root = Path(__file__).parent.parent +sys.path.insert(0, str(project_root)) + +from shared.database.base import DatabaseManager +from shared.database.init_manager import initialize_service_database + +# Configure logging +structlog.configure( + processors=[ + structlog.stdlib.filter_by_level, + structlog.stdlib.add_logger_name, + structlog.stdlib.add_log_level, + structlog.stdlib.PositionalArgumentsFormatter(), + structlog.processors.TimeStamper(fmt="iso"), + structlog.processors.StackInfoRenderer(), + structlog.processors.format_exc_info, + structlog.processors.JSONRenderer() + ], + context_class=dict, + logger_factory=structlog.stdlib.LoggerFactory(), + wrapper_class=structlog.stdlib.BoundLogger, + cache_logger_on_first_use=True, +) + +logger = structlog.get_logger() + + +async def run_service_migration(service_name: str, force_recreate: bool = False) -> bool: + """ + Run migration for a specific service + + Args: + service_name: Name of the service (e.g., 'auth', 'inventory') + force_recreate: Whether to force recreate tables (development mode) + + Returns: + True if successful, False otherwise + """ + logger.info("Starting migration for service", service=service_name, force_recreate=force_recreate) + + try: + # Get database URL from environment (try both constructed and direct approaches) + db_url_key = f"{service_name.upper().replace('-', '_')}_DATABASE_URL" + database_url = os.getenv(db_url_key) or os.getenv("DATABASE_URL") + + # If no direct URL, construct from components + if not database_url: + host = os.getenv("POSTGRES_HOST") + port = os.getenv("POSTGRES_PORT") + db_name = os.getenv("POSTGRES_DB") + user = os.getenv("POSTGRES_USER") + password = os.getenv("POSTGRES_PASSWORD") + + if all([host, port, db_name, user, password]): + database_url = f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{db_name}" + logger.info("Constructed database URL from components", host=host, port=port, db=db_name) + else: + logger.error("Database connection details not found", + db_url_key=db_url_key, + host=bool(host), + port=bool(port), + db=bool(db_name), + user=bool(user), + password=bool(password)) + return False + + # Create database manager + db_manager = DatabaseManager(database_url=database_url) + + # Initialize the database + result = await initialize_service_database( + database_manager=db_manager, + service_name=service_name, + force_recreate=force_recreate + ) + + logger.info("Migration completed successfully", service=service_name, result=result) + return True + + except Exception as e: + logger.error("Migration failed", service=service_name, error=str(e)) + return False + + finally: + # Cleanup database connections + try: + await db_manager.close_connections() + except: + pass + + +async def main(): + """Main migration runner""" + parser = argparse.ArgumentParser(description="Enhanced Migration Runner") + parser.add_argument("service", help="Service name (e.g., auth, inventory)") + parser.add_argument("--force-recreate", action="store_true", + help="Force recreate tables (development mode)") + parser.add_argument("--verbose", "-v", action="store_true", help="Verbose logging") + + args = parser.parse_args() + + if args.verbose: + logger.info("Starting migration runner", service=args.service, + force_recreate=args.force_recreate) + + # Run the migration + success = await run_service_migration(args.service, 
args.force_recreate) + + if success: + logger.info("Migration runner completed successfully") + sys.exit(0) + else: + logger.error("Migration runner failed") + sys.exit(1) + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/services/alert_processor/alembic.ini b/services/alert_processor/alembic.ini index 5871f0f2..903ed566 100644 --- a/services/alert_processor/alembic.ini +++ b/services/alert_processor/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/alert_processor/migrations/env.py b/services/alert_processor/migrations/env.py index 9a6237f3..6f0f1a56 100644 --- a/services/alert_processor/migrations/env.py +++ b/services/alert_processor/migrations/env.py @@ -35,8 +35,23 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +database_url = os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/auth/Dockerfile b/services/auth/Dockerfile index 6699139b..1849bcfe 100644 --- a/services/auth/Dockerfile +++ b/services/auth/Dockerfile @@ -26,6 +26,9 @@ COPY --from=shared /shared /app/shared # Copy application code COPY services/auth/ . 
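+# Bundle the shared migration runner (scripts/run_migrations.py) in the image so the Kubernetes migration job can invoke it from /app/scripts/.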
+# Copy scripts directory +COPY scripts/ /app/scripts/ + # Add shared libraries to Python path ENV PYTHONPATH="/app:/app/shared:${PYTHONPATH:-}" diff --git a/services/auth/migrations/alembic.ini b/services/auth/alembic.ini similarity index 98% rename from services/auth/migrations/alembic.ini rename to services/auth/alembic.ini index f6521104..d821d389 100644 --- a/services/auth/migrations/alembic.ini +++ b/services/auth/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/auth/app/main.py b/services/auth/app/main.py index 72c3295d..9e77dfee 100644 --- a/services/auth/app/main.py +++ b/services/auth/app/main.py @@ -3,6 +3,7 @@ Authentication Service Main Application """ from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api import auth, users, onboarding @@ -13,6 +14,27 @@ from shared.service_base import StandardFastAPIService class AuthService(StandardFastAPIService): """Authentication Service with standardized setup""" + expected_migration_version = "001_initial_auth" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks auth_expected_tables = [ diff --git a/services/auth/migrations/env.py b/services/auth/migrations/env.py index 2d1b0dd6..be4b98e9 100644 --- a/services/auth/migrations/env.py +++ b/services/auth/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('AUTH_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: 
config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/external/Dockerfile b/services/external/Dockerfile index 6818ef47..e59a43e4 100644 --- a/services/external/Dockerfile +++ b/services/external/Dockerfile @@ -20,6 +20,9 @@ COPY shared/ /app/shared/ # Copy application code COPY services/external/app/ /app/app/ +# Copy scripts directory +COPY scripts/ /app/scripts/ + # Set Python path to include shared modules ENV PYTHONPATH=/app diff --git a/services/external/alembic.ini b/services/external/alembic.ini index 64eb10a8..db5d7816 100644 --- a/services/external/alembic.ini +++ b/services/external/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/external/app/main.py b/services/external/app/main.py index 5e9b332f..869796c5 100644 --- a/services/external/app/main.py +++ b/services/external/app/main.py @@ -4,6 +4,7 @@ External Service Main Application """ from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.services.messaging import setup_messaging, cleanup_messaging @@ -16,6 +17,27 @@ from app.api.traffic import router as traffic_router class ExternalService(StandardFastAPIService): """External Data Service with standardized setup""" + expected_migration_version = "001_initial_external" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks external_expected_tables = [ diff --git a/services/external/migrations/env.py b/services/external/migrations/env.py index 4ff8c2c2..50b1dc42 100644 --- a/services/external/migrations/env.py +++ b/services/external/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('EXTERNAL_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, 
postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/forecasting/alembic.ini b/services/forecasting/alembic.ini index a9c686cc..29ac23a3 100644 --- a/services/forecasting/alembic.ini +++ b/services/forecasting/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/forecasting/app/main.py b/services/forecasting/app/main.py index 0d8fbdd3..1138724a 100644 --- a/services/forecasting/app/main.py +++ b/services/forecasting/app/main.py @@ -7,6 +7,7 @@ Demand prediction and forecasting service for bakery operations """ from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api import forecasts, predictions @@ -18,6 +19,27 @@ from shared.service_base import StandardFastAPIService class ForecastingService(StandardFastAPIService): """Forecasting Service with standardized setup""" + expected_migration_version = "001_initial_forecasting" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks forecasting_expected_tables = [ diff --git a/services/forecasting/migrations/env.py b/services/forecasting/migrations/env.py index 1c68f88c..a98f4d40 100644 --- a/services/forecasting/migrations/env.py +++ b/services/forecasting/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('FORECASTING_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = 
f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/forecasting/migrations/env.py.bak b/services/forecasting/migrations/env.py.bak new file mode 100644 index 00000000..c96e5786 --- /dev/null +++ b/services/forecasting/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for forecasting service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('FORECASTING_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() 
as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/inventory/Dockerfile b/services/inventory/Dockerfile index dd5ee4fe..cc843296 100644 --- a/services/inventory/Dockerfile +++ b/services/inventory/Dockerfile @@ -19,6 +19,9 @@ COPY shared/ /app/shared/ # Copy application code COPY services/inventory/app/ /app/app/ +# Copy scripts directory +COPY scripts/ /app/scripts/ + # Set Python path to include shared modules ENV PYTHONPATH=/app diff --git a/services/inventory/alembic.ini b/services/inventory/alembic.ini index d3ef91f4..812a1d67 100644 --- a/services/inventory/alembic.ini +++ b/services/inventory/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/inventory/app/main.py b/services/inventory/app/main.py index 178ed7c7..3b3b11eb 100644 --- a/services/inventory/app/main.py +++ b/services/inventory/app/main.py @@ -5,6 +5,7 @@ Inventory Service FastAPI Application import os from fastapi import FastAPI +from sqlalchemy import text # Import core modules from app.core.config import settings @@ -21,6 +22,27 @@ from app.api.food_safety import router as food_safety_router class InventoryService(StandardFastAPIService): """Inventory Service with standardized setup""" + expected_migration_version = "001_initial_inventory" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks inventory_expected_tables = [ diff --git a/services/inventory/migrations/alembic.ini b/services/inventory/migrations/alembic.ini deleted file mode 100644 index ed8d8e58..00000000 --- a/services/inventory/migrations/alembic.ini +++ /dev/null @@ -1,93 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = . - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# sys.path path, will be prepended to sys.path if present. -# defaults to the current working directory. -prepend_sys_path = . - -# timezone to use when rendering the date within the migration file -# as well as the filename. 
-# If specified, requires the python-dateutil library that can be -# installed by adding `alembic[tz]` to the pip requirements -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version number format -# Uses Alembic datetime format -version_num_format = %%(year)d%%(month).2d%%(day).2d_%%(hour).2d%%(minute).2d_%%(second).2d - -# version name format -version_path_separator = / - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -sqlalchemy.url = postgresql+asyncpg://inventory_user:inventory_pass123@inventory-db:5432/inventory_db - - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks = black -# black.type = console_scripts -# black.entrypoint = black -# black.options = -l 79 REVISION_SCRIPT_FILENAME - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S \ No newline at end of file diff --git a/services/inventory/migrations/env.py b/services/inventory/migrations/env.py index bc39b566..7d97cab4 100644 --- a/services/inventory/migrations/env.py +++ b/services/inventory/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('INVENTORY_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/inventory/migrations/env.py.bak b/services/inventory/migrations/env.py.bak new file mode 100644 index 00000000..796835e6 --- /dev/null +++ b/services/inventory/migrations/env.py.bak @@ 
-0,0 +1,111 @@ +"""Alembic environment configuration for inventory service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('INVENTORY_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/notification/alembic.ini b/services/notification/alembic.ini index 3c5f1a23..3a12028a 100644 --- a/services/notification/alembic.ini +++ 
b/services/notification/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/notification/app/main.py b/services/notification/app/main.py index 1295e7aa..fad076ab 100644 --- a/services/notification/app/main.py +++ b/services/notification/app/main.py @@ -7,6 +7,7 @@ Handles email, WhatsApp notifications and SSE for real-time alerts/recommendatio """ from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api.notifications import router as notification_router @@ -22,6 +23,27 @@ from shared.service_base import StandardFastAPIService class NotificationService(StandardFastAPIService): """Notification Service with standardized setup""" + expected_migration_version = "001_initial_notification" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks notification_expected_tables = [ diff --git a/services/notification/migrations/env.py b/services/notification/migrations/env.py index 5dd75343..a5984e15 100644 --- a/services/notification/migrations/env.py +++ b/services/notification/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('NOTIFICATION_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/notification/migrations/env.py.bak b/services/notification/migrations/env.py.bak new file mode 100644 index 00000000..28987455 --- /dev/null +++ b/services/notification/migrations/env.py.bak @@ 
-0,0 +1,111 @@ +"""Alembic environment configuration for notification service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('NOTIFICATION_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/orders/alembic.ini b/services/orders/alembic.ini index 4657e396..fc28923e 100644 --- a/services/orders/alembic.ini +++ b/services/orders/alembic.ini @@ 
-29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/orders/app/main.py b/services/orders/app/main.py index 9e090c65..ba5dee99 100644 --- a/services/orders/app/main.py +++ b/services/orders/app/main.py @@ -7,6 +7,7 @@ Customer orders and procurement planning service """ from fastapi import FastAPI, Request +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api.orders import router as orders_router @@ -18,6 +19,27 @@ from shared.service_base import StandardFastAPIService class OrdersService(StandardFastAPIService): """Orders Service with standardized setup""" + expected_migration_version = "001_initial_orders" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks orders_expected_tables = [ diff --git a/services/orders/migrations/env.py b/services/orders/migrations/env.py index 23899741..b791ad0e 100644 --- a/services/orders/migrations/env.py +++ b/services/orders/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('ORDERS_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/orders/migrations/env.py.bak b/services/orders/migrations/env.py.bak new file mode 100644 index 00000000..dbede570 --- /dev/null +++ b/services/orders/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for orders service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig 
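+# Note: pool.NullPool is passed to async_engine_from_config below so this one-shot migration run does not hold pooled connections open.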
+from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('ORDERS_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/pos/alembic.ini b/services/pos/alembic.ini index 3bbc9c16..39a61e50 100644 --- a/services/pos/alembic.ini +++ b/services/pos/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator 
version_path_separator = os diff --git a/services/pos/app/main.py b/services/pos/app/main.py index 1c2372ad..5d728fa9 100644 --- a/services/pos/app/main.py +++ b/services/pos/app/main.py @@ -5,6 +5,7 @@ Handles integration with external POS systems (Square, Toast, Lightspeed) import time from fastapi import FastAPI, Request +from sqlalchemy import text from app.core.config import settings from app.api import pos_config, webhooks, sync from app.core.database import database_manager @@ -14,6 +15,27 @@ from shared.service_base import StandardFastAPIService class POSService(StandardFastAPIService): """POS Integration Service with standardized setup""" + expected_migration_version = "001_initial_pos" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks pos_expected_tables = [ diff --git a/services/pos/migrations/env.py b/services/pos/migrations/env.py index 3b816318..273706c5 100644 --- a/services/pos/migrations/env.py +++ b/services/pos/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('POS_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/pos/migrations/env.py.bak b/services/pos/migrations/env.py.bak new file mode 100644 index 00000000..abd6484c --- /dev/null +++ b/services/pos/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for pos service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the 
Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('POS_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/production/alembic.ini b/services/production/alembic.ini index 930545ae..48183b60 100644 --- a/services/production/alembic.ini +++ b/services/production/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/production/app/main.py b/services/production/app/main.py index 8adfd758..b35a0a73 100644 --- a/services/production/app/main.py +++ 
b/services/production/app/main.py @@ -8,6 +8,7 @@ Production planning and batch management service import time from fastapi import FastAPI, Request +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api.production import router as production_router @@ -18,6 +19,27 @@ from shared.service_base import StandardFastAPIService class ProductionService(StandardFastAPIService): """Production Service with standardized setup""" + expected_migration_version = "001_initial_production" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks production_expected_tables = [ diff --git a/services/production/migrations/env.py b/services/production/migrations/env.py index 3c93d7da..9f80b66b 100644 --- a/services/production/migrations/env.py +++ b/services/production/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('PRODUCTION_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/production/migrations/env.py.bak b/services/production/migrations/env.py.bak new file mode 100644 index 00000000..f0ff6df3 --- /dev/null +++ b/services/production/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for production service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 
"..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('PRODUCTION_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/recipes/alembic.ini b/services/recipes/alembic.ini index 0c5fb1b1..31416bc8 100644 --- a/services/recipes/alembic.ini +++ b/services/recipes/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/recipes/app/main.py b/services/recipes/app/main.py index ee3e1f1c..5a188c59 100644 --- a/services/recipes/app/main.py +++ b/services/recipes/app/main.py @@ -6,6 +6,7 @@ Handles recipe management, production planning, and 
inventory consumption tracki import time from fastapi import FastAPI, Request +from sqlalchemy import text from fastapi.middleware.gzip import GZipMiddleware from .core.config import settings @@ -19,6 +20,27 @@ from .models import recipes as recipe_models class RecipesService(StandardFastAPIService): """Recipes Service with standardized setup""" + expected_migration_version = "001_initial_recipes" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks recipes_expected_tables = [ diff --git a/services/recipes/migrations/env.py b/services/recipes/migrations/env.py index 9b745e99..eec898fc 100644 --- a/services/recipes/migrations/env.py +++ b/services/recipes/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('RECIPES_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/recipes/migrations/env.py.bak b/services/recipes/migrations/env.py.bak new file mode 100644 index 00000000..8611c9dd --- /dev/null +++ b/services/recipes/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for recipes service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 
"..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('RECIPES_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/sales/alembic.ini b/services/sales/alembic.ini index 19270109..c2ee72e5 100644 --- a/services/sales/alembic.ini +++ b/services/sales/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/sales/app/main.py b/services/sales/app/main.py index 2b6129fd..2b55df95 100644 --- a/services/sales/app/main.py +++ b/services/sales/app/main.py @@ -4,6 +4,7 @@ Sales Service Main Application """ from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from shared.service_base import StandardFastAPIService @@ -15,6 +16,27 @@ 
from app.api.import_data import router as import_router class SalesService(StandardFastAPIService): """Sales Service with standardized setup""" + expected_migration_version = "001_initial_sales" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks sales_expected_tables = ['sales_data', 'sales_import_jobs'] diff --git a/services/sales/migrations/env.py b/services/sales/migrations/env.py index 8471c8e6..48860b98 100644 --- a/services/sales/migrations/env.py +++ b/services/sales/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('SALES_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/sales/migrations/env.py.bak b/services/sales/migrations/env.py.bak new file mode 100644 index 00000000..8a54ba85 --- /dev/null +++ b/services/sales/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for sales service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all 
models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('SALES_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/suppliers/alembic.ini b/services/suppliers/alembic.ini index 2bdb187b..4a891bba 100644 --- a/services/suppliers/alembic.ini +++ b/services/suppliers/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/suppliers/app/main.py b/services/suppliers/app/main.py index 2fc2ea07..56dd9ed8 100644 --- a/services/suppliers/app/main.py +++ b/services/suppliers/app/main.py @@ -5,6 +5,7 @@ Supplier & Procurement Service FastAPI Application import os from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api import suppliers, purchase_orders, deliveries @@ -16,6 +17,27 @@ from app.api.performance import router as performance_router class SuppliersService(StandardFastAPIService): """Suppliers Service with 
standardized setup""" + expected_migration_version = "001_initial_suppliers" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks suppliers_expected_tables = [ diff --git a/services/suppliers/migrations/env.py b/services/suppliers/migrations/env.py index d4e0f96b..34510858 100644 --- a/services/suppliers/migrations/env.py +++ b/services/suppliers/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('SUPPLIERS_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/suppliers/migrations/env.py.bak b/services/suppliers/migrations/env.py.bak new file mode 100644 index 00000000..2e73c44a --- /dev/null +++ b/services/suppliers/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for suppliers service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except 
ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('SUPPLIERS_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/tenant/alembic.ini b/services/tenant/alembic.ini index 64ebc77e..bf8d0db8 100644 --- a/services/tenant/alembic.ini +++ b/services/tenant/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/tenant/app/main.py b/services/tenant/app/main.py index b446a7e5..6ddf9997 100644 --- a/services/tenant/app/main.py +++ b/services/tenant/app/main.py @@ -4,6 +4,7 @@ Tenant Service FastAPI application """ from fastapi import FastAPI +from sqlalchemy import text from app.core.config import settings from app.core.database import database_manager from app.api import tenants, subscriptions, webhooks @@ -13,6 +14,27 @@ from shared.service_base import StandardFastAPIService class TenantService(StandardFastAPIService): """Tenant Service with standardized setup""" + expected_migration_version = "001_initial_tenant" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" 
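+        # Fail fast: check the schema revision before the base class finishes
+        # startup, so a mismatched database aborts boot instead of surfacing
+        # later as runtime errors.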
+ await self.verify_migrations() + await super().on_startup(app) + + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks tenant_expected_tables = ['tenants', 'tenant_members', 'subscriptions'] diff --git a/services/tenant/migrations/env.py b/services/tenant/migrations/env.py index 7f463c4e..8ed2456b 100644 --- a/services/tenant/migrations/env.py +++ b/services/tenant/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('TENANT_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/tenant/migrations/env.py.bak b/services/tenant/migrations/env.py.bak new file mode 100644 index 00000000..de8fa14b --- /dev/null +++ b/services/tenant/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for tenant service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object 
+config = context.config + +# Set database URL from environment variables or settings +database_url = os.getenv('TENANT_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/services/training/alembic.ini b/services/training/alembic.ini index 98351a83..f48da60c 100644 --- a/services/training/alembic.ini +++ b/services/training/alembic.ini @@ -29,7 +29,7 @@ revision_environment = false sourceless = false # version of a migration file's filename format -version_num_format = %s +version_num_format = %%s # version path separator version_path_separator = os diff --git a/services/training/app/main.py b/services/training/app/main.py index 7dbbc922..389e2c34 100644 --- a/services/training/app/main.py +++ b/services/training/app/main.py @@ -8,6 +8,7 @@ ML training service for bakery demand forecasting import asyncio from fastapi import FastAPI, Request +from sqlalchemy import text from app.core.config import settings from app.core.database import initialize_training_database, cleanup_training_database, database_manager from app.api import training, models @@ -19,6 +20,27 @@ from shared.service_base import StandardFastAPIService class TrainingService(StandardFastAPIService): """Training Service with standardized setup""" + expected_migration_version = "001_initial_training" + + async def on_startup(self, app): + """Custom startup logic including migration verification""" + await self.verify_migrations() + await super().on_startup(app) 
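+
+    # verify_migrations reads Alembic's single-row bookkeeping table; the query
+    # below is equivalent to running:
+    #
+    #   SELECT version_num FROM alembic_version;
+    #   -- expected: '001_initial_training' for this service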
+ + async def verify_migrations(self): + """Verify database schema matches the latest migrations.""" + try: + async with self.database_manager.get_session() as session: + result = await session.execute(text("SELECT version_num FROM alembic_version")) + version = result.scalar() + if version != self.expected_migration_version: + self.logger.error(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + raise RuntimeError(f"Migration version mismatch: expected {self.expected_migration_version}, got {version}") + self.logger.info(f"Migration verification successful: {version}") + except Exception as e: + self.logger.error(f"Migration verification failed: {e}") + raise + def __init__(self): # Define expected database tables for health checks training_expected_tables = [ diff --git a/services/training/migrations/env.py b/services/training/migrations/env.py index 68d380ac..abf67d81 100644 --- a/services/training/migrations/env.py +++ b/services/training/migrations/env.py @@ -35,8 +35,24 @@ except ImportError as e: # this is the Alembic Config object config = context.config -# Set database URL from settings if not already set -database_url = os.getenv('DATABASE_URL') or getattr(settings, 'DATABASE_URL', None) +# Set database URL from environment variables or settings +# Try service-specific DATABASE_URL first, then fall back to generic +database_url = os.getenv('TRAINING_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + if database_url: config.set_main_option("sqlalchemy.url", database_url) diff --git a/services/training/migrations/env.py.bak b/services/training/migrations/env.py.bak new file mode 100644 index 00000000..44cc00a7 --- /dev/null +++ b/services/training/migrations/env.py.bak @@ -0,0 +1,111 @@ +"""Alembic environment configuration for training service""" + +import asyncio +import logging +import os +import sys +from logging.config import fileConfig +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config +from alembic import context + +# Add the service directory to the Python path +service_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if service_path not in sys.path: + sys.path.insert(0, service_path) + +# Add shared modules to path +shared_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "shared")) +if shared_path not in sys.path: + sys.path.insert(0, shared_path) + +try: + from app.core.config import settings + from shared.database.base import Base + + # Import all models to ensure they are registered with Base.metadata + from app.models import * # Import all models + +except ImportError as e: + print(f"Import error in migrations env.py: {e}") + print(f"Current Python path: {sys.path}") + raise + +# this is the Alembic Config object +config = context.config + +# Set database URL from environment variables or settings 
+database_url = os.getenv('TRAINING_DATABASE_URL') or os.getenv('DATABASE_URL') + +# If DATABASE_URL is not set, construct from individual components +if not database_url: + postgres_host = os.getenv('POSTGRES_HOST') + postgres_port = os.getenv('POSTGRES_PORT', '5432') + postgres_db = os.getenv('POSTGRES_DB') + postgres_user = os.getenv('POSTGRES_USER') + postgres_password = os.getenv('POSTGRES_PASSWORD') + + if all([postgres_host, postgres_db, postgres_user, postgres_password]): + database_url = f"postgresql+asyncpg://{postgres_user}:{postgres_password}@{postgres_host}:{postgres_port}/{postgres_db}" + else: + # Fallback to settings + database_url = getattr(settings, 'DATABASE_URL', None) + +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + +# Interpret the config file for Python logging +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Set target metadata +target_metadata = Base.metadata + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + compare_server_default=True, + ) + + with context.begin_transaction(): + context.run_migrations() + +async def run_async_migrations() -> None: + """Run migrations in 'online' mode.""" + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + asyncio.run(run_async_migrations()) + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/shared/database/init_manager.py b/shared/database/init_manager.py new file mode 100644 index 00000000..93291ad2 --- /dev/null +++ b/shared/database/init_manager.py @@ -0,0 +1,405 @@ +""" +Database Initialization Manager + +Handles automatic table creation and Alembic integration for both: +1. Production deployments (first-time table creation) +2. 
Development workflows (reset to clean slate)
+"""
+
+import os
+import asyncio
+import structlog
+from typing import Optional, List, Dict, Any
+from pathlib import Path
+from sqlalchemy import text, inspect
+from sqlalchemy.ext.asyncio import AsyncSession
+from alembic.config import Config
+from alembic import command
+from alembic.runtime.migration import MigrationContext
+from alembic.script import ScriptDirectory
+
+from .base import DatabaseManager, Base
+
+logger = structlog.get_logger()
+
+
+class DatabaseInitManager:
+    """
+    Manages database initialization with support for:
+    - Automatic table creation from SQLAlchemy models
+    - Alembic integration and version management
+    - Development reset capabilities
+    """
+
+    def __init__(
+        self,
+        database_manager: DatabaseManager,
+        service_name: str,
+        alembic_ini_path: Optional[str] = None,
+        models_module: Optional[str] = None,
+        force_recreate: bool = False
+    ):
+        self.database_manager = database_manager
+        self.service_name = service_name
+        self.alembic_ini_path = alembic_ini_path
+        self.models_module = models_module
+        self.force_recreate = force_recreate
+        self.logger = logger.bind(service=service_name)
+
+    async def initialize_database(self) -> Dict[str, Any]:
+        """
+        Main initialization method that handles all scenarios:
+        1. Check whether the database exists and has tables
+        2. Create tables if needed (first-time deployment)
+        3. Handle Alembic version management
+        4. Support development reset scenarios
+        """
+        self.logger.info("Starting database initialization")
+
+        try:
+            # Check current database state
+            db_state = await self._check_database_state()
+            self.logger.info("Database state checked", state=db_state)
+
+            # Handle the different initialization scenarios
+            if self.force_recreate:
+                result = await self._handle_force_recreate()
+            elif db_state["is_empty"]:
+                result = await self._handle_first_time_deployment()
+            elif db_state["has_alembic_version"]:
+                result = await self._handle_existing_database_with_alembic()
+            else:
+                result = await self._handle_existing_database_without_alembic()
+
+            self.logger.info("Database initialization completed", result=result)
+            return result
+
+        except Exception as e:
+            self.logger.error("Database initialization failed", error=str(e))
+            raise
+
+    async def _check_database_state(self) -> Dict[str, Any]:
+        """Check the current state of the database"""
+        state = {
+            "is_empty": False,
+            "has_alembic_version": False,
+            "existing_tables": [],
+            "alembic_version": None
+        }
+
+        try:
+            async with self.database_manager.get_session() as session:
+                # Check whether the database has any tables
+                existing_tables = await self._get_existing_tables(session)
+                state["existing_tables"] = existing_tables
+                state["is_empty"] = len(existing_tables) == 0
+
+                # Check whether alembic_version exists and holds a version
+                if "alembic_version" in existing_tables:
+                    state["has_alembic_version"] = True
+                    result = await session.execute(text("SELECT version_num FROM alembic_version"))
+                    state["alembic_version"] = result.scalar()
+
+        except Exception as e:
+            self.logger.warning("Error checking database state", error=str(e))
+            state["is_empty"] = True
+
+        return state
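+
+    # Illustrative wiring only (a sketch -- the service name, paths, and module
+    # string below are assumptions, not values this module defines):
+    #
+    #   init_manager = DatabaseInitManager(
+    #       database_manager=database_manager,
+    #       service_name="orders",
+    #       alembic_ini_path="services/orders/alembic.ini",
+    #       models_module="services.orders.app.models",
+    #       force_recreate=os.getenv("DB_FORCE_RECREATE", "false").lower() == "true",
+    #   )
+    #   result = await init_manager.initialize_database()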
+    async def _handle_first_time_deployment(self) -> Dict[str, Any]:
+        """Handle first-time deployment: create tables and set up Alembic"""
+        self.logger.info("Handling first-time deployment")
+
+        # Import models so they are registered with Base
+        if self.models_module:
+            await self._import_models()
+
+        # Create all tables from SQLAlchemy models
+        await self._create_tables_from_models()
+
+        # Initialize Alembic and stamp with the latest version
+        if self.alembic_ini_path and os.path.exists(self.alembic_ini_path):
+            await self._initialize_alembic()
+
+        return {
+            "action": "first_time_deployment",
+            "tables_created": True,
+            "alembic_initialized": True,
+            "message": "Database initialized for first-time deployment"
+        }
+
+    async def _handle_force_recreate(self) -> Dict[str, Any]:
+        """Handle development scenario: drop everything and recreate"""
+        self.logger.info("Handling force recreate (development mode)")
+
+        # Drop all tables
+        await self._drop_all_tables()
+
+        # Create tables from models
+        if self.models_module:
+            await self._import_models()
+
+        await self._create_tables_from_models()
+
+        # Re-initialize Alembic
+        if self.alembic_ini_path and os.path.exists(self.alembic_ini_path):
+            await self._initialize_alembic()
+
+        return {
+            "action": "force_recreate",
+            "tables_dropped": True,
+            "tables_created": True,
+            "alembic_reinitialized": True,
+            "message": "Database recreated from scratch (development mode)"
+        }
+
+    async def _handle_existing_database_with_alembic(self) -> Dict[str, Any]:
+        """Handle existing database with Alembic version management"""
+        self.logger.info("Handling existing database with Alembic")
+
+        # Run pending migrations
+        if self.alembic_ini_path and os.path.exists(self.alembic_ini_path):
+            await self._run_migrations()
+
+        return {
+            "action": "existing_with_alembic",
+            "migrations_run": True,
+            "message": "Existing database updated with pending migrations"
+        }
+
+    async def _handle_existing_database_without_alembic(self) -> Dict[str, Any]:
+        """Handle existing database without Alembic (legacy scenario)"""
+        self.logger.info("Handling existing database without Alembic")
+
+        # Initialize Alembic on the existing database
+        if self.alembic_ini_path and os.path.exists(self.alembic_ini_path):
+            await self._initialize_alembic_on_existing()
+
+        return {
+            "action": "existing_without_alembic",
+            "alembic_initialized": True,
+            "message": "Alembic initialized on existing database"
+        }
+
+    async def _import_models(self):
+        """Import the models module so the models register with Base"""
+        try:
+            import importlib
+            import sys
+
+            # Add the project root to the Python path if it is not already there
+            project_root = Path(__file__).parent.parent.parent
+            if str(project_root) not in sys.path:
+                sys.path.insert(0, str(project_root))
+
+            # Try to import the models module
+            try:
+                importlib.import_module(self.models_module)
+                self.logger.info("Models imported successfully", module=self.models_module)
+            except ImportError as import_error:
+                # Fall back to importing "app.models" from the service directory
+                alt_module = "app.models"
+                self.logger.warning("Primary import failed, trying alternative",
+                                    primary=self.models_module,
+                                    alternative=alt_module,
+                                    error=str(import_error))
+
+                # Make the service directory importable; changing the working
+                # directory alone would not affect module resolution
+                service_dir = project_root / "services" / self.service_name
+
+                if service_dir.exists():
+                    if str(service_dir) not in sys.path:
+                        sys.path.insert(0, str(service_dir))
+                    importlib.import_module(alt_module)
+                    self.logger.info("Models imported with alternative path", module=alt_module)
+                else:
+                    raise import_error
+
+        except Exception as e:
+            self.logger.error("Failed to import models", module=self.models_module, error=str(e))
+            # Don't raise for now; continue without the models import
+            self.logger.warning("Continuing without models import - tables may not be created")
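+
+    # NOTE: Base.metadata.create_all only creates tables that do not exist yet;
+    # it never alters existing columns. Schema changes on a populated database
+    # still have to go through Alembic migrations.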
may not be created") + + async def _create_tables_from_models(self): + """Create all tables from registered SQLAlchemy models""" + try: + async with self.database_manager.async_engine.begin() as conn: + await conn.run_sync(Base.metadata.create_all) + self.logger.info("Tables created from SQLAlchemy models") + except Exception as e: + self.logger.error("Failed to create tables from models", error=str(e)) + raise + + async def _drop_all_tables(self): + """Drop all tables (for development reset)""" + try: + async with self.database_manager.async_engine.begin() as conn: + await conn.run_sync(Base.metadata.drop_all) + self.logger.info("All tables dropped") + except Exception as e: + self.logger.error("Failed to drop tables", error=str(e)) + raise + + async def _initialize_alembic(self): + """Initialize Alembic and stamp with latest version""" + try: + def run_alembic_init(): + # Create Alembic config + alembic_cfg = Config(self.alembic_ini_path) + + # Get the latest revision + script_dir = ScriptDirectory.from_config(alembic_cfg) + latest_revision = script_dir.get_current_head() + + if latest_revision: + # Stamp the database with the latest revision + command.stamp(alembic_cfg, latest_revision) + return latest_revision + return None + + # Run Alembic operations in async executor + latest_revision = await asyncio.get_event_loop().run_in_executor(None, run_alembic_init) + + if latest_revision: + self.logger.info("Alembic initialized and stamped", revision=latest_revision) + else: + self.logger.warning("No Alembic revisions found") + + except Exception as e: + self.logger.error("Failed to initialize Alembic", error=str(e)) + raise + + async def _initialize_alembic_on_existing(self): + """Initialize Alembic on an existing database""" + try: + def run_alembic_stamp(): + alembic_cfg = Config(self.alembic_ini_path) + script_dir = ScriptDirectory.from_config(alembic_cfg) + latest_revision = script_dir.get_current_head() + + if latest_revision: + command.stamp(alembic_cfg, latest_revision) + return latest_revision + return None + + # Run Alembic operations in async executor + latest_revision = await asyncio.get_event_loop().run_in_executor(None, run_alembic_stamp) + + if latest_revision: + self.logger.info("Alembic initialized on existing database", revision=latest_revision) + + except Exception as e: + self.logger.error("Failed to initialize Alembic on existing database", error=str(e)) + raise + + async def _run_migrations(self): + """Run pending Alembic migrations""" + try: + def run_alembic_upgrade(): + alembic_cfg = Config(self.alembic_ini_path) + command.upgrade(alembic_cfg, "head") + + # Run Alembic operations in async executor + await asyncio.get_event_loop().run_in_executor(None, run_alembic_upgrade) + self.logger.info("Alembic migrations completed") + except Exception as e: + self.logger.error("Failed to run migrations", error=str(e)) + raise + + async def _get_inspector(self, session: AsyncSession): + """Get SQLAlchemy inspector for the current connection""" + def get_inspector_sync(connection): + return inspect(connection) + + connection = await session.connection() + return await connection.run_sync(get_inspector_sync) + + async def _get_existing_tables(self, inspector) -> List[str]: + """Get list of existing tables in the database""" + def get_tables_sync(connection): + insp = inspect(connection) + return insp.get_table_names() + + async with self.database_manager.get_session() as session: + connection = await session.connection() + return await connection.run_sync(get_tables_sync) + + +def 
+
+    async def _get_inspector(self, session: AsyncSession):
+        """Get a SQLAlchemy inspector bound to the session's connection."""
+        def get_inspector_sync(connection):
+            return inspect(connection)
+
+        connection = await session.connection()
+        return await connection.run_sync(get_inspector_sync)
+
+    async def _get_existing_tables(self) -> List[str]:
+        """Get the list of tables that currently exist in the database."""
+        def get_tables_sync(connection):
+            insp = inspect(connection)
+            return insp.get_table_names()
+
+        async with self.database_manager.get_session() as session:
+            connection = await session.connection()
+            return await connection.run_sync(get_tables_sync)
+
+
+def create_init_manager(
+    database_manager: DatabaseManager,
+    service_name: str,
+    service_path: Optional[str] = None,
+    force_recreate: bool = False
+) -> DatabaseInitManager:
+    """
+    Factory function to create a DatabaseInitManager with auto-detected paths.
+
+    Args:
+        database_manager: DatabaseManager instance
+        service_name: Name of the service
+        service_path: Path to the service directory (auto-detected if None)
+        force_recreate: Whether to force recreate tables (development mode)
+    """
+    # Auto-detect paths if not provided
+    if service_path is None:
+        # Try the Docker container layout first (service files at the root level)
+        if os.path.exists("alembic.ini"):
+            service_path = "."
+        else:
+            # Fall back to the development layout
+            service_path = f"services/{service_name}"
+
+    # Set up paths based on the environment
+    if service_path == ".":
+        # Docker container environment
+        alembic_ini_path = "alembic.ini"
+        models_module = "app.models"
+    else:
+        # Development environment
+        alembic_ini_path = f"{service_path}/alembic.ini"
+        models_module = f"services.{service_name}.app.models"
+
+    # Verify that the Alembic config actually exists
+    if not os.path.exists(alembic_ini_path):
+        logger.warning("Alembic config not found", path=alembic_ini_path)
+        alembic_ini_path = None
+
+    return DatabaseInitManager(
+        database_manager=database_manager,
+        service_name=service_name,
+        alembic_ini_path=alembic_ini_path,
+        models_module=models_module,
+        force_recreate=force_recreate
+    )
+
+
+async def initialize_service_database(
+    database_manager: DatabaseManager,
+    service_name: str,
+    force_recreate: bool = False
+) -> Dict[str, Any]:
+    """
+    Convenience function for service database initialization.
+
+    Args:
+        database_manager: DatabaseManager instance
+        service_name: Name of the service
+        force_recreate: Whether to force recreate (development mode)
+
+    Returns:
+        Dict with initialization results
+    """
+    init_manager = create_init_manager(
+        database_manager=database_manager,
+        service_name=service_name,
+        force_recreate=force_recreate
+    )
+
+    return await init_manager.initialize_database()
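+
+
+# Usage sketch (editor's addition; names are hypothetical): a service
+# entrypoint could initialize its schema like this, given an already
+# configured DatabaseManager instance called `db_manager`:
+#
+#     import asyncio
+#
+#     async def main() -> None:
+#         result = await initialize_service_database(
+#             database_manager=db_manager,
+#             service_name="auth",
+#             force_recreate=False,
+#         )
+#         print(result["action"], result["message"])
+#
+#     asyncio.run(main())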
"no_version" + migration_info["migration_errors"].append("No migration version found in alembic_version table") + + except Exception as e: + migration_info["migration_status"] = "error" + migration_info["migration_errors"].append(f"Migration check failed: {str(e)}") + logger.error("Migration status check failed", service=self.service_name, error=str(e)) + + return migration_info + class FastAPIHealthChecker: """ @@ -315,6 +350,40 @@ class FastAPIHealthChecker: # Convenience functions for easy integration +async def check_database_health(db_manager: DatabaseManager) -> Dict[str, Any]: + """ + Enhanced database health check with migration status + + Args: + db_manager: DatabaseManager instance + + Returns: + Dict containing database health status including migration version + """ + try: + async with db_manager.get_session() as session: + # Basic connectivity test + await session.execute(text("SELECT 1")) + + # Get migration status + migration_status = await session.execute(text("SELECT version_num FROM alembic_version")) + version = migration_status.scalar() + + return { + "database": "healthy", + "migration_version": version, + "connectivity": True + } + except Exception as e: + logger.error("Database health check failed", error=str(e)) + return { + "database": "unhealthy", + "error": str(e), + "connectivity": False, + "migration_version": None + } + + def create_health_manager( service_name: str, version: str = "1.0.0", diff --git a/shared/service_base.py b/shared/service_base.py index 46fef5e9..e526febf 100644 --- a/shared/service_base.py +++ b/shared/service_base.py @@ -200,10 +200,15 @@ class BaseFastAPIService: pass async def _initialize_database(self): - """Initialize database connection""" + """Initialize database connection and tables""" try: # Test connection if await self.database_manager.test_connection(): + self.logger.info("Database connection established") + + # Handle automatic table initialization + await self._handle_database_tables() + self.logger.info("Database initialized successfully") else: raise Exception("Database connection test failed") @@ -211,6 +216,29 @@ class BaseFastAPIService: self.logger.error("Database initialization failed", error=str(e)) raise + async def _handle_database_tables(self): + """Handle automatic table creation and migration management""" + try: + # Import the init manager here to avoid circular imports + from shared.database.init_manager import initialize_service_database + + # Check if we're in force recreate mode (development) + force_recreate = os.getenv("DB_FORCE_RECREATE", "false").lower() == "true" + + # Initialize database with automatic table creation + result = await initialize_service_database( + database_manager=self.database_manager, + service_name=self.service_name.replace("-service", "").replace("_", ""), + force_recreate=force_recreate + ) + + self.logger.info("Database table initialization completed", result=result) + + except Exception as e: + self.logger.error("Database table initialization failed", error=str(e)) + # Don't raise here - let the service start even if table init fails + # This allows for manual intervention if needed + async def _cleanup_database(self): """Cleanup database connections""" try: diff --git a/skaffold.yaml b/skaffold.yaml index 96f7c4f3..d05a0831 100644 --- a/skaffold.yaml +++ b/skaffold.yaml @@ -102,6 +102,53 @@ deploy: # Access via: https://localhost (or http://localhost) profiles: + - name: minimal + build: + local: + push: false + tagPolicy: + envTemplate: + template: "dev" + deploy: + kubectl: 
 def create_health_manager(
     service_name: str,
     version: str = "1.0.0",
diff --git a/shared/service_base.py b/shared/service_base.py
index 46fef5e9..e526febf 100644
--- a/shared/service_base.py
+++ b/shared/service_base.py
@@ -200,10 +200,15 @@ class BaseFastAPIService:
         pass
 
     async def _initialize_database(self):
-        """Initialize database connection"""
+        """Initialize database connection and tables"""
         try:
             # Test connection
             if await self.database_manager.test_connection():
+                self.logger.info("Database connection established")
+
+                # Handle automatic table initialization
+                await self._handle_database_tables()
+
                 self.logger.info("Database initialized successfully")
             else:
                 raise Exception("Database connection test failed")
@@ -211,6 +216,29 @@ class BaseFastAPIService:
             self.logger.error("Database initialization failed", error=str(e))
             raise
 
+    async def _handle_database_tables(self):
+        """Handle automatic table creation and migration management"""
+        try:
+            # Import the init manager here to avoid circular imports
+            from shared.database.init_manager import initialize_service_database
+
+            # Check if we're in force recreate mode (development)
+            force_recreate = os.getenv("DB_FORCE_RECREATE", "false").lower() == "true"
+
+            # Initialize database with automatic table creation
+            result = await initialize_service_database(
+                database_manager=self.database_manager,
+                service_name=self.service_name.replace("-service", "").replace("_", ""),
+                force_recreate=force_recreate
+            )
+
+            self.logger.info("Database table initialization completed", result=result)
+
+        except Exception as e:
+            self.logger.error("Database table initialization failed", error=str(e))
+            # Don't raise here - let the service start even if table init fails
+            # This allows for manual intervention if needed
+
     async def _cleanup_database(self):
         """Cleanup database connections"""
         try:
diff --git a/skaffold.yaml b/skaffold.yaml
index 96f7c4f3..d05a0831 100644
--- a/skaffold.yaml
+++ b/skaffold.yaml
@@ -102,6 +102,53 @@ deploy:
   # Access via: https://localhost (or http://localhost)
 
 profiles:
+  - name: minimal
+    build:
+      local:
+        push: false
+      tagPolicy:
+        envTemplate:
+          template: "dev"
+    deploy:
+      kubectl:
+        manifests:
+          - infrastructure/kubernetes/base/components/databases/auth-db.yaml
+          - infrastructure/kubernetes/base/components/databases/inventory-db.yaml
+          - infrastructure/kubernetes/base/migrations/auth-migration-job.yaml
+          - infrastructure/kubernetes/base/migrations/inventory-migration-job.yaml
+          - infrastructure/kubernetes/base/configs/*.yaml
+
+  - name: full
+    build:
+      local:
+        push: false
+      tagPolicy:
+        envTemplate:
+          template: "dev"
+    deploy:
+      kubectl:
+        manifests:
+          - infrastructure/kubernetes/base/components/databases/*.yaml
+          - infrastructure/kubernetes/base/migrations/*.yaml
+          - infrastructure/kubernetes/base/configs/*.yaml
+      kustomize:
+        paths:
+          - infrastructure/kubernetes/overlays/dev
+
+  - name: single
+    build:
+      local:
+        push: false
+      tagPolicy:
+        envTemplate:
+          template: "dev"
+    deploy:
+      kubectl:
+        manifests:
+          - infrastructure/kubernetes/base/components/databases/{{SERVICE_NAME}}.yaml
+          - infrastructure/kubernetes/base/migrations/{{SERVICE_NAME}}-migration-job.yaml
+          - infrastructure/kubernetes/base/configs/*.yaml
+
   - name: dev
     build:
       local: