#!/bin/bash

# Script to regenerate Alembic migrations using Kubernetes local dev environment
# This script backs up existing migrations and generates new ones based on current models

set -euo pipefail # Exit on error, undefined variables, and pipe failures

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
NAMESPACE="${KUBE_NAMESPACE:-bakery-ia}"
LOG_FILE="migration_script_$(date +%Y%m%d_%H%M%S).log"
BACKUP_RETENTION_DAYS=7
CONTAINER_SUFFIX="service" # Default container name suffix (e.g., pos-service)

# Parse command line arguments
DRY_RUN=false
SKIP_BACKUP=false
APPLY_MIGRATIONS=false
CHECK_EXISTING=false
VERBOSE=false
SKIP_DB_CHECK=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run) DRY_RUN=true; shift ;;
        --skip-backup) SKIP_BACKUP=true; shift ;;
        --apply) APPLY_MIGRATIONS=true; shift ;;
        --check-existing) CHECK_EXISTING=true; shift ;;
        --verbose) VERBOSE=true; shift ;;
        --skip-db-check) SKIP_DB_CHECK=true; shift ;;
        --namespace) NAMESPACE="$2"; shift 2 ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  --dry-run         Show what would be done without making changes"
            echo "  --skip-backup     Skip backing up existing migrations"
            echo "  --apply           Automatically apply migrations after generation"
            echo "  --check-existing  Check for and copy existing migrations from pods first"
            echo "  --verbose         Enable detailed logging"
            echo "  --skip-db-check   Skip database connectivity check"
            echo "  --namespace NAME  Use specific Kubernetes namespace (default: bakery-ia)"
            echo ""
            echo "Examples:"
            echo "  $0 --namespace dev --dry-run  # Simulate migration regeneration"
            echo "  $0 --apply --verbose          # Generate and apply migrations with detailed logs"
            echo "  $0 --skip-db-check            # Skip database connectivity check"
            exit 0
            ;;
        *) echo "Unknown option: $1"; echo "Use --help for usage information"; exit 1 ;;
    esac
done

# List of all services
SERVICES=(
    "pos" "sales" "recipes" "training" "auth" "orders" "inventory"
    "suppliers" "tenant" "notification" "alert-processor" "forecasting"
    "external" "production" "demo-session" "orchestrator" "procurement"
)

# Backup directory
BACKUP_DIR="migrations_backup_$(date +%Y%m%d_%H%M%S)"
mkdir -p "$BACKUP_DIR"

# Initialize log file
touch "$LOG_FILE"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Starting migration regeneration" >> "$LOG_FILE"
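# The namespace can also be set via the environment instead of --namespace, e.g.
# (hypothetical script name):
#   KUBE_NAMESPACE=dev ./regenerate_migrations_k8s.sh --dry-run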
# Function to perform pre-flight checks
preflight_checks() {
    echo -e "${BLUE}========================================${NC}"
    echo -e "${BLUE}Pre-flight Checks${NC}"
    echo -e "${BLUE}========================================${NC}"
    echo ""

    local checks_passed=true

    # Check kubectl
    echo -e "${YELLOW}Checking kubectl...${NC}"
    if ! command -v kubectl &> /dev/null; then
        echo -e "${RED}✗ kubectl not found${NC}"
        log_message "ERROR" "kubectl not found"
        checks_passed=false
    else
        # "--short" was removed in recent kubectl releases, so parse the full client output
        KUBECTL_VERSION=$(kubectl version --client 2>/dev/null | grep -oP 'v\d+\.\d+\.\d+' | head -1 || echo "unknown")
        echo -e "${GREEN}✓ kubectl found (version: $KUBECTL_VERSION)${NC}"
    fi

    # Check cluster connectivity
    echo -e "${YELLOW}Checking Kubernetes cluster connectivity...${NC}"
    if ! kubectl cluster-info &> /dev/null; then
        echo -e "${RED}✗ Cannot connect to Kubernetes cluster${NC}"
        log_message "ERROR" "Cannot connect to Kubernetes cluster"
        checks_passed=false
    else
        CLUSTER_NAME=$(kubectl config current-context 2>/dev/null || echo "unknown")
        echo -e "${GREEN}✓ Connected to cluster: $CLUSTER_NAME${NC}"
    fi

    # Check namespace exists
    echo -e "${YELLOW}Checking namespace '$NAMESPACE'...${NC}"
    if ! kubectl get namespace "$NAMESPACE" &> /dev/null; then
        echo -e "${RED}✗ Namespace '$NAMESPACE' not found${NC}"
        log_message "ERROR" "Namespace '$NAMESPACE' not found"
        checks_passed=false
    else
        echo -e "${GREEN}✓ Namespace exists${NC}"
    fi

    # Check if all service pods are running
    echo -e "${YELLOW}Checking service pods...${NC}"
    local pods_running=0
    for service in "${SERVICES[@]}"; do
        local pod_name=$(kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=${service}-service" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
        if [ -n "$pod_name" ]; then
            pods_running=$((pods_running + 1))
        fi
    done
    echo -e "${GREEN}✓ Found $pods_running/${#SERVICES[@]} service pods running${NC}"

    if [ $pods_running -lt ${#SERVICES[@]} ]; then
        echo -e "${YELLOW}⚠ Not all service pods are running${NC}"
        echo -e "${YELLOW}  Missing services will be skipped${NC}"
    fi

    # Check database connectivity for running services
    echo -e "${YELLOW}Checking database connectivity (sample)...${NC}"
    local sample_service="auth"
    local sample_pod=$(kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=${sample_service}-service" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")

    if [ -n "$sample_pod" ]; then
        local db_check=$(kubectl exec -n "$NAMESPACE" "$sample_pod" -c "${sample_service}-service" -- sh -c "python3 -c 'import asyncpg; print(\"OK\")' 2>/dev/null" || echo "FAIL")
        if [ "$db_check" = "OK" ]; then
            echo -e "${GREEN}✓ Database drivers available (asyncpg)${NC}"
        else
            echo -e "${YELLOW}⚠ Database driver check failed${NC}"
        fi
    else
        echo -e "${YELLOW}⚠ Cannot check database connectivity (no sample pod running)${NC}"
    fi

    # Check local directory structure
    echo -e "${YELLOW}Checking local directory structure...${NC}"
    local dirs_found=0
    for service in "${SERVICES[@]}"; do
        local service_dir=$(echo "$service" | tr '-' '_')
        if [ -d "services/$service_dir/migrations" ]; then
            dirs_found=$((dirs_found + 1))
        fi
    done
    echo -e "${GREEN}✓ Found $dirs_found/${#SERVICES[@]} service migration directories${NC}"

    # Check disk space
    echo -e "${YELLOW}Checking disk space...${NC}"
    local available_space=$(df -h . | tail -1 | awk '{print $4}')
    echo -e "${GREEN}✓ Available disk space: $available_space${NC}"

    echo ""
    if [ "$checks_passed" = false ]; then
        echo -e "${RED}========================================${NC}"
        echo -e "${RED}Pre-flight checks failed!${NC}"
        echo -e "${RED}========================================${NC}"
        echo ""
        read -p "Continue anyway? (y/n) " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            echo -e "${RED}Aborted.${NC}"
            exit 1
        fi
    else
        echo -e "${GREEN}========================================${NC}"
        echo -e "${GREEN}All pre-flight checks passed!${NC}"
        echo -e "${GREEN}========================================${NC}"
    fi
    echo ""
}
# Helper functions: defined here, before preflight_checks runs, because it calls
# log_message and bash resolves function names at call time.

# Function to log messages
log_message() {
    local level=$1
    local message=$2
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $level: $message" >> "$LOG_FILE"
    if [ "$VERBOSE" = true ] || [ "$level" = "ERROR" ]; then
        echo -e "${YELLOW}$message${NC}"
    fi
}

# Function to get a running pod for a service
get_running_pod() {
    local service=$1
    local pod_name=""
    local selectors=(
        "app.kubernetes.io/name=${service}-service,app.kubernetes.io/component=microservice"
        "app.kubernetes.io/name=${service}-service,app.kubernetes.io/component=worker"
        "app.kubernetes.io/name=${service}-service"
        "app=${service}-service,component=${service}" # Fallback for demo-session
        "app=${service}-service"                      # Additional fallback
    )

    for selector in "${selectors[@]}"; do
        # "|| true" keeps set -e from aborting when no pod matches the selector
        pod_name=$(kubectl get pods -n "$NAMESPACE" -l "$selector" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
        if [ -n "$pod_name" ]; then
            echo "$pod_name"
            return 0
        fi
    done

    echo ""
    return 1
}

# Run pre-flight checks
preflight_checks
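# For reference, the first selector can be exercised by hand, e.g. for the pos service:
#   kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=pos-service" \
#     --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}'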
echo -e "${BLUE}========================================${NC}"
|
|
echo -e "${BLUE}Migration Regeneration Script (K8s)${NC}"
|
|
echo -e "${BLUE}========================================${NC}"
|
|
echo ""
|
|
|
|
if [ "$DRY_RUN" = true ]; then
|
|
echo -e "${YELLOW}🔍 DRY RUN MODE - No changes will be made${NC}"
|
|
echo ""
|
|
fi
|
|
|
|
echo -e "${YELLOW}This script will:${NC}"
|
|
if [ "$CHECK_EXISTING" = true ]; then
|
|
echo -e "${YELLOW}1. Check for existing migrations in pods and copy them${NC}"
|
|
fi
|
|
if [ "$SKIP_BACKUP" = false ]; then
|
|
echo -e "${YELLOW}2. Backup existing migration files${NC}"
|
|
fi
|
|
echo -e "${YELLOW}3. Generate new migrations in Kubernetes pods${NC}"
|
|
echo -e "${YELLOW}4. Copy generated files back to local machine${NC}"
|
|
if [ "$APPLY_MIGRATIONS" = true ]; then
|
|
echo -e "${YELLOW}5. Apply migrations to databases${NC}"
|
|
fi
|
|
if [ "$SKIP_BACKUP" = false ]; then
|
|
echo -e "${YELLOW}6. Keep the backup in: $BACKUP_DIR${NC}"
|
|
fi
|
|
echo ""
|
|
echo -e "${YELLOW}Using Kubernetes namespace: $NAMESPACE${NC}"
|
|
echo -e "${YELLOW}Logs will be saved to: $LOG_FILE${NC}"
|
|
echo ""
|
|
|
|
if [ "$DRY_RUN" = false ]; then
|
|
read -p "Continue? (y/n) " -n 1 -r
|
|
echo
|
|
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
|
echo -e "${RED}Aborted.${NC}"
|
|
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Aborted by user" >> "$LOG_FILE"
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
# Kubernetes setup already verified in pre-flight checks.
# (log_message and get_running_pod are defined above, before their first use.)
# Check for existing migrations in pods if requested
if [ "$CHECK_EXISTING" = true ]; then
    echo -e "${BLUE}Step 1.5: Checking for existing migrations in pods...${NC}"
    echo ""

    FOUND_COUNT=0
    COPIED_COUNT=0

    for service in "${SERVICES[@]}"; do
        service_dir=$(echo "$service" | tr '-' '_')
        echo -e "${YELLOW}Checking $service...${NC}"

        # Find a running pod ("|| true" so a missing pod does not abort under set -e)
        POD_NAME=$(get_running_pod "$service" || true)
        if [ -z "$POD_NAME" ]; then
            echo -e "${YELLOW}⚠ Pod not found, skipping${NC}"
            log_message "WARNING" "No running pod found for $service"
            continue
        fi

        # Check container availability
        CONTAINER="${service}-${CONTAINER_SUFFIX}"
        if ! kubectl get pod -n "$NAMESPACE" "$POD_NAME" -o jsonpath='{.spec.containers[*].name}' | grep -qw "$CONTAINER"; then
            echo -e "${RED}✗ Container $CONTAINER not found in pod $POD_NAME, skipping${NC}"
            log_message "ERROR" "Container $CONTAINER not found in pod $POD_NAME for $service"
            continue
        fi

        # Check if migration files exist in the pod
        EXISTING_FILES=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "ls /app/migrations/versions/*.py 2>/dev/null | grep -v __pycache__ | grep -v __init__.py" 2>/dev/null || echo "")

        if [ -n "$EXISTING_FILES" ]; then
            FILE_COUNT=$(echo "$EXISTING_FILES" | wc -l | tr -d ' ')
            echo -e "${GREEN}✓ Found $FILE_COUNT migration file(s) in pod${NC}"
            FOUND_COUNT=$((FOUND_COUNT + 1))

            # Create local versions directory
            mkdir -p "services/$service_dir/migrations/versions"

            # Copy each file
            for pod_file in $EXISTING_FILES; do
                filename=$(basename "$pod_file")
                if [ "$DRY_RUN" = true ]; then
                    echo -e "${BLUE}[DRY RUN] Would copy: $filename${NC}"
                    log_message "INFO" "[DRY RUN] Would copy $filename for $service"
                else
                    if kubectl cp -n "$NAMESPACE" "$POD_NAME:$pod_file" "services/$service_dir/migrations/versions/$filename" -c "$CONTAINER" 2>>"$LOG_FILE"; then
                        echo -e "${GREEN}✓ Copied: $filename${NC}"
                        COPIED_COUNT=$((COPIED_COUNT + 1))
                        log_message "INFO" "Copied $filename for $service"
                        # Display brief summary ("|| true" so empty previews do not trip pipefail)
                        echo -e "${BLUE}Preview:${NC}"
                        grep "def upgrade" "services/$service_dir/migrations/versions/$filename" | head -1 || true
                        grep "op\." "services/$service_dir/migrations/versions/$filename" | head -3 | sed 's/^/  /' || true
                    else
                        echo -e "${RED}✗ Failed to copy: $filename${NC}"
                        log_message "ERROR" "Failed to copy $filename for $service"
                    fi
                fi
            done
        else
            echo -e "${YELLOW}⚠ No migration files found in pod${NC}"
            log_message "WARNING" "No migration files found in pod for $service"
        fi
        echo ""
    done

    echo -e "${BLUE}========================================${NC}"
    echo -e "${BLUE}Existing Migrations Check Summary${NC}"
    echo -e "${BLUE}========================================${NC}"
    echo -e "${GREEN}Services with migrations: $FOUND_COUNT${NC}"
    if [ "$DRY_RUN" = false ]; then
        echo -e "${GREEN}Files copied: $COPIED_COUNT${NC}"
    fi
    echo ""

    # If existing migrations were found and copied, offer to stop before regenerating
    if [ "$FOUND_COUNT" -gt 0 ] && [ "$DRY_RUN" = false ]; then
        read -p "Do you want to continue with regeneration? (y/n) " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            echo -e "${YELLOW}Stopping. Existing migrations have been copied.${NC}"
            log_message "INFO" "Stopped after copying existing migrations"
            exit 0
        fi
    fi
fi
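# Tip: a copied file can be compared against a backup by hand, e.g.
# (hypothetical revision filename):
#   diff "$BACKUP_DIR/pos/versions/abc123_initial.py" services/pos/migrations/versions/abc123_initial.py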
# Backup existing migrations
if [ "$SKIP_BACKUP" = false ] && [ "$DRY_RUN" = false ]; then
    echo -e "${BLUE}Step 2: Backing up existing migrations...${NC}"
    BACKUP_COUNT=0
    for service in "${SERVICES[@]}"; do
        service_dir=$(echo "$service" | tr '-' '_')
        if [ -d "services/$service_dir/migrations/versions" ] && [ -n "$(ls "services/$service_dir/migrations/versions/"*.py 2>/dev/null)" ]; then
            echo -e "${YELLOW}Backing up $service migrations...${NC}"
            mkdir -p "$BACKUP_DIR/$service_dir/versions"
            cp -r "services/$service_dir/migrations/versions/"*.py "$BACKUP_DIR/$service_dir/versions/" 2>>"$LOG_FILE"
            BACKUP_COUNT=$((BACKUP_COUNT + 1))
            log_message "INFO" "Backed up migrations for $service to $BACKUP_DIR/$service_dir/versions"
        else
            echo -e "${YELLOW}No migration files to backup for $service${NC}"
        fi
    done
    if [ "$BACKUP_COUNT" -gt 0 ]; then
        echo -e "${GREEN}✓ Backup complete: $BACKUP_DIR ($BACKUP_COUNT services)${NC}"
    else
        echo -e "${YELLOW}No migrations backed up (no migration files found)${NC}"
    fi
    echo ""
elif [ "$SKIP_BACKUP" = true ]; then
    echo -e "${YELLOW}Skipping backup step (--skip-backup flag)${NC}"
    log_message "INFO" "Backup skipped due to --skip-backup flag"
    echo ""
fi
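# To restore a single service from this backup later (example for "pos"):
#   cp "$BACKUP_DIR"/pos/versions/*.py services/pos/migrations/versions/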
# Clean up old backups
find . -maxdepth 1 -type d -name 'migrations_backup_*' -mtime +"$BACKUP_RETENTION_DAYS" -exec rm -rf {} \; 2>/dev/null || true
log_message "INFO" "Cleaned up backups older than $BACKUP_RETENTION_DAYS days"
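# With the default BACKUP_RETENTION_DAYS=7, backup directories older than a week are removed.
# The deletion candidates can be previewed without deleting anything via:
#   find . -maxdepth 1 -type d -name 'migrations_backup_*' -mtime +7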
echo -e "${BLUE}Step 3: Generating new migrations in Kubernetes...${NC}"
|
|
echo ""
|
|
|
|
SUCCESS_COUNT=0
|
|
FAILED_COUNT=0
|
|
FAILED_SERVICES=()
|
|
|
|
# Function to process a single service
process_service() {
    local service=$1
    local service_dir=$(echo "$service" | tr '-' '_')
    local db_env_var=$(echo "$service" | tr '[:lower:]-' '[:upper:]_')_DATABASE_URL # e.g., pos -> POS_DATABASE_URL, alert-processor -> ALERT_PROCESSOR_DATABASE_URL

    echo -e "${BLUE}----------------------------------------${NC}"
    echo -e "${BLUE}Processing: $service${NC}"
    echo -e "${BLUE}----------------------------------------${NC}"
    log_message "INFO" "Starting migration generation for $service"

    # Skip if no local migrations directory and --check-existing is not set
    if [ ! -d "services/$service_dir/migrations/versions" ] && [ "$CHECK_EXISTING" = false ]; then
        echo -e "${YELLOW}⚠ No local migrations/versions directory for $service, skipping...${NC}"
        log_message "WARNING" "No local migrations/versions directory for $service"
        return
    fi

    # Find a running pod ("|| true" so a missing pod does not abort under set -e)
    echo -e "${YELLOW}Finding $service pod in namespace $NAMESPACE...${NC}"
    POD_NAME=$(get_running_pod "$service" || true)
    if [ -z "$POD_NAME" ]; then
        echo -e "${RED}✗ No running pod found for $service. Skipping...${NC}"
        log_message "ERROR" "No running pod found for $service"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (pod not found)")
        return
    fi

    echo -e "${GREEN}✓ Found pod: $POD_NAME${NC}"
    log_message "INFO" "Found pod $POD_NAME for $service"

    # Check container availability
    CONTAINER="${service}-${CONTAINER_SUFFIX}"
    if ! kubectl get pod -n "$NAMESPACE" "$POD_NAME" -o jsonpath='{.spec.containers[*].name}' | grep -qw "$CONTAINER"; then
        echo -e "${RED}✗ Container $CONTAINER not found in pod $POD_NAME, skipping${NC}"
        log_message "ERROR" "Container $CONTAINER not found in pod $POD_NAME for $service"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (container not found)")
        return
    fi
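    # The containers in a pod can also be listed by hand (hypothetical pod name):
    #   kubectl get pod -n "$NAMESPACE" pos-service-abc123 -o jsonpath='{.spec.containers[*].name}'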
    # Verify database connectivity
    if [ "$SKIP_DB_CHECK" = false ]; then
        echo -e "${YELLOW}Verifying database connectivity using $db_env_var...${NC}"
        # Check if asyncpg is installed
        ASYNCPG_CHECK=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "python3 -c \"import asyncpg; print('asyncpg OK')\" 2>/dev/null" || echo "asyncpg MISSING")
        if [[ "$ASYNCPG_CHECK" != "asyncpg OK" ]]; then
            echo -e "${YELLOW}Installing asyncpg...${NC}"
            kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "python3 -m pip install --quiet asyncpg" 2>>"$LOG_FILE" || log_message "WARNING" "Failed to install asyncpg for $service"
        fi

        # Check for database URL
        DB_URL_CHECK=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "env | grep $db_env_var" 2>/dev/null || echo "")
        if [ -z "$DB_URL_CHECK" ]; then
            echo -e "${RED}✗ Environment variable $db_env_var not found in pod $POD_NAME${NC}"
            echo -e "${YELLOW}Available environment variables:${NC}"
            kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "env" 2>>"$LOG_FILE" | grep -i "database" || echo "No database-related variables found"
            log_message "ERROR" "Environment variable $db_env_var not found for $service in pod $POD_NAME"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (missing $db_env_var)")
            return
        fi

        # Log redacted database URL for debugging; the sed handles both user:pass@host
        # URLs and password= parameters (the original pattern only matched the latter)
        DB_URL=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "echo \$$db_env_var" 2>/dev/null | sed -e 's/\(password=\)[^@&]*/\1[REDACTED]/' -e 's|\(://[^:/@]*:\)[^@]*@|\1[REDACTED]@|' || true)
        log_message "INFO" "Using database URL for $service: $DB_URL"

        # Perform async database connectivity check. A heredoc is used because
        # "async def" cannot be written as a python3 -c one-liner with semicolons.
        DB_CHECK_OUTPUT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine

async def check_db():
    engine = create_async_engine(os.getenv('$db_env_var'))
    async with engine.connect():
        pass
    await engine.dispose()
    print('DB OK')

asyncio.run(check_db())
EOFPYTHON
" 2>&1 || echo "DB ERROR")
        if [[ "$DB_CHECK_OUTPUT" == *"DB OK"* ]]; then
            echo -e "${GREEN}✓ Database connection verified${NC}"
            log_message "INFO" "Database connection verified for $service"
        else
            echo -e "${RED}✗ Database connection failed for $service${NC}"
            echo -e "${YELLOW}Error details: $DB_CHECK_OUTPUT${NC}"
            log_message "ERROR" "Database connection failed for $service: $DB_CHECK_OUTPUT"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (database connection failed)")
            return
        fi
    else
        echo -e "${YELLOW}Skipping database connectivity check (--skip-db-check)${NC}"
        log_message "INFO" "Skipped database connectivity check for $service"
    fi
    # Reset alembic version tracking
    echo -e "${YELLOW}Resetting alembic version tracking...${NC}"
    kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH alembic downgrade base" 2>&1 | tee -a "$LOG_FILE" | grep -v "^INFO" || true
    log_message "INFO" "Attempted alembic downgrade for $service"
    # Option 1: Complete database schema reset using CASCADE.
    # "|| true" on the substitution lets a failed reset fall through to the
    # alternative below instead of aborting the whole script under set -e.
    echo -e "${YELLOW}Performing complete database schema reset...${NC}"
    SCHEMA_DROP_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def reset_database():
    try:
        engine = create_async_engine(os.getenv('$db_env_var'))
        async with engine.begin() as conn:
            # Drop and recreate public schema - cleanest approach
            await conn.execute(text('DROP SCHEMA IF EXISTS public CASCADE'))
            await conn.execute(text('CREATE SCHEMA public'))
            await conn.execute(text('GRANT ALL ON SCHEMA public TO PUBLIC'))
        await engine.dispose()
        print('SUCCESS: Database schema reset complete')
        return 0
    except Exception as e:
        print(f'ERROR: {str(e)}')
        return 1

exit(asyncio.run(reset_database()))
EOFPYTHON
" 2>&1 || true)

    echo "$SCHEMA_DROP_RESULT" >> "$LOG_FILE"

    if echo "$SCHEMA_DROP_RESULT" | grep -q "SUCCESS"; then
        echo -e "${GREEN}✓ Database schema reset successfully${NC}"
        log_message "INFO" "Database schema reset for $service"
    else
        echo -e "${RED}✗ Database schema reset failed${NC}"
        echo -e "${YELLOW}Error details:${NC}"
        echo "$SCHEMA_DROP_RESULT"
        log_message "ERROR" "Database schema reset failed for $service: $SCHEMA_DROP_RESULT"

        # Try alternative approach: drop individual tables from the database (not just models)
        echo -e "${YELLOW}Attempting alternative: dropping all existing tables individually...${NC}"
        TABLE_DROP_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def drop_all_tables():
    try:
        engine = create_async_engine(os.getenv('$db_env_var'))
        async with engine.begin() as conn:
            # Get all tables from database
            result = await conn.execute(text(\"\"\"
                SELECT tablename
                FROM pg_tables
                WHERE schemaname = 'public'
            \"\"\"))
            tables = [row[0] for row in result]

            # Drop each table
            for table in tables:
                await conn.execute(text(f'DROP TABLE IF EXISTS \"{table}\" CASCADE'))

        print(f'SUCCESS: Dropped {len(tables)} tables: {tables}')
        await engine.dispose()
        return 0
    except Exception as e:
        print(f'ERROR: {str(e)}')
        return 1

exit(asyncio.run(drop_all_tables()))
EOFPYTHON
" 2>&1 || true)

        echo "$TABLE_DROP_RESULT" >> "$LOG_FILE"

        if echo "$TABLE_DROP_RESULT" | grep -q "SUCCESS"; then
            echo -e "${GREEN}✓ All tables dropped successfully${NC}"
            log_message "INFO" "All tables dropped for $service"
        else
            echo -e "${RED}✗ Failed to drop tables${NC}"
            echo -e "${YELLOW}Error details:${NC}"
            echo "$TABLE_DROP_RESULT"
            log_message "ERROR" "Failed to drop tables for $service: $TABLE_DROP_RESULT"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (database cleanup failed)")
            return
        fi
    fi
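    # For reference, the same reset can be done by hand in psql (destructive; dev databases only):
    #   DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO PUBLIC;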
    # Verify database is empty
    echo -e "${YELLOW}Verifying database is clean...${NC}"
    VERIFY_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def verify_empty():
    engine = create_async_engine(os.getenv('$db_env_var'))
    async with engine.connect() as conn:
        result = await conn.execute(text(\"\"\"
            SELECT COUNT(*)
            FROM pg_tables
            WHERE schemaname = 'public'
        \"\"\"))
        count = result.scalar()
        print(f'Tables remaining: {count}')
    await engine.dispose()

# Report the count via stdout and exit 0; using the count as the exit code
# would make kubectl exec fail whenever any tables remain.
asyncio.run(verify_empty())
EOFPYTHON
" 2>&1 || true)

    echo "$VERIFY_RESULT" >> "$LOG_FILE"
    echo -e "${BLUE}$VERIFY_RESULT${NC}"

    # Initialize alembic version table after schema reset.
    # The exit code is captured via if/else: under set -e, VAR=$(failing-command)
    # followed by a separate $? check would abort before the check ever runs.
    echo -e "${YELLOW}Initializing alembic version tracking...${NC}"
    if ALEMBIC_INIT_OUTPUT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH alembic stamp base" 2>&1); then
        ALEMBIC_INIT_EXIT_CODE=0
    else
        ALEMBIC_INIT_EXIT_CODE=$?
    fi

    echo "$ALEMBIC_INIT_OUTPUT" >> "$LOG_FILE"

    if [ $ALEMBIC_INIT_EXIT_CODE -eq 0 ]; then
        echo -e "${GREEN}✓ Alembic version tracking initialized${NC}"
        log_message "INFO" "Alembic version tracking initialized for $service"
    else
        echo -e "${YELLOW}⚠ Alembic initialization warning (may be normal)${NC}"
        log_message "WARNING" "Alembic initialization for $service: $ALEMBIC_INIT_OUTPUT"
    fi
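    # Manual equivalents, run from a shell inside the service container:
    #   alembic stamp base   # reset version tracking to the pre-migration state
    #   alembic current      # confirm which revision (if any) is applied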
    # Remove old migration files in pod
    echo -e "${YELLOW}Removing old migration files in pod...${NC}"
    kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "rm -rf /app/migrations/versions/*.py /app/migrations/versions/__pycache__" 2>>"$LOG_FILE" || log_message "WARNING" "Failed to remove old migration files for $service"

    # Ensure dependencies
    echo -e "${YELLOW}Ensuring python-dateutil and asyncpg are installed...${NC}"
    kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "python3 -m pip install --quiet python-dateutil asyncpg" 2>>"$LOG_FILE" || log_message "WARNING" "Failed to install dependencies for $service"

    # Generate migration (exit code captured via if/else so set -e does not abort on failure)
    echo -e "${YELLOW}Running alembic autogenerate in pod...${NC}"
    MIGRATION_TIMESTAMP=$(date +%Y%m%d_%H%M)
    if MIGRATION_OUTPUT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 -m alembic revision --autogenerate -m \"initial_schema_$MIGRATION_TIMESTAMP\"" 2>&1); then
        MIGRATION_EXIT_CODE=0
    else
        MIGRATION_EXIT_CODE=$?
    fi

    echo "$MIGRATION_OUTPUT" >> "$LOG_FILE"
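    # With alembic's default file_template ("%(rev)s_%(slug)s"), the generated file is
    # typically named like <revision_hash>_initial_schema_<timestamp>.py; the exact
    # pattern depends on each service's alembic configuration.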
    if [ $MIGRATION_EXIT_CODE -eq 0 ]; then
        echo -e "${GREEN}✓ Migration generated in pod${NC}"
        log_message "INFO" "Migration generated for $service"

        # Copy migration file
        MIGRATION_FILE=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "ls -t /app/migrations/versions/*.py 2>/dev/null | head -1" || echo "")
        if [ -z "$MIGRATION_FILE" ]; then
            echo -e "${RED}✗ No migration file found in pod${NC}"
            log_message "ERROR" "No migration file generated for $service"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (no file generated)")
            return
        fi

        MIGRATION_FILENAME=$(basename "$MIGRATION_FILE")
        mkdir -p "services/$service_dir/migrations/versions"

        # Copy file, capturing the exit code without tripping set -e
        echo -e "${YELLOW}Copying migration file from pod...${NC}"
        if CP_OUTPUT=$(kubectl cp -n "$NAMESPACE" "$POD_NAME:$MIGRATION_FILE" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" -c "$CONTAINER" 2>&1); then
            CP_EXIT_CODE=0
        else
            CP_EXIT_CODE=$?
        fi

        echo "$CP_OUTPUT" >> "$LOG_FILE"

        # Verify the file was actually copied
        if [ $CP_EXIT_CODE -eq 0 ] && [ -f "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" ]; then
            LOCAL_FILE_SIZE=$(wc -c < "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" | tr -d ' ')

            if [ "$LOCAL_FILE_SIZE" -gt 0 ]; then
                echo -e "${GREEN}✓ Migration file copied: $MIGRATION_FILENAME ($LOCAL_FILE_SIZE bytes)${NC}"
                log_message "INFO" "Copied $MIGRATION_FILENAME for $service ($LOCAL_FILE_SIZE bytes)"
                SUCCESS_COUNT=$((SUCCESS_COUNT + 1))

                # Validate migration content
                echo -e "${YELLOW}Validating migration content...${NC}"
                if grep -E "op\.(create_table|add_column|create_index|alter_column|drop_table|drop_column|create_foreign_key)" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" >/dev/null; then
                    echo -e "${GREEN}✓ Migration contains schema operations${NC}"
                    log_message "INFO" "Migration contains schema operations for $service"
                elif grep -q "pass" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" && grep -q "def upgrade()" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME"; then
                    echo -e "${YELLOW}⚠ WARNING: Migration is empty (no schema changes detected)${NC}"
                    echo -e "${YELLOW}⚠ This usually means tables already exist in the database matching the models${NC}"
                    log_message "WARNING" "Empty migration generated for $service - possible database cleanup issue"
                else
                    echo -e "${GREEN}✓ Migration file created${NC}"
                fi

                # Display summary ("|| true" so missing matches do not trip pipefail)
                echo -e "${BLUE}Migration summary:${NC}"
                grep -E "^def (upgrade|downgrade)" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" | head -2 || true
                echo -e "${BLUE}Operations:${NC}"
                grep "op\." "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" | head -5 || echo "  (none found)"
            else
                echo -e "${RED}✗ Migration file is empty (0 bytes)${NC}"
                log_message "ERROR" "Migration file is empty for $service"
                rm -f "services/$service_dir/migrations/versions/$MIGRATION_FILENAME"
                FAILED_COUNT=$((FAILED_COUNT + 1))
                FAILED_SERVICES+=("$service (empty file)")
            fi
        else
            echo -e "${RED}✗ Failed to copy migration file${NC}"
            echo -e "${YELLOW}kubectl cp exit code: $CP_EXIT_CODE${NC}"
            echo -e "${YELLOW}kubectl cp output: $CP_OUTPUT${NC}"
            log_message "ERROR" "Failed to copy migration file for $service: $CP_OUTPUT"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (copy failed)")
        fi
    else
        echo -e "${RED}✗ Failed to generate migration${NC}"
        log_message "ERROR" "Failed to generate migration for $service"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (generation failed)")
    fi
}
# Process services sequentially
for service in "${SERVICES[@]}"; do
    process_service "$service"
done
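# To regenerate a single service, the SERVICES array near the top of this script
# can be trimmed temporarily, e.g.:
#   SERVICES=("pos")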
# Summary
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Summary${NC}"
echo -e "${BLUE}========================================${NC}"
echo -e "${GREEN}✓ Successful: $SUCCESS_COUNT services${NC}"
echo -e "${RED}✗ Failed: $FAILED_COUNT services${NC}"

if [ "$FAILED_COUNT" -gt 0 ]; then
    echo ""
    echo -e "${RED}Failed services:${NC}"
    for failed_service in "${FAILED_SERVICES[@]}"; do
        echo -e "${RED}  - $failed_service${NC}"
    done
fi

echo ""
echo -e "${YELLOW}Backup location: $BACKUP_DIR${NC}"
echo -e "${YELLOW}Log file: $LOG_FILE${NC}"
echo ""
# Apply migrations if requested
if [ "$APPLY_MIGRATIONS" = true ] && [ "$DRY_RUN" = false ] && [ "$SUCCESS_COUNT" -gt 0 ]; then
    echo -e "${BLUE}========================================${NC}"
    echo -e "${BLUE}Applying Migrations${NC}"
    echo -e "${BLUE}========================================${NC}"
    echo ""

    APPLIED_COUNT=0
    APPLY_FAILED_COUNT=0

    for service in "${SERVICES[@]}"; do
        service_dir=$(echo "$service" | tr '-' '_')
        # (No db_env_var needed here: alembic reads the URL from the pod environment.
        #  "local" is also invalid at top level, outside any function.)
        if [ ! -d "services/$service_dir/migrations/versions" ] || [ -z "$(ls "services/$service_dir/migrations/versions/"*.py 2>/dev/null)" ]; then
            continue
        fi

        echo -e "${BLUE}Applying migrations for: $service${NC}"
        POD_NAME=$(get_running_pod "$service" || true)
        if [ -z "$POD_NAME" ]; then
            echo -e "${YELLOW}⚠ Pod not found for $service, skipping...${NC}"
            log_message "WARNING" "No running pod found for $service during migration application"
            continue
        fi

        CONTAINER="${service}-${CONTAINER_SUFFIX}"
        if ! kubectl get pod -n "$NAMESPACE" "$POD_NAME" -o jsonpath='{.spec.containers[*].name}' | grep -qw "$CONTAINER"; then
            echo -e "${RED}✗ Container $CONTAINER not found in pod $POD_NAME, skipping${NC}"
            log_message "ERROR" "Container $CONTAINER not found in pod $POD_NAME for $service"
            continue
        fi

        if kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH alembic upgrade head" 2>>"$LOG_FILE"; then
            echo -e "${GREEN}✓ Migrations applied successfully for $service${NC}"
            log_message "INFO" "Migrations applied for $service"
            APPLIED_COUNT=$((APPLIED_COUNT + 1))
        else
            echo -e "${RED}✗ Failed to apply migrations for $service${NC}"
            log_message "ERROR" "Failed to apply migrations for $service"
            APPLY_FAILED_COUNT=$((APPLY_FAILED_COUNT + 1))
        fi
        echo ""
    done

    echo -e "${BLUE}Migration Application Summary:${NC}"
    echo -e "${GREEN}✓ Applied: $APPLIED_COUNT services${NC}"
    echo -e "${RED}✗ Failed: $APPLY_FAILED_COUNT services${NC}"
    echo ""
fi
# Clean up temporary files
rm -f /tmp/*_migration.log /tmp/*_downgrade.log /tmp/*_apply.log 2>/dev/null || true
log_message "INFO" "Cleaned up temporary files"

echo -e "${BLUE}Next steps:${NC}"
echo -e "${YELLOW}1. Review the generated migrations in services/*/migrations/versions/${NC}"
echo -e "${YELLOW}2. Compare with the backup in $BACKUP_DIR${NC}"
echo -e "${YELLOW}3. Check logs in $LOG_FILE for details${NC}"
echo -e "${YELLOW}4. Test migrations by applying them:${NC}"
echo -e "   ${GREEN}kubectl exec -n $NAMESPACE -it <pod-name> -c <service>-${CONTAINER_SUFFIX} -- alembic upgrade head${NC}"
echo -e "${YELLOW}5. Verify tables were created (async engines need run_sync for inspection):${NC}"
echo -e "   ${GREEN}kubectl exec -n $NAMESPACE -i <pod-name> -c <service>-${CONTAINER_SUFFIX} -- python3 - <<'EOF'${NC}"
echo -e "   ${GREEN}import asyncio, os${NC}"
echo -e "   ${GREEN}from sqlalchemy.ext.asyncio import create_async_engine${NC}"
echo -e "   ${GREEN}from sqlalchemy import inspect${NC}"
echo -e "   ${GREEN}async def check_tables():${NC}"
echo -e "   ${GREEN}    engine = create_async_engine(os.getenv('<SERVICE>_DATABASE_URL'))${NC}"
echo -e "   ${GREEN}    async with engine.connect() as conn:${NC}"
echo -e "   ${GREEN}        print(await conn.run_sync(lambda c: inspect(c).get_table_names()))${NC}"
echo -e "   ${GREEN}    await engine.dispose()${NC}"
echo -e "   ${GREEN}asyncio.run(check_tables())${NC}"
echo -e "   ${GREEN}EOF${NC}"
echo -e "${YELLOW}6. If issues occur, restore from backup:${NC}"
echo -e "   ${GREEN}cp -r $BACKUP_DIR/*/versions/* services/*/migrations/versions/${NC}"
echo ""