Improve the demo feature of the project
203
scripts/cleanup_databases_k8s.sh
Executable file
@@ -0,0 +1,203 @@
#!/bin/bash

# Helper script to manually clean all service databases in Kubernetes
# This ensures databases are in a clean state before running migration generation

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
NAMESPACE="${KUBE_NAMESPACE:-bakery-ia}"

# Parse command line arguments
CONFIRM=false
SPECIFIC_SERVICE=""

while [[ $# -gt 0 ]]; do
    case $1 in
        --namespace) NAMESPACE="$2"; shift 2 ;;
        --service) SPECIFIC_SERVICE="$2"; shift 2 ;;
        --yes) CONFIRM=true; shift ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  --namespace NAME   Use specific Kubernetes namespace (default: bakery-ia)"
            echo "  --service NAME     Clean only a specific service database"
            echo "  --yes              Skip confirmation prompt"
            echo ""
            echo "Examples:"
            echo "  $0                          # Clean all databases (with confirmation)"
            echo "  $0 --service auth --yes     # Clean only the auth database without confirmation"
            exit 0
            ;;
        *) echo "Unknown option: $1"; echo "Use --help for usage information"; exit 1 ;;
    esac
done

# List of all services
SERVICES=(
    "pos" "sales" "recipes" "training" "auth" "orders" "inventory"
    "suppliers" "tenant" "notification" "alert-processor" "forecasting"
    "external" "production"
)

# If a specific service is provided, use only that
if [ -n "$SPECIFIC_SERVICE" ]; then
    SERVICES=("$SPECIFIC_SERVICE")
fi

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Database Cleanup Script (K8s)${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
echo -e "${RED}⚠ WARNING: This will DROP ALL TABLES in the following databases:${NC}"
for service in "${SERVICES[@]}"; do
    echo -e "  - ${service}"
done
echo ""

if [ "$CONFIRM" = false ]; then
    read -p "Are you sure you want to continue? (yes/no) " -r
    echo
    if [[ ! $REPLY =~ ^yes$ ]]; then
        echo -e "${YELLOW}Aborted.${NC}"
        exit 0
    fi
fi

SUCCESS_COUNT=0
FAILED_COUNT=0
FAILED_SERVICES=()

# Function to get a running pod for a service
get_running_pod() {
    local service=$1
    local pod_name=""
    local selectors=(
        "app.kubernetes.io/name=${service}-service,app.kubernetes.io/component=microservice"
        "app.kubernetes.io/name=${service}-service,app.kubernetes.io/component=worker"
        "app.kubernetes.io/name=${service}-service"
    )

    for selector in "${selectors[@]}"; do
        pod_name=$(kubectl get pods -n "$NAMESPACE" -l "$selector" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
        if [ -n "$pod_name" ]; then
            echo "$pod_name"
            return 0
        fi
    done

    echo ""
    return 1
}

echo ""
echo -e "${BLUE}Starting database cleanup...${NC}"
echo ""

for service in "${SERVICES[@]}"; do
    echo -e "${BLUE}----------------------------------------${NC}"
    echo -e "${BLUE}Cleaning: $service${NC}"
    echo -e "${BLUE}----------------------------------------${NC}"

    # Find pod
    POD_NAME=$(get_running_pod "$service")
    if [ -z "$POD_NAME" ]; then
        echo -e "${RED}✗ No running pod found, skipping...${NC}"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (pod not found)")
        continue
    fi

    echo -e "${GREEN}✓ Found pod: $POD_NAME${NC}"

    # Get database URL environment variable name (e.g. alert-processor -> ALERT_PROCESSOR_DATABASE_URL)
    db_env_var=$(echo "$service" | tr '[:lower:]-' '[:upper:]_')_DATABASE_URL
    CONTAINER="${service}-service"

    # Drop all tables
    CLEANUP_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def cleanup_database():
    try:
        engine = create_async_engine(os.getenv('$db_env_var'))

        # Get list of tables before cleanup
        async with engine.connect() as conn:
            result = await conn.execute(text(\"\"\"
                SELECT COUNT(*)
                FROM pg_tables
                WHERE schemaname = 'public'
            \"\"\"))
            table_count_before = result.scalar()

        # Drop and recreate public schema
        async with engine.begin() as conn:
            await conn.execute(text('DROP SCHEMA IF EXISTS public CASCADE'))
            await conn.execute(text('CREATE SCHEMA public'))
            await conn.execute(text('GRANT ALL ON SCHEMA public TO PUBLIC'))

        # Verify cleanup
        async with engine.connect() as conn:
            result = await conn.execute(text(\"\"\"
                SELECT COUNT(*)
                FROM pg_tables
                WHERE schemaname = 'public'
            \"\"\"))
            table_count_after = result.scalar()

        await engine.dispose()
        print(f'SUCCESS: Dropped {table_count_before} tables, {table_count_after} remaining')
        return 0
    except Exception as e:
        print(f'ERROR: {str(e)}')
        return 1

exit(asyncio.run(cleanup_database()))
EOFPYTHON
" 2>&1)

    if echo "$CLEANUP_RESULT" | grep -q "SUCCESS"; then
        echo -e "${GREEN}✓ Database cleaned successfully${NC}"
        echo -e "${BLUE}  $CLEANUP_RESULT${NC}"
        SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
    else
        echo -e "${RED}✗ Database cleanup failed${NC}"
        echo -e "${YELLOW}Error details: $CLEANUP_RESULT${NC}"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (cleanup failed)")
    fi
    echo ""
done

# Summary
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Summary${NC}"
echo -e "${BLUE}========================================${NC}"
echo -e "${GREEN}✓ Successfully cleaned: $SUCCESS_COUNT databases${NC}"
echo -e "${RED}✗ Failed: $FAILED_COUNT databases${NC}"

if [ "$FAILED_COUNT" -gt 0 ]; then
    echo ""
    echo -e "${RED}Failed services:${NC}"
    for failed_service in "${FAILED_SERVICES[@]}"; do
        echo -e "${RED}  - $failed_service${NC}"
    done
fi

echo ""
if [ "$SUCCESS_COUNT" -gt 0 ]; then
    echo -e "${GREEN}Databases are now clean and ready for migration generation!${NC}"
    echo -e "${YELLOW}Next step: Run ./regenerate_migrations_k8s.sh${NC}"
fi
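
Usage note: the invocations below come straight from the script's own --help text; the custom namespace value is only an illustration.

    # Clean every service database in the default bakery-ia namespace (asks for confirmation)
    ./scripts/cleanup_databases_k8s.sh

    # Clean only the auth database, non-interactively, in a custom namespace
    ./scripts/cleanup_databases_k8s.sh --service auth --yes --namespace staging
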
246
scripts/complete-cleanup.sh
Executable file
@@ -0,0 +1,246 @@
#!/bin/bash

# Complete Cleanup Script for Kind + Colima + Skaffold Environment
# This script removes all resources, images, and configurations

set -e

echo "🧹 Complete Cleanup for Bakery IA Development Environment"
echo "========================================================"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Show what will be cleaned up
show_cleanup_plan() {
    echo ""
    print_warning "This script will clean up:"
    echo "  🚀 Skaffold deployments and resources"
    echo "  🐋 Docker images (bakery/* images)"
    echo "  ☸️ Kubernetes resources in the bakery-ia namespace"
    echo "  🔒 cert-manager and TLS certificates"
    echo "  🌐 NGINX Ingress Controller"
    echo "  📦 Kind cluster (bakery-ia-local)"
    echo "  🐳 Colima Docker runtime"
    echo "  📝 Local certificate files"
    echo "  🗂️ /etc/hosts entries (optional)"
    echo ""
    read -p "❓ Do you want to continue? (y/N): " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        print_status "Cleanup cancelled"
        exit 0
    fi
}

# 1. Cleanup Skaffold deployments
cleanup_skaffold() {
    print_status "🚀 Cleaning up Skaffold deployments..."

    if command -v skaffold &> /dev/null; then
        # Try to delete with different profiles
        skaffold delete --profile=dev 2>/dev/null || true
        skaffold delete --profile=debug 2>/dev/null || true
        skaffold delete 2>/dev/null || true
        print_success "Skaffold deployments cleaned up"
    else
        print_warning "Skaffold not found, skipping Skaffold cleanup"
    fi
}

# 2. Cleanup Kubernetes resources
cleanup_kubernetes() {
    print_status "☸️ Cleaning up Kubernetes resources..."

    if command -v kubectl &> /dev/null && kubectl cluster-info &> /dev/null; then
        # Delete application namespace and all resources
        kubectl delete namespace bakery-ia --ignore-not-found=true

        # Delete cert-manager
        kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.yaml --ignore-not-found=true 2>/dev/null || true

        # Delete NGINX Ingress
        kubectl delete -f https://kind.sigs.k8s.io/examples/ingress/deploy-ingress-nginx.yaml --ignore-not-found=true 2>/dev/null || true

        # Delete any remaining cluster-wide resources
        kubectl delete clusterissuers --all --ignore-not-found=true 2>/dev/null || true
        kubectl delete clusterroles,clusterrolebindings -l app.kubernetes.io/name=cert-manager --ignore-not-found=true 2>/dev/null || true

        print_success "Kubernetes resources cleaned up"
    else
        print_warning "kubectl not available or cluster not running, skipping Kubernetes cleanup"
    fi
}

# 3. Cleanup Docker images in Colima
cleanup_docker_images() {
    print_status "🐋 Cleaning up Docker images..."

    if command -v docker &> /dev/null && docker info &> /dev/null; then
        # Remove bakery-specific images
        print_status "Removing bakery/* images..."
        docker images --format "{{.Repository}}:{{.Tag}}" | grep "^bakery/" | xargs -r docker rmi -f 2>/dev/null || true

        # Remove dangling images
        print_status "Removing dangling images..."
        docker image prune -f 2>/dev/null || true

        # Remove unused images (optional - uncomment if you want aggressive cleanup)
        # print_status "Removing unused images..."
        # docker image prune -a -f 2>/dev/null || true

        # Remove build cache
        print_status "Cleaning build cache..."
        docker builder prune -f 2>/dev/null || true

        print_success "Docker images cleaned up"
    else
        print_warning "Docker not available, skipping Docker cleanup"
    fi
}

# 4. Delete Kind cluster
cleanup_kind_cluster() {
    print_status "📦 Deleting Kind cluster..."

    if command -v kind &> /dev/null; then
        # Delete the specific cluster
        kind delete cluster --name bakery-ia-local 2>/dev/null || true

        # Also clean up any other bakery clusters
        kind get clusters 2>/dev/null | grep -E "(bakery|dev)" | xargs -r -I {} kind delete cluster --name {} 2>/dev/null || true

        print_success "Kind cluster deleted"
    else
        print_warning "Kind not found, skipping cluster cleanup"
    fi
}

# 5. Stop and clean Colima
cleanup_colima() {
    print_status "🐳 Cleaning up Colima..."

    if command -v colima &> /dev/null; then
        # Stop the specific profile
        colima stop --profile k8s-local 2>/dev/null || true

        # Delete the profile (removes all data)
        read -p "❓ Do you want to delete the Colima profile (removes all Docker data)? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            colima delete --profile k8s-local --force 2>/dev/null || true
            print_success "Colima profile deleted"
        else
            print_warning "Colima profile kept (stopped only)"
        fi
    else
        print_warning "Colima not found, skipping Colima cleanup"
    fi
}

# 6. Cleanup local files
cleanup_local_files() {
    print_status "📝 Cleaning up local files..."

    # Remove certificate files
    rm -f bakery-ia-ca.crt 2>/dev/null || true
    rm -f *.crt *.key 2>/dev/null || true

    # Remove any Skaffold cache (if it exists)
    rm -rf ~/.skaffold/cache 2>/dev/null || true

    print_success "Local files cleaned up"
}

# 7. Cleanup /etc/hosts entries (optional)
cleanup_hosts_file() {
    print_status "🗂️ Cleaning up /etc/hosts entries..."

    if grep -q "bakery-ia.local" /etc/hosts 2>/dev/null; then
        read -p "❓ Remove bakery-ia entries from /etc/hosts? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            # Back up the hosts file first
            sudo cp /etc/hosts /etc/hosts.backup.$(date +%Y%m%d_%H%M%S)

            # Remove entries
            sudo sed -i '' '/bakery-ia.local/d' /etc/hosts
            sudo sed -i '' '/api.bakery-ia.local/d' /etc/hosts
            sudo sed -i '' '/monitoring.bakery-ia.local/d' /etc/hosts

            print_success "Hosts file entries removed"
        else
            print_warning "Hosts file entries kept"
        fi
    else
        print_status "No bakery-ia entries found in /etc/hosts"
    fi
}

# 8. Show system status after cleanup
show_cleanup_summary() {
    echo ""
    print_success "🎉 Cleanup completed!"
    echo ""
    print_status "System status after cleanup:"

    # Check remaining Docker images
    if command -v docker &> /dev/null && docker info &> /dev/null; then
        local bakery_images=$(docker images --format "{{.Repository}}:{{.Tag}}" | grep "^bakery/" | wc -l)
        echo "  🐋 Bakery Docker images remaining: $bakery_images"
    fi

    # Check Kind clusters
    if command -v kind &> /dev/null; then
        local clusters=$(kind get clusters 2>/dev/null | wc -l)
        echo "  📦 Kind clusters remaining: $clusters"
    fi

    # Check Colima status
    if command -v colima &> /dev/null; then
        local colima_status=$(colima status --profile k8s-local 2>/dev/null | head -n1 || echo "Not running")
        echo "  🐳 Colima k8s-local status: $colima_status"
    fi

    echo ""
    print_status "To restart the development environment:"
    echo "  🚀 Quick start: ./skaffold-dev.sh"
    echo "  🔒 With HTTPS: ./setup-https.sh"
    echo "  🏗️ Manual: colima start --cpu 4 --memory 8 --disk 50 --runtime docker --profile k8s-local"
}

# Main execution
main() {
    show_cleanup_plan
    cleanup_skaffold
    cleanup_kubernetes
    cleanup_docker_images
    cleanup_kind_cluster
    cleanup_colima
    cleanup_local_files
    cleanup_hosts_file
    show_cleanup_summary
}

# Run main function
main "$@"
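
Usage note: a quick post-run sanity check, built from the same commands the script's own summary step uses (all three tools are optional; skip any that are not installed):

    kind get clusters                                   # bakery-ia-local should be gone
    docker images | grep "^bakery/" || echo "no bakery images left"
    colima status --profile k8s-local                   # should report the profile as stopped or missing
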
@@ -1 +0,0 @@
"""Demo Data Seeding Scripts"""
@@ -1,234 +0,0 @@
#!/usr/bin/env python3
"""
Clone Demo Tenant Data - Database Level
Clones all data from the base template tenant to a virtual demo tenant across all databases
"""

import asyncio
import sys
import os
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy import select
import uuid
import structlog

# Add app to path for imports
sys.path.insert(0, '/app')

logger = structlog.get_logger()

# Base template tenant IDs
DEMO_TENANT_SAN_PABLO = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
DEMO_TENANT_LA_ESPIGA = "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7"


async def clone_inventory_data(base_tenant_id: str, virtual_tenant_id: str):
    """Clone inventory database tables using the ORM"""
    database_url = os.getenv("INVENTORY_DATABASE_URL")
    if not database_url:
        logger.warning("INVENTORY_DATABASE_URL not set, skipping inventory data")
        return 0

    engine = create_async_engine(database_url, echo=False)
    session_factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    total_cloned = 0

    try:
        from app.models.inventory import Ingredient

        async with session_factory() as session:
            # Clone ingredients
            result = await session.execute(
                select(Ingredient).where(Ingredient.tenant_id == uuid.UUID(base_tenant_id))
            )
            base_ingredients = result.scalars().all()

            logger.info(f"Found {len(base_ingredients)} ingredients to clone")

            for ing in base_ingredients:
                new_ing = Ingredient(
                    id=uuid.uuid4(),
                    tenant_id=uuid.UUID(virtual_tenant_id),
                    name=ing.name,
                    sku=ing.sku,
                    barcode=ing.barcode,
                    product_type=ing.product_type,
                    ingredient_category=ing.ingredient_category,
                    product_category=ing.product_category,
                    subcategory=ing.subcategory,
                    description=ing.description,
                    brand=ing.brand,
                    unit_of_measure=ing.unit_of_measure,
                    package_size=ing.package_size,
                    average_cost=ing.average_cost,
                    last_purchase_price=ing.last_purchase_price,
                    standard_cost=ing.standard_cost,
                    low_stock_threshold=ing.low_stock_threshold,
                    reorder_point=ing.reorder_point,
                    reorder_quantity=ing.reorder_quantity,
                    max_stock_level=ing.max_stock_level,
                    shelf_life_days=ing.shelf_life_days,
                    is_perishable=ing.is_perishable,
                    is_active=ing.is_active,
                    allergen_info=ing.allergen_info
                )
                session.add(new_ing)
                total_cloned += 1

            await session.commit()
            logger.info(f"Cloned {total_cloned} ingredients")

    except Exception as e:
        logger.error(f"Failed to clone inventory data: {str(e)}", exc_info=True)
        raise
    finally:
        await engine.dispose()

    return total_cloned


async def clone_sales_data(base_tenant_id: str, virtual_tenant_id: str):
    """Clone sales database tables"""
    database_url = os.getenv("SALES_DATABASE_URL")
    if not database_url:
        logger.warning("SALES_DATABASE_URL not set, skipping sales data")
        return 0

    # Sales cloning not implemented yet
    logger.info("Sales data cloning not yet implemented")
    return 0


async def clone_orders_data(base_tenant_id: str, virtual_tenant_id: str):
    """Clone orders database tables"""
    database_url = os.getenv("ORDERS_DATABASE_URL")
    if not database_url:
        logger.warning("ORDERS_DATABASE_URL not set, skipping orders data")
        return 0

    # Orders cloning not implemented yet
    logger.info("Orders data cloning not yet implemented")
    return 0


async def create_virtual_tenant(virtual_tenant_id: str, demo_account_type: str):
    """Create the virtual tenant record in the tenant database"""
    database_url = os.getenv("TENANT_DATABASE_URL")
    if not database_url:
        logger.warning("TENANT_DATABASE_URL not set, skipping tenant creation")
        return

    engine = create_async_engine(database_url, echo=False)
    session_factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        # Import after adding to path
        from services.tenant.app.models.tenants import Tenant

        async with session_factory() as session:
            # Check if the tenant already exists
            result = await session.execute(
                select(Tenant).where(Tenant.id == uuid.UUID(virtual_tenant_id))
            )
            existing = result.scalars().first()

            if existing:
                logger.info(f"Virtual tenant {virtual_tenant_id} already exists")
                return

            # Create virtual tenant
            tenant = Tenant(
                id=uuid.UUID(virtual_tenant_id),
                name="Demo Session Tenant",
                is_demo=True,
                is_demo_template=False,
                business_model=demo_account_type
            )
            session.add(tenant)
            await session.commit()
            logger.info(f"Created virtual tenant {virtual_tenant_id}")

    except ImportError:
        # Tenant model not available, skip
        logger.warning("Could not import Tenant model, skipping virtual tenant creation")
    except Exception as e:
        logger.error(f"Failed to create virtual tenant: {str(e)}", exc_info=True)
    finally:
        await engine.dispose()


async def clone_demo_tenant(virtual_tenant_id: str, demo_account_type: str = "individual_bakery"):
    """
    Main function to clone all demo data for a virtual tenant

    Args:
        virtual_tenant_id: The UUID of the virtual tenant to create
        demo_account_type: Type of demo account (individual_bakery or central_baker)
    """
    base_tenant_id = DEMO_TENANT_SAN_PABLO if demo_account_type == "individual_bakery" else DEMO_TENANT_LA_ESPIGA

    logger.info(
        "Starting demo tenant cloning",
        virtual_tenant=virtual_tenant_id,
        base_tenant=base_tenant_id,
        demo_type=demo_account_type
    )

    try:
        # Create the virtual tenant record
        await create_virtual_tenant(virtual_tenant_id, demo_account_type)

        # Clone data from each database
        stats = {
            "inventory": await clone_inventory_data(base_tenant_id, virtual_tenant_id),
            "sales": await clone_sales_data(base_tenant_id, virtual_tenant_id),
            "orders": await clone_orders_data(base_tenant_id, virtual_tenant_id),
        }

        total_records = sum(stats.values())
        logger.info(
            "Demo tenant cloning completed successfully",
            virtual_tenant=virtual_tenant_id,
            total_records=total_records,
            stats=stats
        )

        # Print summary for job logs
        print(f"✅ Cloning completed: {total_records} total records")
        print(f"   - Inventory: {stats['inventory']} records")
        print(f"   - Sales: {stats['sales']} records")
        print(f"   - Orders: {stats['orders']} records")

        return True

    except Exception as e:
        logger.error(
            "Demo tenant cloning failed",
            virtual_tenant=virtual_tenant_id,
            error=str(e),
            exc_info=True
        )
        print(f"❌ Cloning failed: {str(e)}")
        return False


if __name__ == "__main__":
    # Get the virtual tenant ID from the environment or a CLI argument
    virtual_tenant_id = os.getenv("VIRTUAL_TENANT_ID") or (sys.argv[1] if len(sys.argv) > 1 else None)
    demo_type = os.getenv("DEMO_ACCOUNT_TYPE", "individual_bakery")

    if not virtual_tenant_id:
        print("Usage: python clone_demo_tenant.py <virtual_tenant_id>")
        print("   or: VIRTUAL_TENANT_ID=<uuid> python clone_demo_tenant.py")
        sys.exit(1)

    # Validate the UUID
    try:
        uuid.UUID(virtual_tenant_id)
    except ValueError:
        print(f"Error: Invalid UUID format: {virtual_tenant_id}")
        sys.exit(1)

    result = asyncio.run(clone_demo_tenant(virtual_tenant_id, demo_type))
    sys.exit(0 if result else 1)
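
Usage note: two equivalent invocations, mirroring the script's own usage text (the UUID is only a placeholder):

    # Positional-argument form
    python clone_demo_tenant.py 123e4567-e89b-42d3-a456-426614174000

    # Environment-variable form, cloning from the central-baker template instead
    VIRTUAL_TENANT_ID=123e4567-e89b-42d3-a456-426614174000 \
    DEMO_ACCOUNT_TYPE=central_baker python clone_demo_tenant.py
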
@@ -1,278 +0,0 @@
"""
Demo AI Models Seed Script
Creates fake AI models for demo tenants to populate the models list
without having actual trained model files.
"""

import asyncio
import sys
import os
from uuid import UUID
from datetime import datetime, timezone, timedelta
from decimal import Decimal

# Add project root to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))

from sqlalchemy import select
from shared.database.base import create_database_manager
import structlog

# Import models - these paths work both locally and in container
try:
    # Container environment (training-service image)
    from app.models.training import TrainedModel
except ImportError:
    # Local environment
    from services.training.app.models.training import TrainedModel

# Tenant model - define minimal version for container environment
try:
    from services.tenant.app.models.tenants import Tenant
except ImportError:
    # If running in training-service container, define minimal Tenant model
    from sqlalchemy import Column, String, Boolean
    from sqlalchemy.dialects.postgresql import UUID as PGUUID
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Tenant(Base):
        __tablename__ = "tenants"
        id = Column(PGUUID(as_uuid=True), primary_key=True)
        name = Column(String)
        is_demo = Column(Boolean)
        is_demo_template = Column(Boolean)

logger = structlog.get_logger()


class DemoAIModelSeeder:
    """Seed fake AI models for demo tenants"""

    def __init__(self):
        self.training_db_url = os.getenv("TRAINING_DATABASE_URL")
        self.tenant_db_url = os.getenv("TENANT_DATABASE_URL")

        if not self.training_db_url or not self.tenant_db_url:
            raise ValueError("Missing required database URLs")

        self.training_db = create_database_manager(self.training_db_url, "demo-ai-seed")
        self.tenant_db = create_database_manager(self.tenant_db_url, "demo-tenant-seed")

    async def get_demo_tenants(self):
        """Get all demo tenants"""
        async with self.tenant_db.get_session() as session:
            result = await session.execute(
                select(Tenant).where(Tenant.is_demo == True, Tenant.is_demo_template == True)
            )
            return result.scalars().all()

    async def get_tenant_products(self, tenant_id: UUID):
        """
        Get finished products for a tenant from the inventory database.
        We need to query the actual inventory to get real product UUIDs.
        """
        try:
            inventory_db_url = os.getenv("INVENTORY_DATABASE_URL")
            if not inventory_db_url:
                logger.warning("INVENTORY_DATABASE_URL not set, cannot get products")
                return []

            inventory_db = create_database_manager(inventory_db_url, "demo-inventory-check")

            # Define minimal Ingredient model for querying
            from sqlalchemy import Column, String, Enum as SQLEnum
            from sqlalchemy.dialects.postgresql import UUID as PGUUID
            from sqlalchemy.ext.declarative import declarative_base
            import enum

            Base = declarative_base()

            class IngredientType(str, enum.Enum):
                INGREDIENT = "INGREDIENT"
                FINISHED_PRODUCT = "FINISHED_PRODUCT"

            class Ingredient(Base):
                __tablename__ = "ingredients"
                id = Column(PGUUID(as_uuid=True), primary_key=True)
                tenant_id = Column(PGUUID(as_uuid=True))
                name = Column(String)
                ingredient_type = Column(SQLEnum(IngredientType, name="ingredienttype"))

            async with inventory_db.get_session() as session:
                result = await session.execute(
                    select(Ingredient).where(
                        Ingredient.tenant_id == tenant_id,
                        Ingredient.ingredient_type == IngredientType.FINISHED_PRODUCT
                    ).limit(10)  # Get up to 10 finished products
                )
                products = result.scalars().all()

                product_list = [
                    {"id": product.id, "name": product.name}
                    for product in products
                ]

                logger.info(f"Found {len(product_list)} finished products for tenant",
                            tenant_id=str(tenant_id))

                return product_list

        except Exception as e:
            logger.error("Error fetching tenant products", error=str(e), tenant_id=str(tenant_id))
            return []

    async def create_fake_model(self, session, tenant_id: UUID, product_info: dict):
        """Create a fake AI model entry for a product"""
        now = datetime.now(timezone.utc)
        training_start = now - timedelta(days=90)
        training_end = now - timedelta(days=7)

        fake_model = TrainedModel(
            tenant_id=tenant_id,
            inventory_product_id=product_info["id"],
            model_type="prophet_optimized",
            model_version="1.0-demo",
            job_id=f"demo-job-{tenant_id}-{product_info['id']}",

            # Fake file paths (files don't actually exist)
            model_path=f"/fake/models/{tenant_id}/{product_info['id']}/model.pkl",
            metadata_path=f"/fake/models/{tenant_id}/{product_info['id']}/metadata.json",

            # Fake but realistic metrics
            mape=Decimal("12.5"),       # Mean Absolute Percentage Error
            mae=Decimal("2.3"),         # Mean Absolute Error
            rmse=Decimal("3.1"),        # Root Mean Squared Error
            r2_score=Decimal("0.85"),   # R-squared
            training_samples=60,        # 60 days of training data

            # Fake hyperparameters
            hyperparameters={
                "changepoint_prior_scale": 0.05,
                "seasonality_prior_scale": 10.0,
                "holidays_prior_scale": 10.0,
                "seasonality_mode": "multiplicative"
            },

            # Features used
            features_used=["weekday", "month", "is_holiday", "temperature", "precipitation"],

            # Normalization params (fake)
            normalization_params={
                "temperature": {"mean": 15.0, "std": 5.0},
                "precipitation": {"mean": 2.0, "std": 1.5}
            },

            # Model status
            is_active=True,
            is_production=False,  # Demo models are not production-ready

            # Training data info
            training_start_date=training_start,
            training_end_date=training_end,
            data_quality_score=Decimal("0.75"),  # Good but not excellent

            # Metadata
            notes="Demo model - No actual trained file exists. For demonstration purposes only.",
            created_by="demo-seed-script",
            created_at=now,
            updated_at=now,
            last_used_at=None
        )

        session.add(fake_model)
        return fake_model

    async def seed_models_for_tenant(self, tenant: Tenant):
        """Create fake AI models for a demo tenant"""
        logger.info("Creating fake AI models for demo tenant",
                    tenant_id=str(tenant.id),
                    tenant_name=tenant.name)

        try:
            # Get products for this tenant
            products = await self.get_tenant_products(tenant.id)

            async with self.training_db.get_session() as session:
                models_created = 0

                for product in products:
                    # Check if model already exists
                    result = await session.execute(
                        select(TrainedModel).where(
                            TrainedModel.tenant_id == tenant.id,
                            TrainedModel.inventory_product_id == product["id"]
                        )
                    )
                    existing_model = result.scalars().first()

                    if existing_model:
                        logger.info("Model already exists, skipping",
                                    tenant_id=str(tenant.id),
                                    product_id=product["id"])
                        continue

                    # Create fake model
                    model = await self.create_fake_model(session, tenant.id, product)
                    models_created += 1

                    logger.info("Created fake AI model",
                                tenant_id=str(tenant.id),
                                product_id=product["id"],
                                model_id=str(model.id))

                await session.commit()

            logger.info("Successfully created fake AI models for tenant",
                        tenant_id=str(tenant.id),
                        models_created=models_created)

        except Exception as e:
            logger.error("Error creating fake AI models for tenant",
                         tenant_id=str(tenant.id),
                         error=str(e))
            raise

    async def seed_all_demo_models(self):
        """Seed fake AI models for all demo tenants"""
        logger.info("Starting demo AI models seeding")

        try:
            # Get all demo tenants
            demo_tenants = await self.get_demo_tenants()

            if not demo_tenants:
                logger.warning("No demo tenants found")
                return

            logger.info(f"Found {len(demo_tenants)} demo tenants")

            # Seed models for each tenant
            for tenant in demo_tenants:
                await self.seed_models_for_tenant(tenant)

            logger.info("✅ Demo AI models seeding completed successfully",
                        tenants_processed=len(demo_tenants))

        except Exception as e:
            logger.error("❌ Demo AI models seeding failed", error=str(e))
            raise


async def main():
    """Main entry point"""
    logger.info("Demo AI Models Seed Script started")

    try:
        seeder = DemoAIModelSeeder()
        await seeder.seed_all_demo_models()
        logger.info("Demo AI models seed completed successfully")

    except Exception as e:
        logger.error("Demo AI models seed failed", error=str(e))
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
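
Usage note: a sketch of a manual run. The seeder reads TRAINING_DATABASE_URL, TENANT_DATABASE_URL, and INVENTORY_DATABASE_URL from the environment; the URLs and the script path below are placeholders for whatever your deployment uses:

    TRAINING_DATABASE_URL='postgresql+asyncpg://user:pass@host/training_db' \
    TENANT_DATABASE_URL='postgresql+asyncpg://user:pass@host/tenant_db' \
    INVENTORY_DATABASE_URL='postgresql+asyncpg://user:pass@host/inventory_db' \
    python scripts/demo/seed_demo_ai_models.py
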
@@ -1,338 +0,0 @@
#!/usr/bin/env python3
"""
Seed Demo Inventory Data
Populates comprehensive Spanish inventory data for both demo tenants
"""

import asyncio
import sys
from pathlib import Path

project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

import os
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy import select, delete
import structlog
import uuid
from datetime import datetime, timedelta, timezone

logger = structlog.get_logger()

# Demo tenant IDs
DEMO_TENANT_SAN_PABLO = "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6"
DEMO_TENANT_LA_ESPIGA = "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7"


async def seed_inventory_for_tenant(session, tenant_id: str, business_model: str):
    """Seed inventory data for a specific tenant"""
    try:
        from app.models.inventory import Ingredient, Stock, StockMovement
    except ImportError:
        from services.inventory.app.models.inventory import Ingredient, Stock, StockMovement

    logger.info(f"Seeding inventory for {business_model}", tenant_id=tenant_id)

    # Check if data already exists - if so, skip seeding to avoid duplicates
    result = await session.execute(
        select(Ingredient).where(Ingredient.tenant_id == uuid.UUID(tenant_id)).limit(1)
    )
    existing = result.scalars().first()
    if existing:
        logger.info(f"Demo tenant {tenant_id} already has inventory data, skipping seed")
        return

    if business_model == "individual_bakery":
        await seed_individual_bakery_inventory(session, tenant_id)
    elif business_model == "central_baker_satellite":
        await seed_central_baker_inventory(session, tenant_id)


async def seed_individual_bakery_inventory(session, tenant_id: str):
    """Seed inventory for an individual bakery (produces locally)"""
    try:
        from app.models.inventory import Ingredient, Stock
    except ImportError:
        from services.inventory.app.models.inventory import Ingredient, Stock

    tenant_uuid = uuid.UUID(tenant_id)

    # Raw ingredients for local production:
    # (name, product_type, ingredient_category, product_category, unit_of_measure,
    #  low_stock_threshold, reorder_point, reorder_quantity, average_cost, brand)
    ingredients_data = [
        # Flours
        ("Harina de Trigo 000", "INGREDIENT", "FLOUR", None, "KILOGRAMS", 25.0, 50.0, 200.0, 2.50, "Molinos del Valle"),
        ("Harina Integral", "INGREDIENT", "FLOUR", None, "KILOGRAMS", 15.0, 30.0, 100.0, 3.20, "Bio Natural"),
        ("Harina de Centeno", "INGREDIENT", "FLOUR", None, "KILOGRAMS", 10.0, 20.0, 50.0, 3.50, "Ecológica"),

        # Yeasts
        ("Levadura Fresca", "INGREDIENT", "YEAST", None, "KILOGRAMS", 1.0, 2.5, 10.0, 8.50, "Levapan"),
        ("Levadura Seca Activa", "INGREDIENT", "YEAST", None, "KILOGRAMS", 0.5, 1.0, 5.0, 12.00, "Fleischmann"),

        # Fats
        ("Mantequilla", "INGREDIENT", "FATS", None, "KILOGRAMS", 3.0, 8.0, 25.0, 6.80, "La Serenísima"),
        ("Aceite de Oliva Virgen Extra", "INGREDIENT", "FATS", None, "LITERS", 2.0, 5.0, 20.0, 15.50, "Cocinero"),

        # Dairy and eggs
        ("Huevos Frescos", "INGREDIENT", "EGGS", None, "UNITS", 36, 60, 180, 0.25, "Granja San José"),
        ("Leche Entera", "INGREDIENT", "DAIRY", None, "LITERS", 5.0, 12.0, 50.0, 1.80, "La Serenísima"),
        ("Nata para Montar", "INGREDIENT", "DAIRY", None, "LITERS", 2.0, 5.0, 20.0, 3.50, "Central Lechera"),

        # Sugars
        ("Azúcar Blanca", "INGREDIENT", "SUGAR", None, "KILOGRAMS", 8.0, 20.0, 100.0, 1.20, "Ledesma"),
        ("Azúcar Morena", "INGREDIENT", "SUGAR", None, "KILOGRAMS", 3.0, 8.0, 25.0, 2.80, "Orgánica"),
        ("Azúcar Glass", "INGREDIENT", "SUGAR", None, "KILOGRAMS", 2.0, 5.0, 20.0, 2.20, "Ledesma"),

        # Salt and spices
        ("Sal Fina", "INGREDIENT", "SALT", None, "KILOGRAMS", 2.0, 5.0, 20.0, 0.80, "Celusal"),
        ("Canela en Polvo", "INGREDIENT", "SPICES", None, "GRAMS", 50, 150, 500, 0.08, "Alicante"),
        ("Vainilla en Extracto", "INGREDIENT", "SPICES", None, "MILLILITERS", 100, 250, 1000, 0.15, "McCormick"),

        # Chocolates and additives
        ("Chocolate Negro 70%", "INGREDIENT", "ADDITIVES", None, "KILOGRAMS", 1.0, 3.0, 15.0, 8.50, "Valor"),
        ("Cacao en Polvo", "INGREDIENT", "ADDITIVES", None, "KILOGRAMS", 0.5, 2.0, 10.0, 6.50, "Nestlé"),
        ("Nueces Peladas", "INGREDIENT", "ADDITIVES", None, "KILOGRAMS", 0.5, 1.5, 8.0, 12.00, "Los Nogales"),
        ("Pasas de Uva", "INGREDIENT", "ADDITIVES", None, "KILOGRAMS", 1.0, 2.0, 10.0, 4.50, "Mendoza Premium"),

        # Finished products (local production)
        ("Croissant Clásico", "FINISHED_PRODUCT", None, "CROISSANTS", "PIECES", 12, 30, 80, 1.20, None),
        ("Pan Integral", "FINISHED_PRODUCT", None, "BREAD", "PIECES", 8, 20, 50, 2.50, None),
        ("Napolitana de Chocolate", "FINISHED_PRODUCT", None, "PASTRIES", "PIECES", 10, 25, 60, 1.80, None),
        ("Pan de Masa Madre", "FINISHED_PRODUCT", None, "BREAD", "PIECES", 6, 15, 40, 3.50, None),
        ("Magdalena de Vainilla", "FINISHED_PRODUCT", None, "PASTRIES", "PIECES", 8, 20, 50, 1.00, None),
    ]

    ingredient_map = {}
    for name, product_type, ing_cat, prod_cat, uom, low_stock, reorder, reorder_qty, cost, brand in ingredients_data:
        ing = Ingredient(
            id=uuid.uuid4(),
            tenant_id=tenant_uuid,
            name=name,
            product_type=product_type,
            ingredient_category=ing_cat,
            product_category=prod_cat,
            unit_of_measure=uom,
            low_stock_threshold=low_stock,
            reorder_point=reorder,
            reorder_quantity=reorder_qty,
            average_cost=cost,
            brand=brand,
            is_active=True,
            is_perishable=(ing_cat in ["DAIRY", "EGGS"] if ing_cat else False),
            shelf_life_days=7 if ing_cat in ["DAIRY", "EGGS"] else (365 if ing_cat else 2),
            created_at=datetime.now(timezone.utc)
        )
        session.add(ing)
        ingredient_map[name] = ing

    await session.commit()

    # Create stock lots
    now = datetime.now(timezone.utc)

    # Harina de Trigo - healthy stock level
    harina_trigo = ingredient_map["Harina de Trigo 000"]
    session.add(Stock(
        id=uuid.uuid4(),
        tenant_id=tenant_uuid,
        ingredient_id=harina_trigo.id,
        production_stage="raw_ingredient",
        current_quantity=120.0,
        reserved_quantity=15.0,
        available_quantity=105.0,
        batch_number=f"HARINA-TRI-{now.strftime('%Y%m%d')}-001",
        received_date=now - timedelta(days=5),
        expiration_date=now + timedelta(days=360),
        unit_cost=2.50,
        total_cost=300.0,
        storage_location="Almacén Principal - Estante A1",
        is_available=True,
        is_expired=False,
        quality_status="good",
        created_at=now
    ))

    # Levadura Fresca - low stock (critical)
    levadura = ingredient_map["Levadura Fresca"]
    session.add(Stock(
        id=uuid.uuid4(),
        tenant_id=tenant_uuid,
        ingredient_id=levadura.id,
        production_stage="raw_ingredient",
        current_quantity=0.8,
        reserved_quantity=0.3,
        available_quantity=0.5,
        batch_number=f"LEVAD-FRE-{now.strftime('%Y%m%d')}-001",
        received_date=now - timedelta(days=2),
        expiration_date=now + timedelta(days=5),
        unit_cost=8.50,
        total_cost=6.8,
        storage_location="Cámara Fría - Nivel 2",
        is_available=True,
        is_expired=False,
        quality_status="good",
        created_at=now
    ))

    # Croissants - fresh batch
    croissant = ingredient_map["Croissant Clásico"]
    session.add(Stock(
        id=uuid.uuid4(),
        tenant_id=tenant_uuid,
        ingredient_id=croissant.id,
        production_stage="fully_baked",
        current_quantity=35,
        reserved_quantity=5,
        available_quantity=30,
        batch_number=f"CROIS-FRESH-{now.strftime('%Y%m%d')}-001",
        received_date=now - timedelta(hours=4),
        expiration_date=now + timedelta(hours=20),
        unit_cost=1.20,
        total_cost=42.0,
        storage_location="Vitrina Principal - Nivel 1",
        is_available=True,
        is_expired=False,
        quality_status="good",
        created_at=now
    ))

    await session.commit()
    logger.info("Individual bakery inventory seeded")


async def seed_central_baker_inventory(session, tenant_id: str):
    """Seed inventory for a central baker satellite (receives products)"""
    try:
        from app.models.inventory import Ingredient, Stock
    except ImportError:
        from services.inventory.app.models.inventory import Ingredient, Stock

    tenant_uuid = uuid.UUID(tenant_id)

    # Finished and par-baked products from the central baker
    ingredients_data = [
        # Par-baked products (from the central production facility)
        ("Croissant Pre-Horneado", "FINISHED_PRODUCT", None, "CROISSANTS", "PIECES", 20, 50, 150, 0.85, "Obrador Central"),
        ("Pan Baguette Pre-Horneado", "FINISHED_PRODUCT", None, "BREAD", "PIECES", 15, 40, 120, 1.20, "Obrador Central"),
        ("Napolitana Pre-Horneada", "FINISHED_PRODUCT", None, "PASTRIES", "PIECES", 15, 35, 100, 1.50, "Obrador Central"),
        ("Pan de Molde Pre-Horneado", "FINISHED_PRODUCT", None, "BREAD", "PIECES", 10, 25, 80, 1.80, "Obrador Central"),

        # Finished products (ready for sale)
        ("Croissant de Mantequilla", "FINISHED_PRODUCT", None, "CROISSANTS", "PIECES", 15, 40, 100, 1.20, "Obrador Central"),
        ("Palmera de Hojaldre", "FINISHED_PRODUCT", None, "PASTRIES", "PIECES", 10, 30, 80, 2.20, "Obrador Central"),
        ("Magdalena Tradicional", "FINISHED_PRODUCT", None, "PASTRIES", "PIECES", 12, 30, 80, 1.00, "Obrador Central"),
        ("Empanada de Atún", "FINISHED_PRODUCT", None, "OTHER_PRODUCTS", "PIECES", 8, 20, 60, 3.50, "Obrador Central"),
        ("Pan Integral de Molde", "FINISHED_PRODUCT", None, "BREAD", "PIECES", 10, 25, 75, 2.80, "Obrador Central"),

        # A few basic ingredients
        ("Café en Grano", "INGREDIENT", "OTHER", None, "KILOGRAMS", 2.0, 5.0, 20.0, 18.50, "Lavazza"),
        ("Leche para Cafetería", "INGREDIENT", "DAIRY", None, "LITERS", 10.0, 20.0, 80.0, 1.50, "Central Lechera"),
        ("Azúcar para Cafetería", "INGREDIENT", "SUGAR", None, "KILOGRAMS", 3.0, 8.0, 30.0, 1.00, "Azucarera"),
    ]

    ingredient_map = {}
    for name, product_type, ing_cat, prod_cat, uom, low_stock, reorder, reorder_qty, cost, brand in ingredients_data:
        ing = Ingredient(
            id=uuid.uuid4(),
            tenant_id=tenant_uuid,
            name=name,
            product_type=product_type,
            ingredient_category=ing_cat,
            product_category=prod_cat,
            unit_of_measure=uom,
            low_stock_threshold=low_stock,
            reorder_point=reorder,
            reorder_quantity=reorder_qty,
            average_cost=cost,
            brand=brand,
            is_active=True,
            is_perishable=True,
            shelf_life_days=3,
            created_at=datetime.now(timezone.utc)
        )
        session.add(ing)
        ingredient_map[name] = ing

    await session.commit()

    # Create stock lots
    now = datetime.now(timezone.utc)

    # Par-baked croissants
    croissant_pre = ingredient_map["Croissant Pre-Horneado"]
    session.add(Stock(
        id=uuid.uuid4(),
        tenant_id=tenant_uuid,
        ingredient_id=croissant_pre.id,
        production_stage="par_baked",
        current_quantity=75,
        reserved_quantity=15,
        available_quantity=60,
        batch_number=f"CROIS-PAR-{now.strftime('%Y%m%d')}-001",
        received_date=now - timedelta(days=1),
        expiration_date=now + timedelta(days=4),
        unit_cost=0.85,
        total_cost=63.75,
        storage_location="Congelador - Sección A",
        is_available=True,
        is_expired=False,
        quality_status="good",
        created_at=now
    ))

    # Finished palmiers
    palmera = ingredient_map["Palmera de Hojaldre"]
    session.add(Stock(
        id=uuid.uuid4(),
        tenant_id=tenant_uuid,
        ingredient_id=palmera.id,
        production_stage="fully_baked",
        current_quantity=28,
        reserved_quantity=4,
        available_quantity=24,
        batch_number=f"PALM-{now.strftime('%Y%m%d')}-001",
        received_date=now - timedelta(hours=3),
        expiration_date=now + timedelta(hours=45),
        unit_cost=2.20,
        total_cost=61.6,
        storage_location="Vitrina Pasteles - Nivel 2",
        is_available=True,
        is_expired=False,
        quality_status="good",
        created_at=now
    ))

    await session.commit()
    logger.info("Central baker satellite inventory seeded")


async def seed_demo_inventory():
    """Main seeding function"""
    database_url = os.getenv("INVENTORY_DATABASE_URL")
    if not database_url:
        logger.error("INVENTORY_DATABASE_URL not set")
        return False

    engine = create_async_engine(database_url, echo=False)
    session_factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with session_factory() as session:
            # Seed both demo tenants
            await seed_inventory_for_tenant(session, DEMO_TENANT_SAN_PABLO, "individual_bakery")
            await seed_inventory_for_tenant(session, DEMO_TENANT_LA_ESPIGA, "central_baker_satellite")

        logger.info("Demo inventory data seeded successfully")
        return True

    except Exception as e:
        logger.error(f"Failed to seed inventory: {str(e)}")
        import traceback
        traceback.print_exc()
        return False

    finally:
        await engine.dispose()


if __name__ == "__main__":
    result = asyncio.run(seed_demo_inventory())
    sys.exit(0 if result else 1)
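
Usage note: to spot-check the seeded data, a direct query against the inventory database should show ingredient rows for both template tenant IDs. The psql DSN below is a placeholder; the table name comes from the minimal model above (__tablename__ = "ingredients"):

    psql 'postgresql://user:pass@host/inventory_db' \
         -c "SELECT tenant_id, COUNT(*) FROM ingredients GROUP BY tenant_id;"
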
@@ -1,144 +0,0 @@
#!/usr/bin/env python3
"""
Seed Demo Tenants
Creates base demo tenant templates with Spanish data
"""

import asyncio
import sys
from pathlib import Path

project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

import os
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy import select
import structlog
import uuid
from datetime import datetime, timezone

logger = structlog.get_logger()

# Demo tenant configurations
DEMO_TENANTS = [
    {
        "id": "a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6",
        "name": "Panadería San Pablo - Demo",
        "subdomain": "demo-sanpablo",
        "business_type": "bakery",
        "business_model": "individual_bakery",
        "owner_id": "c1a2b3c4-d5e6-47a8-b9c0-d1e2f3a4b5c6",  # María García
        "address": "Calle Mayor, 15",
        "city": "Madrid",
        "postal_code": "28013",
        "latitude": 40.4168,
        "longitude": -3.7038,
        "phone": "+34 912 345 678",
        "email": "contacto@panaderiasanpablo.com",
        "subscription_tier": "professional",
        "is_active": True,
        "is_demo": True,
        "is_demo_template": True,
        "ml_model_trained": True,
    },
    {
        "id": "b2c3d4e5-f6a7-48b9-c0d1-e2f3a4b5c6d7",
        "name": "Panadería La Espiga - Demo",
        "subdomain": "demo-laespiga",
        "business_type": "bakery",
        "business_model": "central_baker_satellite",
        "owner_id": "d2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7",  # Carlos Martínez
        "address": "Avenida de la Constitución, 42",
        "city": "Barcelona",
        "postal_code": "08001",
        "latitude": 41.3851,
        "longitude": 2.1734,
        "phone": "+34 913 456 789",
        "email": "contacto@panaderialaespiga.com",
        "subscription_tier": "enterprise",
        "is_active": True,
        "is_demo": True,
        "is_demo_template": True,
        "ml_model_trained": True,
    }
]


async def seed_demo_tenants():
    """Seed demo tenants into the tenant database"""

    database_url = os.getenv("TENANT_DATABASE_URL")
    if not database_url:
        logger.error("TENANT_DATABASE_URL environment variable not set")
        return False

    logger.info("Connecting to tenant database", url=database_url.split("@")[-1])

    engine = create_async_engine(database_url, echo=False)
    session_factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with session_factory() as session:
            try:
                from app.models.tenants import Tenant
            except ImportError:
                from services.tenant.app.models.tenants import Tenant

            for tenant_data in DEMO_TENANTS:
                # Check if the tenant already exists
                result = await session.execute(
                    select(Tenant).where(Tenant.subdomain == tenant_data["subdomain"])
                )
                existing_tenant = result.scalar_one_or_none()

                if existing_tenant:
                    logger.info(f"Demo tenant already exists: {tenant_data['subdomain']}")
                    continue

                # Create a new demo tenant
                tenant = Tenant(
                    id=uuid.UUID(tenant_data["id"]),
                    name=tenant_data["name"],
                    subdomain=tenant_data["subdomain"],
                    business_type=tenant_data["business_type"],
                    business_model=tenant_data["business_model"],
                    owner_id=uuid.UUID(tenant_data["owner_id"]),
                    address=tenant_data["address"],
                    city=tenant_data["city"],
                    postal_code=tenant_data["postal_code"],
                    latitude=tenant_data.get("latitude"),
                    longitude=tenant_data.get("longitude"),
                    phone=tenant_data.get("phone"),
                    email=tenant_data.get("email"),
                    subscription_tier=tenant_data["subscription_tier"],
                    is_active=tenant_data["is_active"],
                    is_demo=tenant_data["is_demo"],
                    is_demo_template=tenant_data["is_demo_template"],
                    ml_model_trained=tenant_data.get("ml_model_trained", False),
                    created_at=datetime.now(timezone.utc),
                    updated_at=datetime.now(timezone.utc)
                )

                session.add(tenant)
                logger.info(f"Created demo tenant: {tenant_data['name']}")

            await session.commit()
            logger.info("Demo tenants seeded successfully")
            return True

    except Exception as e:
        logger.error(f"Failed to seed demo tenants: {str(e)}")
        import traceback
        traceback.print_exc()
        return False

    finally:
        await engine.dispose()


if __name__ == "__main__":
    result = asyncio.run(seed_demo_tenants())
    sys.exit(0 if result else 1)
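
Note: each tenant's owner_id deliberately matches the id of the corresponding demo user created by the users seed script below (María García for San Pablo, Carlos Martínez for La Espiga), which is why the manual seeding script at the end of this diff runs the users step before the tenants step.
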
@@ -1,121 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Seed Demo Users
|
||||
Creates demo user accounts for production demo environment
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

import os

from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy import select
import structlog
import uuid

logger = structlog.get_logger()

# Demo user configurations (public credentials for prospects)
DEMO_USERS = [
    {
        "id": "c1a2b3c4-d5e6-47a8-b9c0-d1e2f3a4b5c6",
        "email": "demo.individual@panaderiasanpablo.com",
        "password_hash": "$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewY5GyYVPWzO8hGi",  # DemoSanPablo2024!
        "full_name": "María García López",
        "phone": "+34 912 345 678",
        "language": "es",
        "timezone": "Europe/Madrid",
        "role": "owner",
        "is_active": True,
        "is_verified": True,
        "is_demo": True
    },
    {
        "id": "d2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7",
        "email": "demo.central@panaderialaespiga.com",
        "password_hash": "$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewY5GyYVPWzO8hGi",  # DemoLaEspiga2024!
        "full_name": "Carlos Martínez Ruiz",
        "phone": "+34 913 456 789",
        "language": "es",
        "timezone": "Europe/Madrid",
        "role": "owner",
        "is_active": True,
        "is_verified": True,
        "is_demo": True
    }
]


async def seed_demo_users():
    """Seed demo users into auth database"""

    database_url = os.getenv("AUTH_DATABASE_URL")
    if not database_url:
        logger.error("AUTH_DATABASE_URL environment variable not set")
        return False

    logger.info("Connecting to auth database", url=database_url.split("@")[-1])

    engine = create_async_engine(database_url, echo=False)
    session_factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

    try:
        async with session_factory() as session:
            # Import User model
            try:
                from app.models.users import User
            except ImportError:
                from services.auth.app.models.users import User
            from datetime import datetime, timezone

            for user_data in DEMO_USERS:
                # Check if user already exists
                result = await session.execute(
                    select(User).where(User.email == user_data["email"])
                )
                existing_user = result.scalar_one_or_none()

                if existing_user:
                    logger.info(f"Demo user already exists: {user_data['email']}")
                    continue

                # Create new demo user
                user = User(
                    id=uuid.UUID(user_data["id"]),
                    email=user_data["email"],
                    hashed_password=user_data["password_hash"],
                    full_name=user_data["full_name"],
                    phone=user_data.get("phone"),
                    language=user_data.get("language", "es"),
                    timezone=user_data.get("timezone", "Europe/Madrid"),
                    role=user_data.get("role", "owner"),
                    is_active=user_data.get("is_active", True),
                    is_verified=user_data.get("is_verified", True),
                    created_at=datetime.now(timezone.utc),
                    updated_at=datetime.now(timezone.utc)
                )

                session.add(user)
                logger.info(f"Created demo user: {user_data['email']}")

            await session.commit()
            logger.info("Demo users seeded successfully")
            return True

    except Exception as e:
        logger.error(f"Failed to seed demo users: {str(e)}")
        return False

    finally:
        await engine.dispose()


if __name__ == "__main__":
    result = asyncio.run(seed_demo_users())
    sys.exit(0 if result else 1)
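For reference, a minimal invocation sketch for the seeder above. The script path is an assumption inferred from the scripts.demo.seed_demo_users import used elsewhere in this commit, and the connection string mirrors the standalone compose file below; only AUTH_DATABASE_URL is actually required by the code:

    export AUTH_DATABASE_URL="postgresql+asyncpg://auth_user:auth_pass123@localhost:5432/auth_db"
    python3 scripts/demo/seed_demo_users.py
    echo $?   # 0 on success, 1 on failure, per the __main__ block above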
72
scripts/docker-compose.yml
Normal file
@@ -0,0 +1,72 @@
# ================================================================
# services/auth/docker-compose.yml (For standalone testing)
# ================================================================

services:
  auth-db:
    image: postgres:15-alpine
    container_name: auth-db
    environment:
      POSTGRES_DB: auth_db
      POSTGRES_USER: auth_user
      POSTGRES_PASSWORD: auth_pass123
    ports:
      - "5432:5432"
    volumes:
      - auth_db_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U auth_user -d auth_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    container_name: auth-redis
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  rabbitmq:
    image: rabbitmq:3-management-alpine
    container_name: auth-rabbitmq
    environment:
      RABBITMQ_DEFAULT_USER: bakery
      RABBITMQ_DEFAULT_PASS: forecast123
    ports:
      - "5672:5672"
      - "15672:15672"
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "ping"]
      interval: 30s
      timeout: 10s
      retries: 5

  auth-service:
    build: .
    container_name: auth-service
    environment:
      - DATABASE_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db
      - REDIS_URL=redis://redis:6379/0
      - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
      - DEBUG=true
      - LOG_LEVEL=INFO
    ports:
      - "8001:8000"
    depends_on:
      auth-db:
        condition: service_healthy
      redis:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
    volumes:
      - .:/app
    command: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload

volumes:
  auth_db_data:
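A typical standalone bring-up with this compose file might look as follows (a sketch; assumes Docker Compose v2 and the repository root as the working directory):

    docker compose -f scripts/docker-compose.yml up -d auth-db redis rabbitmq
    docker compose -f scripts/docker-compose.yml ps      # wait for the health checks to pass
    docker compose -f scripts/docker-compose.yml up -d auth-service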
@@ -1,49 +0,0 @@
#!/usr/bin/env python3
"""
Manual demo data seeding script
Run this to populate the base demo template tenant with inventory data
"""

import asyncio
import sys
import os

# Add the project root to Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))


async def seed_demo_data():
    """Seed demo data by running all seed scripts in order"""
    from scripts.demo.seed_demo_users import main as seed_users
    from scripts.demo.seed_demo_tenants import main as seed_tenants
    from scripts.demo.seed_demo_inventory import main as seed_inventory
    from scripts.demo.seed_demo_ai_models import main as seed_ai_models

    print("🌱 Starting demo data seeding...")

    try:
        print("\n📝 Step 1: Seeding demo users...")
        await seed_users()
        print("✅ Demo users seeded successfully")

        print("\n🏢 Step 2: Seeding demo tenants...")
        await seed_tenants()
        print("✅ Demo tenants seeded successfully")

        print("\n📦 Step 3: Seeding demo inventory...")
        await seed_inventory()
        print("✅ Demo inventory seeded successfully")

        print("\n🤖 Step 4: Seeding demo AI models...")
        await seed_ai_models()
        print("✅ Demo AI models seeded successfully")

        print("\n🎉 All demo data seeded successfully!")

    except Exception as e:
        print(f"\n❌ Error during seeding: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

if __name__ == "__main__":
    asyncio.run(seed_demo_data())
166
scripts/regenerate_all_migrations.sh
Executable file
@@ -0,0 +1,166 @@
#!/bin/bash

# Convenience script: Clean databases and regenerate all migrations in one command
# This wraps the two-step process into a single workflow

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
NAMESPACE="${KUBE_NAMESPACE:-bakery-ia}"
SKIP_CONFIRMATION=false
APPLY_MIGRATIONS=false
VERBOSE=false

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --namespace) NAMESPACE="$2"; shift 2 ;;
        --yes) SKIP_CONFIRMATION=true; shift ;;
        --apply) APPLY_MIGRATIONS=true; shift ;;
        --verbose) VERBOSE=true; shift ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "This script performs the complete migration regeneration workflow:"
            echo "  1. Clean all service databases"
            echo "  2. Generate new migrations from models"
            echo "  3. Optionally apply migrations"
            echo ""
            echo "Options:"
            echo "  --namespace NAME  Use specific Kubernetes namespace (default: bakery-ia)"
            echo "  --yes             Skip all confirmation prompts"
            echo "  --apply           Apply migrations after generation"
            echo "  --verbose         Enable detailed logging"
            echo ""
            echo "Examples:"
            echo "  $0                    # Interactive mode (with confirmations)"
            echo "  $0 --yes --verbose    # Automated mode with detailed output"
            echo "  $0 --apply            # Generate and apply migrations"
            exit 0
            ;;
        *) echo "Unknown option: $1"; echo "Use --help for usage information"; exit 1 ;;
    esac
done

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Complete Migration Regeneration Workflow${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
echo -e "${YELLOW}This script will:${NC}"
echo -e "${YELLOW}  1. Clean all service databases (DROP all tables)${NC}"
echo -e "${YELLOW}  2. Generate new migrations from models${NC}"
if [ "$APPLY_MIGRATIONS" = true ]; then
    echo -e "${YELLOW}  3. Apply migrations to databases${NC}"
fi
echo ""
echo -e "${YELLOW}Namespace: $NAMESPACE${NC}"
echo ""

if [ "$SKIP_CONFIRMATION" = false ]; then
    echo -e "${RED}⚠ WARNING: This will DROP ALL TABLES in all service databases!${NC}"
    echo ""
    read -p "Continue? (y/n) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo -e "${YELLOW}Aborted.${NC}"
        exit 0
    fi
fi

echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Step 1: Cleaning Databases${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""

# Build cleanup command
CLEANUP_CMD="./cleanup_databases_k8s.sh --namespace $NAMESPACE"
if [ "$SKIP_CONFIRMATION" = true ]; then
    CLEANUP_CMD="$CLEANUP_CMD --yes"
fi

# Run cleanup
if ! $CLEANUP_CMD; then
    echo -e "${RED}✗ Database cleanup failed!${NC}"
    echo -e "${YELLOW}Cannot proceed with migration generation.${NC}"
    exit 1
fi

echo ""
echo -e "${GREEN}✓ Database cleanup completed${NC}"
echo ""

# Wait a moment for database connections to settle
sleep 2

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Step 2: Generating Migrations${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""

# Build migration command
MIGRATION_CMD="./regenerate_migrations_k8s.sh --namespace $NAMESPACE"
if [ "$VERBOSE" = true ]; then
    MIGRATION_CMD="$MIGRATION_CMD --verbose"
fi
if [ "$APPLY_MIGRATIONS" = true ]; then
    MIGRATION_CMD="$MIGRATION_CMD --apply"
fi

# Run migration generation (with automatic 'y' response).
# Capture the exit code with || so 'set -e' does not abort the script
# before the summary below can run.
MIGRATION_EXIT_CODE=0
if [ "$SKIP_CONFIRMATION" = true ]; then
    echo "y" | $MIGRATION_CMD || MIGRATION_EXIT_CODE=$?
else
    $MIGRATION_CMD || MIGRATION_EXIT_CODE=$?
fi

echo ""
if [ $MIGRATION_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}========================================${NC}"
    echo -e "${GREEN}✓ Workflow Completed Successfully!${NC}"
    echo -e "${GREEN}========================================${NC}"
    echo ""
    echo -e "${YELLOW}Summary:${NC}"
    echo -e "  ${GREEN}✓${NC} Databases cleaned"
    echo -e "  ${GREEN}✓${NC} Migrations generated"
    if [ "$APPLY_MIGRATIONS" = true ]; then
        echo -e "  ${GREEN}✓${NC} Migrations applied"
    fi
    echo ""
    echo -e "${YELLOW}Generated migration files:${NC}"
    find services/*/migrations/versions/ -name "*.py" -type f -mmin -5 2>/dev/null | while read file; do
        size=$(wc -c < "$file" | tr -d ' ')
        echo -e "  ${GREEN}✓${NC} $file ($size bytes)"
    done
    echo ""
    echo -e "${YELLOW}Next steps:${NC}"
    echo -e "  1. Review the generated migrations above"
    echo -e "  2. Verify migrations contain schema operations (not just 'pass')"
    if [ "$APPLY_MIGRATIONS" = false ]; then
        echo -e "  3. Apply migrations: ./regenerate_migrations_k8s.sh --apply"
        echo -e "  4. Commit migrations: git add services/*/migrations/versions/*.py"
    else
        echo -e "  3. Commit migrations: git add services/*/migrations/versions/*.py"
    fi
else
    echo -e "${RED}========================================${NC}"
    echo -e "${RED}✗ Migration Generation Failed${NC}"
    echo -e "${RED}========================================${NC}"
    echo ""
    echo -e "${YELLOW}Check the log file for details.${NC}"
    echo -e "${YELLOW}Common issues:${NC}"
    echo -e "  - Pod not running for some services"
    echo -e "  - Database connectivity issues"
    echo -e "  - Model import errors"
    echo ""
    exit 1
fi
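For reference, a fully automated run of this wrapper might look like the following (a sketch; the wrapper invokes ./cleanup_databases_k8s.sh and ./regenerate_migrations_k8s.sh relative to the working directory, so it is assumed to be run from scripts/):

    cd scripts
    ./regenerate_all_migrations.sh --namespace bakery-ia --yes --verbose --apply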
779
scripts/regenerate_migrations_k8s.sh
Executable file
@@ -0,0 +1,779 @@
#!/bin/bash

# Script to regenerate Alembic migrations using Kubernetes local dev environment
# This script backs up existing migrations and generates new ones based on current models

set -euo pipefail # Exit on error, undefined variables, and pipe failures

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
NAMESPACE="${KUBE_NAMESPACE:-bakery-ia}"
LOG_FILE="migration_script_$(date +%Y%m%d_%H%M%S).log"
BACKUP_RETENTION_DAYS=7
CONTAINER_SUFFIX="service" # Default container name suffix (e.g., pos-service)

# Parse command line arguments
DRY_RUN=false
SKIP_BACKUP=false
APPLY_MIGRATIONS=false
CHECK_EXISTING=false
VERBOSE=false
SKIP_DB_CHECK=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run) DRY_RUN=true; shift ;;
        --skip-backup) SKIP_BACKUP=true; shift ;;
        --apply) APPLY_MIGRATIONS=true; shift ;;
        --check-existing) CHECK_EXISTING=true; shift ;;
        --verbose) VERBOSE=true; shift ;;
        --skip-db-check) SKIP_DB_CHECK=true; shift ;;
        --namespace) NAMESPACE="$2"; shift 2 ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  --dry-run         Show what would be done without making changes"
            echo "  --skip-backup     Skip backing up existing migrations"
            echo "  --apply           Automatically apply migrations after generation"
            echo "  --check-existing  Check for and copy existing migrations from pods first"
            echo "  --verbose         Enable detailed logging"
            echo "  --skip-db-check   Skip database connectivity check"
            echo "  --namespace NAME  Use specific Kubernetes namespace (default: bakery-ia)"
            echo ""
            echo "Examples:"
            echo "  $0 --namespace dev --dry-run  # Simulate migration regeneration"
            echo "  $0 --apply --verbose          # Generate and apply migrations with detailed logs"
            echo "  $0 --skip-db-check            # Skip database connectivity check"
            exit 0
            ;;
        *) echo "Unknown option: $1"; echo "Use --help for usage information"; exit 1 ;;
    esac
done

# List of all services
SERVICES=(
    "pos" "sales" "recipes" "training" "auth" "orders" "inventory"
    "suppliers" "tenant" "notification" "alert-processor" "forecasting"
    "external" "production" "demo-session"
)

# Backup directory
BACKUP_DIR="migrations_backup_$(date +%Y%m%d_%H%M%S)"
mkdir -p "$BACKUP_DIR"

# Initialize log file
touch "$LOG_FILE"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Starting migration regeneration" >> "$LOG_FILE"

# Function to log messages (defined before preflight_checks, which calls it)
log_message() {
    local level=$1
    local message=$2
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $level: $message" >> "$LOG_FILE"
    if [ "$VERBOSE" = true ] || [ "$level" = "ERROR" ]; then
        echo -e "${YELLOW}$message${NC}"
    fi
}
# Function to perform pre-flight checks
preflight_checks() {
    echo -e "${BLUE}========================================${NC}"
    echo -e "${BLUE}Pre-flight Checks${NC}"
    echo -e "${BLUE}========================================${NC}"
    echo ""

    local checks_passed=true

    # Check kubectl
    echo -e "${YELLOW}Checking kubectl...${NC}"
    if ! command -v kubectl &> /dev/null; then
        echo -e "${RED}✗ kubectl not found${NC}"
        log_message "ERROR" "kubectl not found"
        checks_passed=false
    else
        KUBECTL_VERSION=$(kubectl version --client --short 2>/dev/null | grep -oP 'v\d+\.\d+\.\d+' || echo "unknown")
        echo -e "${GREEN}✓ kubectl found (version: $KUBECTL_VERSION)${NC}"
    fi

    # Check cluster connectivity
    echo -e "${YELLOW}Checking Kubernetes cluster connectivity...${NC}"
    if ! kubectl cluster-info &> /dev/null; then
        echo -e "${RED}✗ Cannot connect to Kubernetes cluster${NC}"
        log_message "ERROR" "Cannot connect to Kubernetes cluster"
        checks_passed=false
    else
        CLUSTER_NAME=$(kubectl config current-context 2>/dev/null || echo "unknown")
        echo -e "${GREEN}✓ Connected to cluster: $CLUSTER_NAME${NC}"
    fi

    # Check namespace exists
    echo -e "${YELLOW}Checking namespace '$NAMESPACE'...${NC}"
    if ! kubectl get namespace "$NAMESPACE" &> /dev/null; then
        echo -e "${RED}✗ Namespace '$NAMESPACE' not found${NC}"
        log_message "ERROR" "Namespace '$NAMESPACE' not found"
        checks_passed=false
    else
        echo -e "${GREEN}✓ Namespace exists${NC}"
    fi

    # Check if all service pods are running
    echo -e "${YELLOW}Checking service pods...${NC}"
    local pods_running=0
    for service in "${SERVICES[@]}"; do
        local pod_name=$(kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=${service}-service" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
        if [ -n "$pod_name" ]; then
            pods_running=$((pods_running + 1))
        fi
    done
    echo -e "${GREEN}✓ Found $pods_running/${#SERVICES[@]} service pods running${NC}"

    if [ $pods_running -lt ${#SERVICES[@]} ]; then
        echo -e "${YELLOW}⚠ Not all service pods are running${NC}"
        echo -e "${YELLOW}  Missing services will be skipped${NC}"
    fi

    # Check database connectivity for running services
    echo -e "${YELLOW}Checking database connectivity (sample)...${NC}"
    local sample_service="auth"
    local sample_pod=$(kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=${sample_service}-service" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")

    if [ -n "$sample_pod" ]; then
        local db_check=$(kubectl exec -n "$NAMESPACE" "$sample_pod" -c "${sample_service}-service" -- sh -c "python3 -c 'import asyncpg; print(\"OK\")' 2>/dev/null" || echo "FAIL")
        if [ "$db_check" = "OK" ]; then
            echo -e "${GREEN}✓ Database drivers available (asyncpg)${NC}"
        else
            echo -e "${YELLOW}⚠ Database driver check failed${NC}"
        fi
    else
        echo -e "${YELLOW}⚠ Cannot check database connectivity (no sample pod running)${NC}"
    fi

    # Check local directory structure
    echo -e "${YELLOW}Checking local directory structure...${NC}"
    local dirs_found=0
    for service in "${SERVICES[@]}"; do
        local service_dir=$(echo "$service" | tr '-' '_')
        if [ -d "services/$service_dir/migrations" ]; then
            dirs_found=$((dirs_found + 1))
        fi
    done
    echo -e "${GREEN}✓ Found $dirs_found/${#SERVICES[@]} service migration directories${NC}"

    # Check disk space
    echo -e "${YELLOW}Checking disk space...${NC}"
    local available_space=$(df -h . | tail -1 | awk '{print $4}')
    echo -e "${GREEN}✓ Available disk space: $available_space${NC}"

    echo ""
    if [ "$checks_passed" = false ]; then
        echo -e "${RED}========================================${NC}"
        echo -e "${RED}Pre-flight checks failed!${NC}"
        echo -e "${RED}========================================${NC}"
        echo ""
        read -p "Continue anyway? (y/n) " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            echo -e "${RED}Aborted.${NC}"
            exit 1
        fi
    else
        echo -e "${GREEN}========================================${NC}"
        echo -e "${GREEN}All pre-flight checks passed!${NC}"
        echo -e "${GREEN}========================================${NC}"
    fi
    echo ""
}

# Run pre-flight checks
preflight_checks
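# For a manual spot check of the same pod-selector logic used by the pre-flight
# checks above and by get_running_pod below, one can run (illustrative only,
# not executed by this script):
#   kubectl get pods -n bakery-ia -l app.kubernetes.io/name=auth-service \
#     --field-selector=status.phase=Running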
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Migration Regeneration Script (K8s)${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""

if [ "$DRY_RUN" = true ]; then
    echo -e "${YELLOW}🔍 DRY RUN MODE - No changes will be made${NC}"
    echo ""
fi

echo -e "${YELLOW}This script will:${NC}"
if [ "$CHECK_EXISTING" = true ]; then
    echo -e "${YELLOW}1. Check for existing migrations in pods and copy them${NC}"
fi
if [ "$SKIP_BACKUP" = false ]; then
    echo -e "${YELLOW}2. Backup existing migration files${NC}"
fi
echo -e "${YELLOW}3. Generate new migrations in Kubernetes pods${NC}"
echo -e "${YELLOW}4. Copy generated files back to local machine${NC}"
if [ "$APPLY_MIGRATIONS" = true ]; then
    echo -e "${YELLOW}5. Apply migrations to databases${NC}"
fi
if [ "$SKIP_BACKUP" = false ]; then
    echo -e "${YELLOW}6. Keep the backup in: $BACKUP_DIR${NC}"
fi
echo ""
echo -e "${YELLOW}Using Kubernetes namespace: $NAMESPACE${NC}"
echo -e "${YELLOW}Logs will be saved to: $LOG_FILE${NC}"
echo ""

if [ "$DRY_RUN" = false ]; then
    read -p "Continue? (y/n) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo -e "${RED}Aborted.${NC}"
        echo "[$(date '+%Y-%m-%d %H:%M:%S')] Aborted by user" >> "$LOG_FILE"
        exit 1
    fi
fi

# Kubernetes setup already verified in pre-flight checks

# Function to get a running pod for a service
get_running_pod() {
    local service=$1
    local pod_name=""
    local selectors=(
        "app.kubernetes.io/name=${service}-service,app.kubernetes.io/component=microservice"
        "app.kubernetes.io/name=${service}-service,app.kubernetes.io/component=worker"
        "app.kubernetes.io/name=${service}-service"
    )

    for selector in "${selectors[@]}"; do
        pod_name=$(kubectl get pods -n "$NAMESPACE" -l "$selector" --field-selector=status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
        if [ -n "$pod_name" ]; then
            echo "$pod_name"
            return 0
        fi
    done

    echo ""
    return 1
}
# Check for existing migrations in pods if requested
if [ "$CHECK_EXISTING" = true ]; then
    echo -e "${BLUE}Step 1.5: Checking for existing migrations in pods...${NC}"
    echo ""

    FOUND_COUNT=0
    COPIED_COUNT=0

    for service in "${SERVICES[@]}"; do
        service_dir=$(echo "$service" | tr '-' '_')
        echo -e "${YELLOW}Checking $service...${NC}"

        # Find a running pod
        POD_NAME=$(get_running_pod "$service")
        if [ -z "$POD_NAME" ]; then
            echo -e "${YELLOW}⚠ Pod not found, skipping${NC}"
            log_message "WARNING" "No running pod found for $service"
            continue
        fi

        # Check container availability
        CONTAINER="${service}-${CONTAINER_SUFFIX}"
        if ! kubectl get pod -n "$NAMESPACE" "$POD_NAME" -o jsonpath='{.spec.containers[*].name}' | grep -qw "$CONTAINER"; then
            echo -e "${RED}✗ Container $CONTAINER not found in pod $POD_NAME, skipping${NC}"
            log_message "ERROR" "Container $CONTAINER not found in pod $POD_NAME for $service"
            continue
        fi

        # Check if migration files exist in the pod
        EXISTING_FILES=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "ls /app/migrations/versions/*.py 2>/dev/null | grep -v __pycache__ | grep -v __init__.py" 2>/dev/null || echo "")

        if [ -n "$EXISTING_FILES" ]; then
            FILE_COUNT=$(echo "$EXISTING_FILES" | wc -l | tr -d ' ')
            echo -e "${GREEN}✓ Found $FILE_COUNT migration file(s) in pod${NC}"
            FOUND_COUNT=$((FOUND_COUNT + 1))

            # Create local versions directory
            mkdir -p "services/$service_dir/migrations/versions"

            # Copy each file
            for pod_file in $EXISTING_FILES; do
                filename=$(basename "$pod_file")
                if [ "$DRY_RUN" = true ]; then
                    echo -e "${BLUE}[DRY RUN] Would copy: $filename${NC}"
                    log_message "INFO" "[DRY RUN] Would copy $filename for $service"
                else
                    if kubectl cp -n "$NAMESPACE" "$POD_NAME:$pod_file" "services/$service_dir/migrations/versions/$filename" -c "$CONTAINER" 2>>"$LOG_FILE"; then
                        echo -e "${GREEN}✓ Copied: $filename${NC}"
                        COPIED_COUNT=$((COPIED_COUNT + 1))
                        log_message "INFO" "Copied $filename for $service"
                        # Display brief summary
                        echo -e "${BLUE}Preview:${NC}"
                        grep "def upgrade" "services/$service_dir/migrations/versions/$filename" | head -1
                        grep "op\." "services/$service_dir/migrations/versions/$filename" | head -3 | sed 's/^/  /'
                    else
                        echo -e "${RED}✗ Failed to copy: $filename${NC}"
                        log_message "ERROR" "Failed to copy $filename for $service"
                    fi
                fi
            done
        else
            echo -e "${YELLOW}⚠ No migration files found in pod${NC}"
            log_message "WARNING" "No migration files found in pod for $service"
        fi
        echo ""
    done

    echo -e "${BLUE}========================================${NC}"
    echo -e "${BLUE}Existing Migrations Check Summary${NC}"
    echo -e "${BLUE}========================================${NC}"
    echo -e "${GREEN}Services with migrations: $FOUND_COUNT${NC}"
    if [ "$DRY_RUN" = false ]; then
        echo -e "${GREEN}Files copied: $COPIED_COUNT${NC}"
    fi
    echo ""

    if [ "$FOUND_COUNT" -gt 0 ] && [ "$DRY_RUN" = false ]; then
        read -p "Do you want to continue with regeneration? (y/n) " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            echo -e "${YELLOW}Stopping. Existing migrations have been copied.${NC}"
            log_message "INFO" "Stopped after copying existing migrations"
            exit 0
        fi
    fi
fi
# Backup existing migrations
if [ "$SKIP_BACKUP" = false ] && [ "$DRY_RUN" = false ]; then
    echo -e "${BLUE}Step 2: Backing up existing migrations...${NC}"
    BACKUP_COUNT=0
    for service in "${SERVICES[@]}"; do
        service_dir=$(echo "$service" | tr '-' '_')
        if [ -d "services/$service_dir/migrations/versions" ] && [ -n "$(ls services/$service_dir/migrations/versions/*.py 2>/dev/null)" ]; then
            echo -e "${YELLOW}Backing up $service migrations...${NC}"
            mkdir -p "$BACKUP_DIR/$service_dir/versions"
            cp -r "services/$service_dir/migrations/versions/"*.py "$BACKUP_DIR/$service_dir/versions/" 2>>"$LOG_FILE"
            BACKUP_COUNT=$((BACKUP_COUNT + 1))
            log_message "INFO" "Backed up migrations for $service to $BACKUP_DIR/$service_dir/versions"
        else
            echo -e "${YELLOW}No migration files to backup for $service${NC}"
        fi
    done
    if [ "$BACKUP_COUNT" -gt 0 ]; then
        echo -e "${GREEN}✓ Backup complete: $BACKUP_DIR ($BACKUP_COUNT services)${NC}"
    else
        echo -e "${YELLOW}No migrations backed up (no migration files found)${NC}"
    fi
    echo ""
elif [ "$SKIP_BACKUP" = true ]; then
    echo -e "${YELLOW}Skipping backup step (--skip-backup flag)${NC}"
    log_message "INFO" "Backup skipped due to --skip-backup flag"
    echo ""
fi

# Clean up old backups
find . -maxdepth 1 -type d -name 'migrations_backup_*' -mtime +"$BACKUP_RETENTION_DAYS" -exec rm -rf {} \; 2>/dev/null || true
log_message "INFO" "Cleaned up backups older than $BACKUP_RETENTION_DAYS days"

echo -e "${BLUE}Step 3: Generating new migrations in Kubernetes...${NC}"
echo ""

SUCCESS_COUNT=0
FAILED_COUNT=0
FAILED_SERVICES=()
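# Naming conventions assumed by process_service below (illustrative examples):
#   service "pos"             -> directory services/pos,             env var POS_DATABASE_URL
#   service "alert-processor" -> directory services/alert_processor, env var ALERT_PROCESSOR_DATABASE_URL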
# Function to process a single service
process_service() {
    local service=$1
    local service_dir=$(echo "$service" | tr '-' '_')
    local db_env_var=$(echo "$service" | tr '[:lower:]-' '[:upper:]_')_DATABASE_URL # e.g., pos -> POS_DATABASE_URL, alert-processor -> ALERT_PROCESSOR_DATABASE_URL

    echo -e "${BLUE}----------------------------------------${NC}"
    echo -e "${BLUE}Processing: $service${NC}"
    echo -e "${BLUE}----------------------------------------${NC}"
    log_message "INFO" "Starting migration generation for $service"

    # Skip if no local migrations directory and --check-existing is not set
    if [ ! -d "services/$service_dir/migrations/versions" ] && [ "$CHECK_EXISTING" = false ]; then
        echo -e "${YELLOW}⚠ No local migrations/versions directory for $service, skipping...${NC}"
        log_message "WARNING" "No local migrations/versions directory for $service"
        return
    fi

    # Find a running pod
    echo -e "${YELLOW}Finding $service pod in namespace $NAMESPACE...${NC}"
    POD_NAME=$(get_running_pod "$service")
    if [ -z "$POD_NAME" ]; then
        echo -e "${RED}✗ No running pod found for $service. Skipping...${NC}"
        log_message "ERROR" "No running pod found for $service"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (pod not found)")
        return
    fi

    echo -e "${GREEN}✓ Found pod: $POD_NAME${NC}"
    log_message "INFO" "Found pod $POD_NAME for $service"

    # Check container availability
    CONTAINER="${service}-${CONTAINER_SUFFIX}"
    if ! kubectl get pod -n "$NAMESPACE" "$POD_NAME" -o jsonpath='{.spec.containers[*].name}' | grep -qw "$CONTAINER"; then
        echo -e "${RED}✗ Container $CONTAINER not found in pod $POD_NAME, skipping${NC}"
        log_message "ERROR" "Container $CONTAINER not found in pod $POD_NAME for $service"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (container not found)")
        return
    fi

    # Verify database connectivity
    if [ "$SKIP_DB_CHECK" = false ]; then
        echo -e "${YELLOW}Verifying database connectivity using $db_env_var...${NC}"
        # Check if asyncpg is installed
        ASYNCPG_CHECK=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "python3 -c \"import asyncpg; print('asyncpg OK')\" 2>/dev/null" || echo "asyncpg MISSING")
        if [[ "$ASYNCPG_CHECK" != "asyncpg OK" ]]; then
            echo -e "${YELLOW}Installing asyncpg...${NC}"
            kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "python3 -m pip install --quiet asyncpg" 2>>"$LOG_FILE"
        fi

        # Check for database URL
        DB_URL_CHECK=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "env | grep $db_env_var" 2>/dev/null || echo "")
        if [ -z "$DB_URL_CHECK" ]; then
            echo -e "${RED}✗ Environment variable $db_env_var not found in pod $POD_NAME${NC}"
            echo -e "${YELLOW}Available environment variables:${NC}"
            kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "env" 2>>"$LOG_FILE" | grep -i "database" || echo "No database-related variables found"
            log_message "ERROR" "Environment variable $db_env_var not found for $service in pod $POD_NAME"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (missing $db_env_var)")
            return
        fi

        # Log redacted database URL for debugging (mask the password part of user:password@host URLs)
        DB_URL=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- printenv "$db_env_var" 2>/dev/null | sed 's|://\([^:/]*\):[^@]*@|://\1:[REDACTED]@|')
        log_message "INFO" "Using database URL for $service: $DB_URL"

        # Perform async database connectivity check
        DB_CHECK_OUTPUT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine

async def check_db():
    engine = create_async_engine(os.getenv('$db_env_var'))
    async with engine.connect():
        pass
    await engine.dispose()
    print('DB OK')

asyncio.run(check_db())
EOFPYTHON
" 2>&1 || echo "DB ERROR")
        if [[ "$DB_CHECK_OUTPUT" == *"DB OK"* ]]; then
            echo -e "${GREEN}✓ Database connection verified${NC}"
            log_message "INFO" "Database connection verified for $service"
        else
            echo -e "${RED}✗ Database connection failed for $service${NC}"
            echo -e "${YELLOW}Error details: $DB_CHECK_OUTPUT${NC}"
            log_message "ERROR" "Database connection failed for $service: $DB_CHECK_OUTPUT"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (database connection failed)")
            return
        fi
    else
        echo -e "${YELLOW}Skipping database connectivity check (--skip-db-check)${NC}"
        log_message "INFO" "Skipped database connectivity check for $service"
    fi

    # Reset alembic version tracking
    echo -e "${YELLOW}Resetting alembic version tracking...${NC}"
    kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH alembic downgrade base" 2>&1 | tee -a "$LOG_FILE" | grep -v "^INFO" || true
    log_message "INFO" "Attempted alembic downgrade for $service"

    # Option 1: Complete database schema reset using CASCADE.
    # The '|| true' keeps 'set -e' from aborting when the reset fails;
    # failures are handled by the grep on SUCCESS below.
    echo -e "${YELLOW}Performing complete database schema reset...${NC}"
    SCHEMA_DROP_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def reset_database():
    try:
        engine = create_async_engine(os.getenv('$db_env_var'))
        async with engine.begin() as conn:
            # Drop and recreate public schema - cleanest approach
            await conn.execute(text('DROP SCHEMA IF EXISTS public CASCADE'))
            await conn.execute(text('CREATE SCHEMA public'))
            await conn.execute(text('GRANT ALL ON SCHEMA public TO PUBLIC'))
        await engine.dispose()
        print('SUCCESS: Database schema reset complete')
        return 0
    except Exception as e:
        print(f'ERROR: {str(e)}')
        return 1

exit(asyncio.run(reset_database()))
EOFPYTHON
" 2>&1 || true)

    echo "$SCHEMA_DROP_RESULT" >> "$LOG_FILE"

    if echo "$SCHEMA_DROP_RESULT" | grep -q "SUCCESS"; then
        echo -e "${GREEN}✓ Database schema reset successfully${NC}"
        log_message "INFO" "Database schema reset for $service"
    else
        echo -e "${RED}✗ Database schema reset failed${NC}"
        echo -e "${YELLOW}Error details:${NC}"
        echo "$SCHEMA_DROP_RESULT"
        log_message "ERROR" "Database schema reset failed for $service: $SCHEMA_DROP_RESULT"

        # Try alternative approach: Drop individual tables from database (not just models)
        echo -e "${YELLOW}Attempting alternative: dropping all existing tables individually...${NC}"
        TABLE_DROP_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def drop_all_tables():
    try:
        engine = create_async_engine(os.getenv('$db_env_var'))
        async with engine.begin() as conn:
            # Get all tables from database
            result = await conn.execute(text(\"\"\"
                SELECT tablename
                FROM pg_tables
                WHERE schemaname = 'public'
            \"\"\"))
            tables = [row[0] for row in result]

            # Drop each table
            for table in tables:
                await conn.execute(text(f'DROP TABLE IF EXISTS \"{table}\" CASCADE'))

        print(f'SUCCESS: Dropped {len(tables)} tables: {tables}')
        await engine.dispose()
        return 0
    except Exception as e:
        print(f'ERROR: {str(e)}')
        return 1

exit(asyncio.run(drop_all_tables()))
EOFPYTHON
" 2>&1 || true)

        echo "$TABLE_DROP_RESULT" >> "$LOG_FILE"

        if echo "$TABLE_DROP_RESULT" | grep -q "SUCCESS"; then
            echo -e "${GREEN}✓ All tables dropped successfully${NC}"
            log_message "INFO" "All tables dropped for $service"
        else
            echo -e "${RED}✗ Failed to drop tables${NC}"
            echo -e "${YELLOW}Error details:${NC}"
            echo "$TABLE_DROP_RESULT"
            log_message "ERROR" "Failed to drop tables for $service: $TABLE_DROP_RESULT"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (database cleanup failed)")
            return
        fi
    fi

    # Verify database is empty (print-only; exiting with the table count as the
    # exit code would trip 'set -e' in the calling shell)
    echo -e "${YELLOW}Verifying database is clean...${NC}"
    VERIFY_RESULT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 << 'EOFPYTHON'
import asyncio
import os
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy import text

async def verify_empty():
    engine = create_async_engine(os.getenv('$db_env_var'))
    async with engine.connect() as conn:
        result = await conn.execute(text(\"\"\"
            SELECT COUNT(*)
            FROM pg_tables
            WHERE schemaname = 'public'
        \"\"\"))
        count = result.scalar()
        print(f'Tables remaining: {count}')
    await engine.dispose()

asyncio.run(verify_empty())
EOFPYTHON
" 2>&1 || true)

    echo "$VERIFY_RESULT" >> "$LOG_FILE"
    echo -e "${BLUE}$VERIFY_RESULT${NC}"

    # Remove old migration files in pod
    echo -e "${YELLOW}Removing old migration files in pod...${NC}"
    kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "rm -rf /app/migrations/versions/*.py /app/migrations/versions/__pycache__" 2>>"$LOG_FILE" || log_message "WARNING" "Failed to remove old migration files for $service"

    # Ensure dependencies
    echo -e "${YELLOW}Ensuring python-dateutil and asyncpg are installed...${NC}"
    kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "python3 -m pip install --quiet python-dateutil asyncpg" 2>>"$LOG_FILE"

    # Generate migration (capture the exit code with || so 'set -e' does not
    # abort before the failure can be reported)
    echo -e "${YELLOW}Running alembic autogenerate in pod...${NC}"
    MIGRATION_TIMESTAMP=$(date +%Y%m%d_%H%M)
    MIGRATION_EXIT_CODE=0
    MIGRATION_OUTPUT=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH python3 -m alembic revision --autogenerate -m \"initial_schema_$MIGRATION_TIMESTAMP\"" 2>&1) || MIGRATION_EXIT_CODE=$?

    echo "$MIGRATION_OUTPUT" >> "$LOG_FILE"

    if [ $MIGRATION_EXIT_CODE -eq 0 ]; then
        echo -e "${GREEN}✓ Migration generated in pod${NC}"
        log_message "INFO" "Migration generated for $service"

        # Copy migration file
        MIGRATION_FILE=$(kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "ls -t /app/migrations/versions/*.py 2>/dev/null | head -1" || echo "")
        if [ -z "$MIGRATION_FILE" ]; then
            echo -e "${RED}✗ No migration file found in pod${NC}"
            log_message "ERROR" "No migration file generated for $service"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (no file generated)")
            return
        fi

        MIGRATION_FILENAME=$(basename "$MIGRATION_FILE")
        mkdir -p "services/$service_dir/migrations/versions"

        # Copy file with better error handling
        echo -e "${YELLOW}Copying migration file from pod...${NC}"
        CP_EXIT_CODE=0
        CP_OUTPUT=$(kubectl cp -n "$NAMESPACE" "$POD_NAME:$MIGRATION_FILE" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" -c "$CONTAINER" 2>&1) || CP_EXIT_CODE=$?

        echo "$CP_OUTPUT" >> "$LOG_FILE"

        # Verify the file was actually copied
        if [ $CP_EXIT_CODE -eq 0 ] && [ -f "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" ]; then
            LOCAL_FILE_SIZE=$(wc -c < "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" | tr -d ' ')

            if [ "$LOCAL_FILE_SIZE" -gt 0 ]; then
                echo -e "${GREEN}✓ Migration file copied: $MIGRATION_FILENAME ($LOCAL_FILE_SIZE bytes)${NC}"
                log_message "INFO" "Copied $MIGRATION_FILENAME for $service ($LOCAL_FILE_SIZE bytes)"
                SUCCESS_COUNT=$((SUCCESS_COUNT + 1))

                # Validate migration content
                echo -e "${YELLOW}Validating migration content...${NC}"
                if grep -E "op\.(create_table|add_column|create_index|alter_column|drop_table|drop_column|create_foreign_key)" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" >/dev/null; then
                    echo -e "${GREEN}✓ Migration contains schema operations${NC}"
                    log_message "INFO" "Migration contains schema operations for $service"
                elif grep -q "pass" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" && grep -q "def upgrade()" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME"; then
                    echo -e "${YELLOW}⚠ WARNING: Migration is empty (no schema changes detected)${NC}"
                    echo -e "${YELLOW}⚠ This usually means tables already exist in database matching the models${NC}"
                    log_message "WARNING" "Empty migration generated for $service - possible database cleanup issue"
                else
                    echo -e "${GREEN}✓ Migration file created${NC}"
                fi

                # Display summary
                echo -e "${BLUE}Migration summary:${NC}"
                grep -E "^def (upgrade|downgrade)" "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" | head -2
                echo -e "${BLUE}Operations:${NC}"
                grep "op\." "services/$service_dir/migrations/versions/$MIGRATION_FILENAME" | head -5 || echo "  (none found)"
            else
                echo -e "${RED}✗ Migration file is empty (0 bytes)${NC}"
                log_message "ERROR" "Migration file is empty for $service"
                rm -f "services/$service_dir/migrations/versions/$MIGRATION_FILENAME"
                FAILED_COUNT=$((FAILED_COUNT + 1))
                FAILED_SERVICES+=("$service (empty file)")
            fi
        else
            echo -e "${RED}✗ Failed to copy migration file${NC}"
            echo -e "${YELLOW}kubectl cp exit code: $CP_EXIT_CODE${NC}"
            echo -e "${YELLOW}kubectl cp output: $CP_OUTPUT${NC}"
            log_message "ERROR" "Failed to copy migration file for $service: $CP_OUTPUT"
            FAILED_COUNT=$((FAILED_COUNT + 1))
            FAILED_SERVICES+=("$service (copy failed)")
        fi
    else
        echo -e "${RED}✗ Failed to generate migration${NC}"
        log_message "ERROR" "Failed to generate migration for $service"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVICES+=("$service (generation failed)")
    fi
}
# Process services sequentially
for service in "${SERVICES[@]}"; do
    process_service "$service"
done

# Summary
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Summary${NC}"
echo -e "${BLUE}========================================${NC}"
echo -e "${GREEN}✓ Successful: $SUCCESS_COUNT services${NC}"
echo -e "${RED}✗ Failed: $FAILED_COUNT services${NC}"

if [ "$FAILED_COUNT" -gt 0 ]; then
    echo ""
    echo -e "${RED}Failed services:${NC}"
    for failed_service in "${FAILED_SERVICES[@]}"; do
        echo -e "${RED}  - $failed_service${NC}"
    done
fi

echo ""
echo -e "${YELLOW}Backup location: $BACKUP_DIR${NC}"
echo -e "${YELLOW}Log file: $LOG_FILE${NC}"
echo ""

# Apply migrations if requested
if [ "$APPLY_MIGRATIONS" = true ] && [ "$DRY_RUN" = false ] && [ "$SUCCESS_COUNT" -gt 0 ]; then
    echo -e "${BLUE}========================================${NC}"
    echo -e "${BLUE}Applying Migrations${NC}"
    echo -e "${BLUE}========================================${NC}"
    echo ""

    APPLIED_COUNT=0
    APPLY_FAILED_COUNT=0

    for service in "${SERVICES[@]}"; do
        service_dir=$(echo "$service" | tr '-' '_')
        if [ ! -d "services/$service_dir/migrations/versions" ] || [ -z "$(ls services/$service_dir/migrations/versions/*.py 2>/dev/null)" ]; then
            continue
        fi

        echo -e "${BLUE}Applying migrations for: $service${NC}"
        POD_NAME=$(get_running_pod "$service")
        if [ -z "$POD_NAME" ]; then
            echo -e "${YELLOW}⚠ Pod not found for $service, skipping...${NC}"
            log_message "WARNING" "No running pod found for $service during migration application"
            continue
        fi

        CONTAINER="${service}-${CONTAINER_SUFFIX}"
        if ! kubectl get pod -n "$NAMESPACE" "$POD_NAME" -o jsonpath='{.spec.containers[*].name}' | grep -qw "$CONTAINER"; then
            echo -e "${RED}✗ Container $CONTAINER not found in pod $POD_NAME, skipping${NC}"
            log_message "ERROR" "Container $CONTAINER not found in pod $POD_NAME for $service"
            continue
        fi

        if kubectl exec -n "$NAMESPACE" "$POD_NAME" -c "$CONTAINER" -- sh -c "cd /app && PYTHONPATH=/app:/app/shared:\$PYTHONPATH alembic upgrade head" 2>>"$LOG_FILE"; then
            echo -e "${GREEN}✓ Migrations applied successfully for $service${NC}"
            log_message "INFO" "Migrations applied for $service"
            APPLIED_COUNT=$((APPLIED_COUNT + 1))
        else
            echo -e "${RED}✗ Failed to apply migrations for $service${NC}"
            log_message "ERROR" "Failed to apply migrations for $service"
            APPLY_FAILED_COUNT=$((APPLY_FAILED_COUNT + 1))
        fi
        echo ""
    done

    echo -e "${BLUE}Migration Application Summary:${NC}"
    echo -e "${GREEN}✓ Applied: $APPLIED_COUNT services${NC}"
    echo -e "${RED}✗ Failed: $APPLY_FAILED_COUNT services${NC}"
    echo ""
fi

# Clean up temporary files
rm -f /tmp/*_migration.log /tmp/*_downgrade.log /tmp/*_apply.log 2>/dev/null || true
log_message "INFO" "Cleaned up temporary files"

echo -e "${BLUE}Next steps:${NC}"
echo -e "${YELLOW}1. Review the generated migrations in services/*/migrations/versions/${NC}"
echo -e "${YELLOW}2. Compare with the backup in $BACKUP_DIR${NC}"
echo -e "${YELLOW}3. Check logs in $LOG_FILE for details${NC}"
echo -e "${YELLOW}4. Test migrations by applying them:${NC}"
echo -e "   ${GREEN}kubectl exec -n $NAMESPACE -it <pod-name> -c <service>-${CONTAINER_SUFFIX} -- alembic upgrade head${NC}"
echo -e "${YELLOW}5. Verify tables were created (replace <SERVICE> with the service's env var prefix):${NC}"
echo -e "   ${GREEN}kubectl exec -n $NAMESPACE -i <pod-name> -c <service>-${CONTAINER_SUFFIX} -- python3 - <<'EOF'${NC}"
echo -e "   ${GREEN}import asyncio, os${NC}"
echo -e "   ${GREEN}from sqlalchemy.ext.asyncio import create_async_engine${NC}"
echo -e "   ${GREEN}from sqlalchemy import text${NC}"
echo -e "   ${GREEN}async def check_tables():${NC}"
echo -e "   ${GREEN}    engine = create_async_engine(os.getenv('<SERVICE>_DATABASE_URL'))${NC}"
echo -e "   ${GREEN}    async with engine.connect() as conn:${NC}"
echo -e "   ${GREEN}        rows = await conn.execute(text(\"SELECT tablename FROM pg_tables WHERE schemaname = 'public'\"))${NC}"
echo -e "   ${GREEN}        print([r[0] for r in rows])${NC}"
echo -e "   ${GREEN}    await engine.dispose()${NC}"
echo -e "   ${GREEN}asyncio.run(check_tables())${NC}"
echo -e "   ${GREEN}EOF${NC}"
echo -e "${YELLOW}6. If issues occur, restore from backup:${NC}"
echo -e "   ${GREEN}cp -r $BACKUP_DIR/*/versions/* services/*/migrations/versions/${NC}"
echo ""
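A cautious workflow for this script might be a dry run with --check-existing first, then a real run (a sketch using only the flags defined above):

    ./regenerate_migrations_k8s.sh --namespace bakery-ia --check-existing --dry-run
    ./regenerate_migrations_k8s.sh --namespace bakery-ia --apply --verbose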
@@ -1,138 +0,0 @@
#!/usr/bin/env python3
"""
Enhanced Migration Runner

Handles automatic table creation and Alembic migrations for Kubernetes deployments.
Supports both first-time deployments and incremental migrations.
"""

import os
import sys
import asyncio
import argparse
import structlog
from pathlib import Path

# Add the project root to the Python path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from shared.database.base import DatabaseManager
from shared.database.init_manager import initialize_service_database

# Configure logging
structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.JSONRenderer()
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

logger = structlog.get_logger()


async def run_service_migration(service_name: str, force_recreate: bool = False) -> bool:
    """
    Run migrations for a specific service.

    This script is for MIGRATION JOBS ONLY.
    Services themselves never run migrations - they only verify DB is ready.

    Args:
        service_name: Name of the service (e.g., 'auth', 'inventory')
        force_recreate: Whether to force recreate tables (development mode)

    Returns:
        True if successful, False otherwise
    """
    logger.info("Migration job starting", service=service_name, force_recreate=force_recreate)

    db_manager = None
    try:
        # Get database URL from environment (try both constructed and direct approaches)
        db_url_key = f"{service_name.upper().replace('-', '_')}_DATABASE_URL"
        database_url = os.getenv(db_url_key) or os.getenv("DATABASE_URL")

        # If no direct URL, construct from components
        if not database_url:
            host = os.getenv("POSTGRES_HOST")
            port = os.getenv("POSTGRES_PORT")
            db_name = os.getenv("POSTGRES_DB")
            user = os.getenv("POSTGRES_USER")
            password = os.getenv("POSTGRES_PASSWORD")

            if all([host, port, db_name, user, password]):
                database_url = f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{db_name}"
                logger.info("Constructed database URL from components", host=host, port=port, db=db_name)
            else:
                logger.error("Database connection details not found",
                             db_url_key=db_url_key,
                             host=bool(host),
                             port=bool(port),
                             db=bool(db_name),
                             user=bool(user),
                             password=bool(password))
                return False

        # Create database manager
        db_manager = DatabaseManager(database_url=database_url)

        # Run migrations (verify_only=False means actually run migrations)
        result = await initialize_service_database(
            database_manager=db_manager,
            service_name=service_name,
            verify_only=False,  # Migration jobs RUN migrations
            force_recreate=force_recreate
        )

        logger.info("Migration job completed successfully", service=service_name, result=result)
        return True

    except Exception as e:
        logger.error("Migration job failed", service=service_name, error=str(e))
        return False

    finally:
        # Cleanup database connections (db_manager stays None if setup failed early)
        if db_manager is not None:
            try:
                await db_manager.close_connections()
            except Exception:
                pass


async def main():
    """Main migration runner"""
    parser = argparse.ArgumentParser(description="Enhanced Migration Runner")
    parser.add_argument("service", help="Service name (e.g., auth, inventory)")
    parser.add_argument("--force-recreate", action="store_true",
                        help="Force recreate tables (development mode)")
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose logging")

    args = parser.parse_args()

    if args.verbose:
        logger.info("Starting migration runner", service=args.service,
                    force_recreate=args.force_recreate)

    # Run the migration
    success = await run_service_migration(args.service, args.force_recreate)

    if success:
        logger.info("Migration runner completed successfully")
        sys.exit(0)
    else:
        logger.error("Migration runner failed")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
649
scripts/setup-https.sh
Executable file
@@ -0,0 +1,649 @@
#!/bin/bash

# Bakery IA HTTPS Setup Script
# This script sets up HTTPS with cert-manager and Let's Encrypt for local development

# Remove -e to handle errors more gracefully
set -u

echo "🔒 Setting up HTTPS for Bakery IA with cert-manager and Let's Encrypt"
echo "==============================================================="

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check prerequisites
check_prerequisites() {
    print_status "Checking prerequisites..."

    # Check required tools
    local missing_tools=()

    if ! command -v kubectl &> /dev/null; then
        missing_tools+=("kubectl")
    fi

    if ! command -v kind &> /dev/null; then
        missing_tools+=("kind")
    fi

    if ! command -v skaffold &> /dev/null; then
        missing_tools+=("skaffold")
    fi

    if ! command -v colima &> /dev/null; then
        missing_tools+=("colima")
    fi

    # Report missing tools
    if [ ${#missing_tools[@]} -ne 0 ]; then
        print_error "Missing required tools: ${missing_tools[*]}"
        print_error "Please install them with: brew install ${missing_tools[*]}"
        exit 1
    fi

    # Check if Colima is running
    if ! colima status --profile k8s-local &> /dev/null; then
        print_warning "Colima is not running. Starting Colima..."
        colima start --cpu 4 --memory 8 --disk 100 --runtime docker --profile k8s-local
        if [ $? -ne 0 ]; then
            print_error "Failed to start Colima. Please check your Docker installation."
            exit 1
        fi
        print_success "Colima started successfully"
    fi

    # Check if cluster is running or exists
    local cluster_exists=false
    local cluster_running=false

    # Check if Kind cluster exists
    if kind get clusters | grep -q "bakery-ia-local"; then
        cluster_exists=true
        print_status "Kind cluster 'bakery-ia-local' already exists"

        # Check if kubectl can connect to it
        if kubectl cluster-info --context kind-bakery-ia-local &> /dev/null; then
            cluster_running=true
            print_success "Kubernetes cluster is running and accessible"
        else
            print_warning "Kind cluster exists but is not accessible via kubectl"
        fi
    fi

    # Handle cluster creation/recreation
    if [ "$cluster_exists" = true ] && [ "$cluster_running" = false ]; then
        print_warning "Kind cluster exists but is not running. Recreating..."
        kind delete cluster --name bakery-ia-local || true
        cluster_exists=false
    fi

    if [ "$cluster_exists" = false ]; then
        print_warning "Creating new Kind cluster..."
        if [ ! -f "kind-config.yaml" ]; then
            print_error "kind-config.yaml not found. Please ensure you're running this script from the project root."
            exit 1
        fi

        if kind create cluster --config kind-config.yaml; then
            print_success "Kind cluster created successfully"
        else
            print_error "Failed to create Kind cluster. Please check your Kind installation."
            exit 1
        fi
    fi

    # Ensure we're using the correct kubectl context
    kubectl config use-context kind-bakery-ia-local || {
        print_error "Failed to set kubectl context to kind-bakery-ia-local"
        exit 1
    }

    print_success "Prerequisites check passed"
}
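# If cluster setup loops or fails repeatedly, the state checked above can be
# inspected by hand (illustrative only, not executed by this script):
#   colima status --profile k8s-local
#   kind get clusters
#   kubectl cluster-info --context kind-bakery-ia-local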
# Install cert-manager
|
||||
install_cert_manager() {
|
||||
print_status "Installing cert-manager..."
|
||||
|
||||
# Check if cert-manager is already installed
|
||||
if kubectl get namespace cert-manager &> /dev/null; then
|
||||
print_warning "cert-manager namespace already exists. Checking if installation is complete..."
|
||||
|
||||
# Check if pods are running
|
||||
if kubectl get pods -n cert-manager | grep -q "Running"; then
|
||||
print_success "cert-manager is already installed and running"
|
||||
return 0
|
||||
else
|
||||
print_status "cert-manager exists but pods are not ready. Waiting..."
|
||||
fi
|
||||
else
|
||||
# Install cert-manager
|
||||
print_status "Installing cert-manager from official release..."
|
||||
if kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.yaml; then
|
||||
print_success "cert-manager installation started"
|
||||
else
|
||||
print_error "Failed to install cert-manager. Please check your internet connection and try again."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Wait for cert-manager namespace to be created
|
||||
print_status "Waiting for cert-manager namespace..."
|
||||
for i in {1..30}; do
|
||||
if kubectl get namespace cert-manager &> /dev/null; then
|
||||
break
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Wait for cert-manager pods to be created
|
||||
print_status "Waiting for cert-manager pods to be created..."
|
||||
for i in {1..60}; do
|
||||
if kubectl get pods -n cert-manager &> /dev/null && [ $(kubectl get pods -n cert-manager --no-headers | wc -l) -ge 3 ]; then
|
||||
print_success "cert-manager pods created"
|
||||
break
|
||||
fi
|
||||
print_status "Waiting for cert-manager pods... (attempt $i/60)"
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# Wait for cert-manager pods to be ready
|
||||
print_status "Waiting for cert-manager pods to be ready..."
|
||||
|
||||
# Use more reliable selectors for cert-manager components
|
||||
local components=(
|
||||
"app.kubernetes.io/name=cert-manager"
|
||||
"app.kubernetes.io/name=cainjector"
|
||||
"app.kubernetes.io/name=webhook"
|
||||
)
|
||||
local component_names=("cert-manager" "cert-manager-cainjector" "cert-manager-webhook")
|
||||
|
||||
for i in "${!components[@]}"; do
|
||||
local selector="${components[$i]}"
|
||||
local name="${component_names[$i]}"
|
||||
|
||||
print_status "Waiting for $name to be ready..."
|
||||
|
||||
# First check if pods exist with this selector
|
||||
local pod_count=0
|
||||
for attempt in {1..30}; do
|
||||
pod_count=$(kubectl get pods -n cert-manager -l "$selector" --no-headers 2>/dev/null | wc -l)
|
||||
if [ "$pod_count" -gt 0 ]; then
|
||||
break
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
|
||||
if [ "$pod_count" -eq 0 ]; then
|
||||
print_warning "No pods found for $name with selector $selector, trying alternative approach..."
|
||||
# Fallback: wait for any pods containing the component name
|
||||
if kubectl wait --for=condition=ready pod -n cert-manager --all --timeout=300s 2>/dev/null; then
|
||||
print_success "All cert-manager pods are ready"
|
||||
break
|
||||
else
|
||||
print_warning "$name pods not found, but continuing..."
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
|
||||
# Wait for the specific component to be ready
|
||||
if kubectl wait --for=condition=ready pod -l "$selector" -n cert-manager --timeout=300s 2>/dev/null; then
|
||||
print_success "$name is ready"
|
||||
else
|
||||
print_warning "$name is taking longer than expected. Checking status..."
|
||||
kubectl get pods -n cert-manager -l "$selector" 2>/dev/null || true
|
||||
|
||||
# Continue anyway, sometimes it works despite timeout
|
||||
print_warning "Continuing with setup. $name may still be starting..."
|
||||
fi
|
||||
done
|
||||
|
||||
# Final verification
|
||||
if kubectl get pods -n cert-manager | grep -q "Running"; then
|
||||
print_success "cert-manager installed successfully"
|
||||
else
|
||||
print_warning "cert-manager installation may not be complete. Current status:"
|
||||
kubectl get pods -n cert-manager
|
||||
print_status "Continuing with setup anyway..."
|
||||
fi
|
||||
}
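
# Manual spot-check if the waits above keep timing out. The deployment names
# below should match the upstream v1.13.2 manifest applied above:
#   kubectl -n cert-manager rollout status deployment/cert-manager
#   kubectl -n cert-manager rollout status deployment/cert-manager-cainjector
#   kubectl -n cert-manager rollout status deployment/cert-manager-webhook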

# Install NGINX Ingress Controller
install_nginx_ingress() {
    print_status "Installing NGINX Ingress Controller for Kind..."

    # Check if NGINX Ingress is already installed
    if kubectl get namespace ingress-nginx &> /dev/null; then
        print_warning "NGINX Ingress Controller namespace already exists. Checking status..."

        # Check if the controller is running
        if kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller | grep -q "Running"; then
            print_success "NGINX Ingress Controller is already running"
        else
            print_status "NGINX Ingress Controller exists but is not ready. Waiting..."
            kubectl wait --namespace ingress-nginx \
                --for=condition=ready pod \
                --selector=app.kubernetes.io/component=controller \
                --timeout=300s 2>/dev/null || {
                print_warning "Ingress controller taking longer than expected, but continuing..."
            }
        fi
    else
        # Install the Kind-specific NGINX Ingress Controller manifest
        print_status "Installing NGINX Ingress Controller for Kind..."
        if kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml; then
            print_success "NGINX Ingress Controller installation started"

            # Wait for the ingress controller to be ready
            print_status "Waiting for NGINX Ingress Controller to be ready..."
            kubectl wait --namespace ingress-nginx \
                --for=condition=ready pod \
                --selector=app.kubernetes.io/component=controller \
                --timeout=300s 2>/dev/null || {
                print_warning "Ingress controller taking longer than expected, but continuing..."
            }
        else
            print_error "Failed to install NGINX Ingress Controller"
            exit 1
        fi
    fi

    # Pin the controller service to fixed NodePorts for permanent localhost access
    print_status "Configuring permanent localhost access..."
    kubectl patch svc ingress-nginx-controller -n ingress-nginx -p '{"spec":{"type":"NodePort","ports":[{"name":"http","port":80,"targetPort":"http","nodePort":30080},{"name":"https","port":443,"targetPort":"https","nodePort":30443}]}}' || true
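
    # Note: pinning nodePorts 30080/30443 above only yields https://localhost if
    # the Kind node publishes them to the host. kind-config.yaml is assumed to
    # carry extraPortMappings along these lines (sketch, not verified against
    # the actual file):
    #   extraPortMappings:
    #     - containerPort: 30080
    #       hostPort: 80
    #     - containerPort: 30443
    #       hostPort: 443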

    print_success "NGINX Ingress Controller configured successfully"
}

# Setup cluster issuers
setup_cluster_issuers() {
    print_status "Setting up cluster issuers..."

    # Check that the cert-manager component files exist
    if [ ! -f "infrastructure/kubernetes/base/components/cert-manager/cluster-issuer-staging.yaml" ]; then
        print_error "cert-manager component files not found. Please ensure you're running this script from the project root."
        exit 1
    fi

    # Apply cluster issuers
    print_status "Applying cluster issuers..."

    local issuer_files=(
        "infrastructure/kubernetes/base/components/cert-manager/cluster-issuer-staging.yaml"
        "infrastructure/kubernetes/base/components/cert-manager/local-ca-issuer.yaml"
        "infrastructure/kubernetes/base/components/cert-manager/cluster-issuer-production.yaml"
    )

    for issuer_file in "${issuer_files[@]}"; do
        if [ -f "$issuer_file" ]; then
            print_status "Applying $issuer_file..."
            kubectl apply -f "$issuer_file" || {
                print_warning "Failed to apply $issuer_file, but continuing..."
            }
        else
            print_warning "$issuer_file not found, skipping..."
        fi
    done

    # Give cert-manager time to reconcile the issuers
    print_status "Waiting for cluster issuers to be ready..."
    sleep 15

    # Check if the issuers are ready
    print_status "Checking cluster issuer status..."
    kubectl get clusterissuers 2>/dev/null || print_warning "No cluster issuers found yet"

    # Verify that the local CA issuer is ready (if it exists)
    if kubectl get clusterissuer local-ca-issuer &> /dev/null; then
        for i in {1..10}; do
            # Read the status of the issuer's Ready condition ("True" when ready)
            local issuer_ready=$(kubectl get clusterissuer local-ca-issuer -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "")
            if [[ "$issuer_ready" == "True" ]]; then
                print_success "Local CA issuer is ready"
                break
            fi
            print_status "Waiting for local CA issuer to be ready... (attempt $i/10)"
            sleep 10
        done
    else
        print_warning "Local CA issuer not found, skipping readiness check"
    fi

    print_success "Cluster issuers configured successfully"
}
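
# For reference, local-ca-issuer.yaml is expected to define a CA-backed
# ClusterIssuer that signs from the local-ca-key-pair secret read later by
# export_ca_certificate. A sketch of the likely shape (the actual file lives
# under infrastructure/kubernetes/base/components/cert-manager/):
#   apiVersion: cert-manager.io/v1
#   kind: ClusterIssuer
#   metadata:
#     name: local-ca-issuer
#   spec:
#     ca:
#       secretName: local-ca-key-pair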

# Deploy the application with HTTPS using Skaffold
deploy_with_https() {
    print_status "Deploying Bakery IA with HTTPS support using Skaffold..."

    # Check that Skaffold is available
    if ! command -v skaffold &> /dev/null; then
        print_error "Skaffold is not installed. Please install skaffold first:"
        print_error "brew install skaffold"
        exit 1
    fi

    # Check that skaffold.yaml exists
    if [ ! -f "skaffold.yaml" ]; then
        print_error "skaffold.yaml not found. Please ensure you're running this script from the project root."
        exit 1
    fi

    # Deploy with Skaffold (builds and deploys automatically with HTTPS support)
    print_status "Building and deploying with Skaffold (dev profile includes HTTPS)..."
    if skaffold run --profile=dev; then
        print_success "Skaffold deployment started"
    else
        print_warning "Skaffold deployment had issues, but continuing..."
    fi
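
    # 'skaffold run' is a one-shot build-and-deploy; it exits once the manifests
    # are applied, which is why the namespace and deployments are polled below.
    # For iterative work with file watching, 'skaffold dev --profile=dev' is the
    # counterpart (see display_access_info).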

    # Wait for the namespace to be created
    print_status "Waiting for bakery-ia namespace..."
    for i in {1..30}; do
        if kubectl get namespace bakery-ia &> /dev/null; then
            print_success "bakery-ia namespace found"
            break
        fi
        sleep 2
    done

    # Bail out early if the namespace never appeared
    if ! kubectl get namespace bakery-ia &> /dev/null; then
        print_warning "bakery-ia namespace not found. Deployment may have failed."
        return 0
    fi

    # Wait for deployments to be ready
    print_status "Waiting for deployments to be ready..."
    if kubectl wait --for=condition=available --timeout=600s deployment --all -n bakery-ia 2>/dev/null; then
        print_success "All deployments are ready"
    else
        print_warning "Some deployments are taking longer than expected, but continuing..."
    fi

    # Verify the ingress exists
    if kubectl get ingress bakery-ingress -n bakery-ia &> /dev/null; then
        print_success "HTTPS ingress configured successfully"
    else
        print_warning "Ingress not found, but continuing with setup..."
    fi

    print_success "Application deployed with HTTPS support using Skaffold"
}

# Check certificate status
check_certificates() {
    print_status "Checking certificate status..."

    # Wait for the certificate to be issued
    print_status "Waiting for certificates to be issued..."

    # Check that the certificate object exists
    for i in {1..12}; do
        if kubectl get certificate bakery-ia-tls-cert -n bakery-ia &> /dev/null; then
            print_success "Certificate found"
            break
        fi
        print_status "Waiting for certificate to be created... (attempt $i/12)"
        sleep 10
    done

    # Wait for the certificate's Ready condition to report "True"
    for i in {1..20}; do
        if kubectl get certificate bakery-ia-tls-cert -n bakery-ia -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null | grep -q "True"; then
            print_success "Certificate is ready"
            break
        fi
        print_status "Waiting for certificate to be ready... (attempt $i/20)"
        sleep 15
    done

    echo ""
    echo "📋 Certificate status:"
    kubectl get certificates -n bakery-ia 2>/dev/null || print_warning "No certificates found"

    echo ""
    echo "🔍 Certificate details:"
    kubectl describe certificate bakery-ia-tls-cert -n bakery-ia 2>/dev/null || print_warning "Certificate not found"

    echo ""
    echo "🔐 TLS secret status:"
    kubectl get secret bakery-ia-tls-cert -n bakery-ia 2>/dev/null || print_warning "TLS secret not found"
}
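
# If the certificate never turns Ready, inspecting the certificate requests and
# the issued secret usually pinpoints the cause:
#   kubectl describe certificaterequest -n bakery-ia
#   kubectl get secret bakery-ia-tls-cert -n bakery-ia \
#     -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -text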

# Update hosts file
update_hosts_file() {
    print_status "Checking hosts file configuration..."

    # Kind publishes the ingress on the loopback address
    EXTERNAL_IP="127.0.0.1"

    # Check whether the entries already exist in the hosts file
    if ! grep -q "bakery-ia.local" /etc/hosts 2>/dev/null; then
        print_warning "Named host entries are missing from /etc/hosts."

        # Ask for user permission
        read -p "Do you want to add entries to /etc/hosts for named host access? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            # Append the hosts entries in a single privileged write
            {
                echo "$EXTERNAL_IP bakery-ia.local"
                echo "$EXTERNAL_IP api.bakery-ia.local"
                echo "$EXTERNAL_IP monitoring.bakery-ia.local"
            } | sudo tee -a /etc/hosts > /dev/null
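
            # 'sudo tee -a' is used above instead of 'sudo echo ... >> /etc/hosts'
            # because the >> redirection would run in the unprivileged shell and
            # fail before sudo ever applies.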

            if [ $? -eq 0 ]; then
                print_success "Hosts file entries added successfully"
            else
                print_error "Failed to update hosts file. You may need to add entries manually."
            fi
        else
            print_warning "Skipping hosts file update. You can still access via https://localhost"
        fi
    else
        print_success "Hosts file entries already exist"
    fi

    echo ""
    print_status "Available access methods:"
    echo " 🌐 Primary: https://localhost (no hosts file needed)"
    echo " 🏷️ Named: https://bakery-ia.local (requires hosts file)"
    echo " 🔗 API: https://localhost/api or https://api.bakery-ia.local"
}

# Export CA certificate for browser trust
export_ca_certificate() {
    print_status "Exporting CA certificate for browser trust..."

    # Wait for the CA certificate secret to be created
    for i in {1..10}; do
        if kubectl get secret local-ca-key-pair -n cert-manager &> /dev/null; then
            print_success "CA certificate secret found"
            break
        fi
        print_status "Waiting for CA certificate secret... (attempt $i/10)"
        sleep 10
    done

    # Extract the CA certificate
    if kubectl get secret local-ca-key-pair -n cert-manager &> /dev/null; then
        if kubectl get secret local-ca-key-pair -n cert-manager -o jsonpath='{.data.tls\.crt}' | base64 -d > bakery-ia-ca.crt 2>/dev/null; then
            print_success "CA certificate exported as 'bakery-ia-ca.crt'"

            # Make the certificate file readable
            chmod 644 bakery-ia-ca.crt

            # Only print trust instructions once the file actually exists
            print_warning "To trust this certificate and remove browser warnings:"
            echo ""
            echo "📱 macOS:"
            echo " 1. Double-click 'bakery-ia-ca.crt' to open Keychain Access"
            echo " 2. Find 'bakery-ia-local-ca' in the certificates list"
            echo " 3. Double-click it and set to 'Always Trust'"
            echo ""
            echo "🐧 Linux:"
            echo " sudo cp bakery-ia-ca.crt /usr/local/share/ca-certificates/"
            echo " sudo update-ca-certificates"
            echo ""
            echo "🪟 Windows:"
            echo " 1. Double-click 'bakery-ia-ca.crt'"
            echo " 2. Click 'Install Certificate'"
            echo " 3. Choose 'Trusted Root Certification Authorities'"
            echo ""
        else
            print_warning "Failed to extract CA certificate from secret"
        fi
    else
        print_warning "CA certificate secret not found. HTTPS will work but with browser warnings."
        print_warning "You can still access the application at https://localhost"
    fi
}
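
# Sanity-check the exported CA before importing it into a trust store:
#   openssl x509 -in bakery-ia-ca.crt -noout -subject -issuer -dates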

# Display access information
display_access_info() {
    print_success "🎉 HTTPS setup completed!"
    echo ""
    echo "🌐 Access your application at:"
    echo " Primary: https://localhost"
    echo " API: https://localhost/api"
    echo " Named Host: https://bakery-ia.local (if hosts file updated)"
    echo " API Named: https://api.bakery-ia.local (if hosts file updated)"
    echo ""
    echo "🛠️ Useful commands:"
    echo " 📋 Check status: kubectl get all -n bakery-ia"
    echo " 🔍 Check ingress: kubectl get ingress -n bakery-ia"
    echo " 📜 Check certificates: kubectl get certificates -n bakery-ia"
    echo " 📝 View service logs: kubectl logs -f deployment/<service-name> -n bakery-ia"
    echo " 🚀 Development mode: skaffold dev --profile=dev"
    echo " 🧹 Clean up: skaffold delete --profile=dev"
    echo " 🔄 Restart service: kubectl rollout restart deployment/<service-name> -n bakery-ia"
    echo ""
    echo "🔧 Troubleshooting:"
    echo " 🩺 Get events: kubectl get events -n bakery-ia --sort-by='.firstTimestamp'"
    echo " 🔍 Describe pod: kubectl describe pod <pod-name> -n bakery-ia"
    echo " 📊 Resource usage: kubectl top pods -n bakery-ia"
    echo " 🔐 Certificate details: kubectl describe certificate bakery-ia-tls-cert -n bakery-ia"
    echo ""
    if [ -f "bakery-ia-ca.crt" ]; then
        print_warning "📋 Next steps:"
        echo " 1. Import 'bakery-ia-ca.crt' into your browser to remove certificate warnings"
        echo " 2. Access https://localhost to verify the setup"
        echo " 3. Run 'skaffold dev --profile=dev' for development with hot-reload"
    else
        print_warning "⚠️ Note: You may see certificate warnings until the CA certificate is properly configured"
    fi
    echo ""
    print_status "🎯 The application is now ready for secure development!"
}

# Check current cert-manager status for debugging
check_current_cert_manager_status() {
    print_status "Checking current cert-manager status..."

    if kubectl get namespace cert-manager &> /dev/null; then
        echo ""
        echo "📋 Current cert-manager pods status:"
        kubectl get pods -n cert-manager

        echo ""
        echo "🔍 cert-manager deployments:"
        kubectl get deployments -n cert-manager

        # Check for any pending or failed pods
        local failed_pods=$(kubectl get pods -n cert-manager --field-selector=status.phase!=Running --no-headers 2>/dev/null | wc -l)
        if [ "$failed_pods" -gt 0 ]; then
            echo ""
            print_warning "Found $failed_pods non-running pods. Details:"
            kubectl get pods -n cert-manager --field-selector=status.phase!=Running
        fi
        echo ""
    else
        print_status "cert-manager namespace not found. Will install fresh."
    fi
}

# Cleanup function for failed installations
cleanup_on_failure() {
    print_warning "Cleaning up due to failure..."

    # Optional cleanup - ask the user
    read -p "Do you want to clean up the Kind cluster and start fresh? (y/N): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        print_status "Cleaning up Kind cluster..."
        kind delete cluster --name bakery-ia-local || true
        print_success "Cleanup completed. You can run the script again."
    else
        print_status "Keeping existing setup. You can continue manually or run the script again."
    fi
}

# Trap to handle script interruption
trap 'echo ""; print_warning "Script interrupted. Partial setup may be present."; cleanup_on_failure; exit 1' INT TERM

# Main execution
main() {
    echo "Starting HTTPS setup for Bakery IA..."

    # Run the setup steps in order, stopping at the first failure
    local steps=(
        check_prerequisites
        check_current_cert_manager_status
        install_cert_manager
        install_nginx_ingress
        setup_cluster_issuers
        deploy_with_https
        check_certificates
        update_hosts_file
        export_ca_certificate
    )

    local step
    for step in "${steps[@]}"; do
        if ! "$step"; then
            print_error "Setup failed at step '$step'. Check the output above for details."
            cleanup_on_failure
            exit 1
        fi
    done

    display_access_info
    print_success "Setup completed successfully! 🚀"
}

# Run main function
main "$@"