#!/bin/bash
# ================================================================
# UPDATED SETUP SCRIPT
# scripts/setup.sh
# ================================================================
# Bakery Forecasting Platform - Microservices Setup Script
# This script sets up the complete development environment
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
print_step() {
echo -e "${BLUE}[STEP]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
print_header() {
echo ""
echo "================================================================"
echo -e "${BLUE}$1${NC}"
echo "================================================================"
}
# Check prerequisites
check_prerequisites() {
print_header "CHECKING PREREQUISITES"
# Check Docker
if ! command -v docker &> /dev/null; then
print_error "Docker is not installed. Please install Docker first."
exit 1
fi
print_success "Docker is installed"
# Check Docker Compose
if ! command -v docker-compose &> /dev/null; then
print_error "Docker Compose is not installed. Please install Docker Compose first."
exit 1
fi
print_success "Docker Compose is installed"
# Check if Docker is running
if ! docker info &> /dev/null; then
print_error "Docker is not running. Please start Docker first."
exit 1
fi
print_success "Docker is running"
# Check available ports
local ports=(8000 8001 8002 8003 8004 8005 8006 3000 3001 3002 5432 6379 5672 15672 9090)
local used_ports=()
for port in "${ports[@]}"; do
if { netstat -tuln 2>/dev/null || ss -tuln 2>/dev/null; } | grep -q ":$port "; then
used_ports+=($port)
fi
done
if [ ${#used_ports[@]} -gt 0 ]; then
print_warning "The following ports are in use: ${used_ports[*]}"
print_warning "You may need to stop other services or change port configurations"
else
print_success "All required ports are available"
fi
}
# Create directory structure
create_directory_structure() {
print_header "CREATING DIRECTORY STRUCTURE"
# Core directories
local dirs=(
"shared/config"
"shared/auth"
"shared/database"
"shared/messaging"
"shared/monitoring"
"shared/utils"
"gateway/app/core"
"gateway/app/middleware"
"gateway/app/routes"
"gateway/tests"
)
# Service directories
local services=("auth" "training" "forecasting" "data" "tenant" "notification")
for service in "${services[@]}"; do
dirs+=(
"services/$service/app/core"
"services/$service/app/models"
"services/$service/app/schemas"
"services/$service/app/services"
"services/$service/app/api"
"services/$service/migrations/versions"
"services/$service/tests"
)
done
# Additional directories
dirs+=(
"frontend/dashboard/src/components"
"frontend/dashboard/src/pages"
"frontend/dashboard/src/services"
"frontend/dashboard/src/hooks"
"frontend/dashboard/src/utils"
"frontend/marketing/src/components"
"frontend/marketing/src/pages"
"infrastructure/docker"
"infrastructure/kubernetes/base"
"infrastructure/terraform/modules"
"deployment/nginx"
"tests/integration"
"tests/e2e"
"tests/performance"
"docs/architecture"
"docs/api"
"docs/deployment"
"scripts"
"logs"
"models"
"templates/email"
"templates/whatsapp"
)
for dir in "${dirs[@]}"; do
if [ ! -d "$dir" ]; then
mkdir -p "$dir"
print_success "Created directory: $dir"
fi
done
}
# Create shared base configuration
create_shared_config() {
print_step "Creating shared configuration..."
if [ ! -f "shared/config/__init__.py" ]; then
touch "shared/config/__init__.py"
fi
if [ ! -f "shared/config/base.py" ]; then
cat > "shared/config/base.py" << 'EOF'
"""
Base configuration for all microservices
This file should contain the BaseServiceSettings class
"""
# Import the base configuration from the artifact
# The complete base.py content should be copied here from the artifact
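#
# Illustrative sketch only -- the field names below are assumptions inferred
# from how this script's generated configs use the class, not the artifact's
# actual contents:
#
#   from pydantic_settings import BaseSettings
#
#   class BaseServiceSettings(BaseSettings):
#       APP_NAME: str = "Bakery Service"
#       SERVICE_NAME: str = "service"
#       DESCRIPTION: str = ""
#       DATABASE_URL: str = ""
#       REDIS_DB: int = 0
#       JWT_SECRET_KEY: str = "change-me"
#       JWT_ALGORITHM: str = "HS256"
#       JWT_ACCESS_TOKEN_EXPIRE_MINUTES: int = 30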
EOF
print_success "Created shared base configuration template"
print_warning "Please copy the BaseServiceSettings class from the artifact to shared/config/base.py"
fi
}
# Create service configurations
create_service_configs() {
print_header "CREATING SERVICE CONFIGURATIONS"
local services=("auth" "training" "forecasting" "data" "tenant" "notification")
for service in "${services[@]}"; do
print_step "Creating configuration for $service service..."
local service_dir="services/$service"
local config_file="$service_dir/app/core/config.py"
if [ ! -f "$config_file" ]; then
cat > "$config_file" << EOF
"""
$service service configuration
"""
from shared.config.base import BaseServiceSettings
import os
class ${service^}Settings(BaseServiceSettings):
"""$service service specific settings"""
# Service Identity
APP_NAME: str = "${service^} Service"
SERVICE_NAME: str = "$service-service"
DESCRIPTION: str = "$service microservice for bakery platform"
# Database Configuration
DATABASE_URL: str = os.getenv("${service^^}_DATABASE_URL",
"postgresql+asyncpg://${service}_user:${service}_pass123@${service}-db:5432/${service}_db")
# Redis Database (each service gets its own DB number)
REDIS_DB: int = $(( $(echo "${services[@]}" | tr ' ' '\n' | grep -n "^$service$" | cut -d: -f1) - 1 ))
settings = ${service^}Settings()
EOF
print_success "Created: $config_file"
fi
# Create database configuration
local db_config_file="$service_dir/app/core/database.py"
if [ ! -f "$db_config_file" ]; then
cat > "$db_config_file" << EOF
"""
Database configuration for $service service
"""
from shared.database.base import DatabaseManager
from app.core.config import settings
# Initialize database manager
database_manager = DatabaseManager(settings.DATABASE_URL)
# Alias for convenience
get_db = database_manager.get_db
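
# Illustrative sketch only (assumes DatabaseManager.get_db yields an async
# SQLAlchemy session usable as a FastAPI dependency; adapt to the shared
# library's actual API):
#
#   from fastapi import Depends
#   from sqlalchemy.ext.asyncio import AsyncSession
#
#   async def list_items(db: AsyncSession = Depends(get_db)):
#       ...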
EOF
print_success "Created: $db_config_file"
fi
# Create auth configuration
local auth_config_file="$service_dir/app/core/auth.py"
if [ ! -f "$auth_config_file" ]; then
cat > "$auth_config_file" << EOF
"""
Authentication configuration for $service service
"""
from shared.auth.jwt_handler import JWTHandler
from shared.auth.decorators import require_auth, require_role
from app.core.config import settings
# Initialize JWT handler
jwt_handler = JWTHandler(
secret_key=settings.JWT_SECRET_KEY,
algorithm=settings.JWT_ALGORITHM,
access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES
)
# Export commonly used functions
verify_token = jwt_handler.verify_token
create_access_token = jwt_handler.create_access_token
get_current_user = jwt_handler.get_current_user
# Export decorators
__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role']
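
# Illustrative sketch only (not part of the artifact): one way a route in this
# service might use these exports, assuming get_current_user works as a
# FastAPI dependency provided by the shared JWTHandler.
#
#   from fastapi import APIRouter, Depends
#
#   router = APIRouter()
#
#   @router.get("/me")
#   async def read_current_user(user=Depends(get_current_user)):
#       return user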
EOF
print_success "Created: $auth_config_file"
fi
# Create requirements.txt
local requirements_file="$service_dir/requirements.txt"
if [ ! -f "$requirements_file" ]; then
cat > "$requirements_file" << 'EOF'
# Core FastAPI dependencies
fastapi==0.104.1
uvicorn[standard]==0.24.0
pydantic==2.5.0
pydantic-settings==2.1.0
# Database
sqlalchemy==2.0.23
asyncpg==0.29.0
alembic==1.12.1
# HTTP client
httpx==0.25.2
# Caching and messaging
redis==5.0.1
aio-pika==9.3.0
# Monitoring and logging
prometheus-client==0.17.1
python-json-logger==2.0.4
# Utilities
pytz==2023.3
python-multipart==0.0.6
# Security
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4
python-dateutil==2.8.2
# ML dependencies (for training and forecasting services)
pandas==2.1.3
numpy==1.25.2
scikit-learn==1.3.2
prophet==1.1.4
# Spanish localization
babel==2.13.1
EOF
print_success "Created: $requirements_file"
fi
# Create Dockerfile
local dockerfile="$service_dir/Dockerfile"
if [ ! -f "$dockerfile" ]; then
cat > "$dockerfile" << 'EOF'
FROM python:3.11-slim
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    curl \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*
# Copy requirements
COPY requirements.txt .
# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Copy shared libraries first (assumes the image is built with the repository
# root as the build context so that shared/ is available to this COPY)
COPY shared/ /app/shared/
# Copy application code
COPY . .
# Add shared libraries to Python path
ENV PYTHONPATH="/app:/app/shared:$PYTHONPATH"
# Create non-root user
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser
# Expose port
EXPOSE 8000
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1
# Run application
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
EOF
print_success "Created: $dockerfile"
fi
done
}
# Create gateway configuration
create_gateway_config() {
print_step "Creating gateway configuration..."
if [ ! -f "gateway/app/core/config.py" ]; then
cat > "gateway/app/core/config.py" << 'EOF'
"""
Gateway service configuration
"""
from shared.config.base import BaseServiceSettings
import os
from typing import Dict, List
class GatewaySettings(BaseServiceSettings):
"""Gateway-specific settings"""
# Service Identity
APP_NAME: str = "Bakery Forecasting Gateway"
SERVICE_NAME: str = "gateway"
DESCRIPTION: str = "API Gateway for Bakery Forecasting Platform"
# Gateway-specific Redis database
REDIS_DB: int = 6
# Gateway doesn't need a database
DATABASE_URL: str = ""
# Service Discovery
CONSUL_URL: str = os.getenv("CONSUL_URL", "http://consul:8500")
ENABLE_SERVICE_DISCOVERY: bool = os.getenv("ENABLE_SERVICE_DISCOVERY", "false").lower() == "true"
settings = GatewaySettings()
EOF
print_success "Created gateway configuration"
fi
}
# Create environment file
create_environment_file() {
print_header "CREATING ENVIRONMENT CONFIGURATION"
if [ ! -f ".env" ]; then
print_step "Creating .env file from template..."
# Copy the environment template from the artifact
cat > ".env" << 'EOF'
# Copy the complete .env content from the artifact here
# This should include all the environment variables defined in the artifact
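#
# Illustrative placeholders only -- the variable names below are inferred from
# this script (the authoritative list is in the artifact); uncomment and adjust
# as needed. Email/SMTP settings for the notification service should also be
# added here using the names defined in the artifact.
#
# JWT_SECRET_KEY=change-me-to-a-long-random-string
# JWT_ALGORITHM=HS256
# JWT_ACCESS_TOKEN_EXPIRE_MINUTES=30
# AUTH_DATABASE_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db
# AEMET_API_KEY=your-aemet-api-key
# MADRID_OPEN_DATA_API_KEY=your-madrid-open-data-api-key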
EOF
print_success "Created .env file"
print_warning "Please update the .env file with your actual configuration values"
print_warning "Especially change JWT_SECRET_KEY, database passwords, and API keys"
else
print_warning ".env file already exists - skipping creation"
fi
}
# Create Docker Compose configuration
create_docker_compose() {
print_header "CREATING DOCKER COMPOSE CONFIGURATION"
if [ ! -f "docker-compose.yml" ]; then
print_step "Creating docker-compose.yml..."
cat > "docker-compose.yml" << 'EOF'
version: '3.8'

services:
  # ============================================================
  # INFRASTRUCTURE SERVICES
  # ============================================================

  # PostgreSQL Databases (one per service)
  auth-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: auth_db
      POSTGRES_USER: auth_user
      POSTGRES_PASSWORD: auth_pass123
    volumes:
      - auth_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U auth_user -d auth_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  training-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: training_db
      POSTGRES_USER: training_user
      POSTGRES_PASSWORD: training_pass123
    volumes:
      - training_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U training_user -d training_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  forecasting-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: forecasting_db
      POSTGRES_USER: forecasting_user
      POSTGRES_PASSWORD: forecasting_pass123
    volumes:
      - forecasting_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U forecasting_user -d forecasting_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  data-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: data_db
      POSTGRES_USER: data_user
      POSTGRES_PASSWORD: data_pass123
    volumes:
      - data_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U data_user -d data_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  tenant-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: tenant_db
      POSTGRES_USER: tenant_user
      POSTGRES_PASSWORD: tenant_pass123
    volumes:
      - tenant_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U tenant_user -d tenant_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  notification-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: notification_db
      POSTGRES_USER: notification_user
      POSTGRES_PASSWORD: notification_pass123
    volumes:
      - notification_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U notification_user -d notification_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis Cache
  redis:
    image: redis:7-alpine
    command: redis-server --appendonly yes --requirepass redis_pass123
    volumes:
      - redis_data:/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  # RabbitMQ Message Broker
  rabbitmq:
    image: rabbitmq:3-management-alpine
    environment:
      RABBITMQ_DEFAULT_USER: bakery
      RABBITMQ_DEFAULT_PASS: forecast123
    ports:
      - "15672:15672" # Management UI
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "ping"]
      interval: 30s
      timeout: 10s
      retries: 5

  # ============================================================
  # MICROSERVICES
  # ============================================================

  # API Gateway
  gateway:
    build: ./gateway
    ports:
      - "8000:8000"
    env_file: .env
    depends_on:
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Auth Service
  auth-service:
    build: ./services/auth
    ports:
      - "8001:8000"
    env_file: .env
    depends_on:
      - auth-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Training Service
  training-service:
    build: ./services/training
    ports:
      - "8002:8000"
    env_file: .env
    depends_on:
      - training-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
      - ./models:/app/models
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Forecasting Service
  forecasting-service:
    build: ./services/forecasting
    ports:
      - "8003:8000"
    env_file: .env
    depends_on:
      - forecasting-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
      - ./models:/app/models
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Data Service
  data-service:
    build: ./services/data
    ports:
      - "8004:8000"
    env_file: .env
    depends_on:
      - data-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Tenant Service
  tenant-service:
    build: ./services/tenant
    ports:
      - "8005:8000"
    env_file: .env
    depends_on:
      - tenant-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Notification Service
  notification-service:
    build: ./services/notification
    ports:
      - "8006:8000"
    env_file: .env
    depends_on:
      - notification-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
      - ./templates:/app/templates
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ============================================================
  # MONITORING STACK
  # ============================================================

  # Prometheus
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./infrastructure/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    networks:
      - bakery-network
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'

  # Grafana
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3002:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: admin123
    volumes:
      - grafana_data:/var/lib/grafana
      - ./infrastructure/monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards
      - ./infrastructure/monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
    networks:
      - bakery-network

networks:
  bakery-network:
    driver: bridge

volumes:
  # Database volumes
  auth_db_data:
  training_db_data:
  forecasting_db_data:
  data_db_data:
  tenant_db_data:
  notification_db_data:
  # Cache and messaging volumes
  redis_data:
  rabbitmq_data:
  # Monitoring volumes
  prometheus_data:
  grafana_data:
EOF
print_success "Created docker-compose.yml"
fi
}
# Create utility scripts
create_utility_scripts() {
print_header "CREATING UTILITY SCRIPTS"
# Test script
if [ ! -f "scripts/test.sh" ]; then
cat > "scripts/test.sh" << 'EOF'
#!/bin/bash
# Run tests for all services
set -e
echo "Running tests for all microservices..."
services=("auth" "training" "forecasting" "data" "tenant" "notification")
for service in "${services[@]}"; do
echo "Testing $service service..."
docker-compose exec ${service}-service python -m pytest tests/ -v
done
echo "Running integration tests..."
docker-compose exec gateway python -m pytest ../tests/integration/ -v
echo "All tests completed!"
EOF
chmod +x "scripts/test.sh"
print_success "Created test script"
fi
# Deployment script
if [ ! -f "scripts/deploy.sh" ]; then
cat > "scripts/deploy.sh" << 'EOF'
#!/bin/bash
# Deploy services to production
set -e
echo "Building and deploying services..."
# Build all services
docker-compose build
# Deploy with zero downtime
docker-compose up -d --no-deps --force-recreate
# Wait for health checks
echo "Waiting for services to be healthy..."
sleep 30
# Verify deployment
./scripts/health-check.sh
echo "Deployment completed successfully!"
EOF
chmod +x "scripts/deploy.sh"
print_success "Created deployment script"
fi
# Health check script
if [ ! -f "scripts/health-check.sh" ]; then
cat > "scripts/health-check.sh" << 'EOF'
#!/bin/bash
# Check health of all services
services=(
"gateway:8000"
"auth-service:8001"
"training-service:8002"
"forecasting-service:8003"
"data-service:8004"
"tenant-service:8005"
"notification-service:8006"
)
echo "Checking service health..."
all_healthy=true
for service_port in "${services[@]}"; do
service=$(echo $service_port | cut -d: -f1)
port=$(echo $service_port | cut -d: -f2)
if curl -f -s "http://localhost:$port/health" > /dev/null; then
echo "✅ $service is healthy"
else
echo "❌ $service is unhealthy"
all_healthy=false
fi
done
if $all_healthy; then
echo "🎉 All services are healthy!"
exit 0
else
echo "⚠️ Some services are unhealthy"
exit 1
fi
EOF
chmod +x "scripts/health-check.sh"
print_success "Created health check script"
fi
}
# Create monitoring configuration
create_monitoring_config() {
print_step "Creating monitoring configuration..."
# Prometheus configuration
if [ ! -f "infrastructure/monitoring/prometheus.yml" ]; then
mkdir -p infrastructure/monitoring
cat > "infrastructure/monitoring/prometheus.yml" << 'EOF'
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'gateway'
    static_configs:
      - targets: ['gateway:8000']

  - job_name: 'auth-service'
    static_configs:
      - targets: ['auth-service:8000']

  - job_name: 'training-service'
    static_configs:
      - targets: ['training-service:8000']

  - job_name: 'forecasting-service'
    static_configs:
      - targets: ['forecasting-service:8000']

  - job_name: 'data-service'
    static_configs:
      - targets: ['data-service:8000']

  - job_name: 'tenant-service'
    static_configs:
      - targets: ['tenant-service:8000']

  - job_name: 'notification-service'
    static_configs:
      - targets: ['notification-service:8000']
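
# Note: these scrape targets assume each service exposes Prometheus metrics at
# the default /metrics path (prometheus-client is pinned in each service's
# requirements.txt).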
EOF
print_success "Created Prometheus configuration"
fi
}
# Final setup steps
final_setup() {
print_header "FINAL SETUP STEPS"
# Make scripts executable
chmod +x scripts/*.sh
# Create logs directory
mkdir -p logs models
print_success "Setup completed successfully!"
echo ""
echo "================================================================"
echo -e "${GREEN}NEXT STEPS${NC}"
echo "================================================================"
echo "1. Update .env file with your configuration:"
echo " - Change JWT_SECRET_KEY"
echo " - Add AEMET and Madrid Open Data API keys"
echo " - Configure email settings"
echo ""
echo "2. Copy the configuration classes from artifacts:"
echo " - Copy BaseServiceSettings to shared/config/base.py"
echo " - Copy service-specific settings to respective config files"
echo ""
echo "3. Start the services:"
echo " docker-compose up -d"
echo ""
echo "4. Check service health:"
echo " ./scripts/health-check.sh"
echo ""
echo "5. Access the services:"
echo " - Gateway: http://localhost:8000"
echo " - API Docs: http://localhost:8000/docs"
echo " - Grafana: http://localhost:3002 (admin/admin123)"
echo " - RabbitMQ: http://localhost:15672 (bakery/forecast123)"
echo ""
echo "================================================================"
}
# Main execution
main() {
print_header "BAKERY FORECASTING PLATFORM - MICROSERVICES SETUP"
check_prerequisites
create_directory_structure
create_shared_config
create_service_configs
create_gateway_config
create_environment_file
create_docker_compose
create_utility_scripts
create_monitoring_config
final_setup
}
# Run main function
main "$@"