Add new frontend
This commit is contained in:
@@ -1,214 +0,0 @@
|
||||
#!/bin/bash
# ================================================================
# FIXED SETUP SCRIPT
# scripts/docker-setup.sh
# ================================================================
#
# Fixed setup script with proper error handling.
#
# Usage: docker-setup.sh [environment] [profiles]
#   environment  development (default) | production | testing | other
#   profiles     comma-separated docker-compose profile names
#                (default: "development,frontend")
#
# NOTE(review): the shebang originally appeared after the comment
# banner; a shebang is only honored on the very first line of a file,
# so it has been moved to the top.

set -e

ENVIRONMENT=${1:-development}
PROFILES=${2:-"development,frontend"}

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Logging functions
print_step() {
    echo -e "${GREEN}[STEP]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Expand the comma-separated $PROFILES list into the global array
# PROFILE_ARGS as repeated "--profile NAME" arguments.  An array (rather
# than the original space-joined string expanded unquoted) keeps each
# profile name intact through word-splitting.
build_profile_args() {
    local profile
    PROFILE_ARGS=()
    IFS=',' read -ra PROFILE_ARRAY <<< "$PROFILES"
    for profile in "${PROFILE_ARRAY[@]}"; do
        PROFILE_ARGS+=(--profile "$profile")
    done
}

print_step "Setting up Bakery Forecasting Platform"
echo "Environment: $ENVIRONMENT"
echo "Profiles: $PROFILES"

# Check if .env file exists
if [ ! -f ".env" ]; then
    print_error ".env file not found!"
    echo "Please create .env file with the content from the artifact."
    echo "Run: cp .env.example .env"
    exit 1
fi

# Validate critical environment variables
print_step "Validating environment variables..."

# Source the .env file to check variables
set -a # automatically export all variables
source .env
set +a

# Check critical variables
critical_vars=(
    "IMAGE_TAG"
    "AUTH_DB_NAME"
    "AUTH_DB_USER"
    "AUTH_DB_PASSWORD"
    "REDIS_PASSWORD"
    "RABBITMQ_USER"
    "RABBITMQ_PASSWORD"
    "GATEWAY_PORT"
    "AUTH_SERVICE_PORT"
)

missing_vars=()

for var in "${critical_vars[@]}"; do
    # ${!var} is indirect expansion: the value of the variable named $var.
    if [ -z "${!var}" ]; then
        missing_vars+=("$var")
    fi
done

if [ ${#missing_vars[@]} -gt 0 ]; then
    print_error "Missing required environment variables:"
    printf '%s\n' "${missing_vars[@]}"
    exit 1
fi

print_step "Environment variables validated successfully"

# Create necessary directories
print_step "Creating necessary directories..."
mkdir -p infrastructure/{redis,rabbitmq,postgres/init-scripts,monitoring/{prometheus/rules,grafana/{dashboards,datasources}},pgadmin}
mkdir -p backups logs models templates/{email,whatsapp}
mkdir -p shared/{config,auth,database,messaging,monitoring,utils}

# Create basic monitoring configs if they don't exist
if [ ! -f "infrastructure/monitoring/prometheus/prometheus.yml" ]; then
    print_step "Creating basic Prometheus configuration..."
    cat > infrastructure/monitoring/prometheus/prometheus.yml << 'EOF'
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'gateway'
    static_configs:
      - targets: ['gateway:8000']

  - job_name: 'auth-service'
    static_configs:
      - targets: ['auth-service:8000']

  - job_name: 'training-service'
    static_configs:
      - targets: ['training-service:8000']

  - job_name: 'forecasting-service'
    static_configs:
      - targets: ['forecasting-service:8000']

  - job_name: 'data-service'
    static_configs:
      - targets: ['data-service:8000']

  - job_name: 'tenant-service'
    static_configs:
      - targets: ['tenant-service:8000']

  - job_name: 'notification-service'
    static_configs:
      - targets: ['notification-service:8000']
EOF
fi

# Set proper permissions (best-effort: file may be owned elsewhere)
chmod 644 infrastructure/monitoring/prometheus/prometheus.yml 2>/dev/null || true

# Stop any existing containers (best-effort: compose may not be up yet)
print_step "Stopping existing containers..."
docker-compose down --remove-orphans 2>/dev/null || true

# Build and start services based on environment
case $ENVIRONMENT in
    "development")
        print_step "Starting development environment..."
        build_profile_args

        # Build first to catch any build errors
        print_step "Building services..."
        docker-compose "${PROFILE_ARGS[@]}" build

        # Then start
        print_step "Starting services..."
        docker-compose "${PROFILE_ARGS[@]}" up -d
        ;;
    "production")
        print_step "Starting production environment..."
        docker-compose -f docker-compose.yml -f docker-compose.prod.yml --profile production --profile monitoring up -d --build
        ;;
    "testing")
        print_step "Starting testing environment..."
        docker-compose -f docker-compose.yml -f docker-compose.test.yml up -d --build
        ;;
    *)
        # Unknown environment name: treat $PROFILES as a custom profile set.
        print_step "Starting with custom profiles: $PROFILES"
        build_profile_args
        docker-compose "${PROFILE_ARGS[@]}" build
        docker-compose "${PROFILE_ARGS[@]}" up -d
        ;;
esac

# Wait a moment for services to start
print_step "Waiting for services to start..."
sleep 10

# Check service status
print_step "Checking service status..."
if command -v curl &> /dev/null; then
    # Check if gateway is responding
    if curl -f -s "http://localhost:${GATEWAY_PORT}/health" > /dev/null 2>&1; then
        echo "✅ Gateway is responding"
    else
        echo "⚠️ Gateway is not yet responding (this is normal during first startup)"
    fi
else
    echo "⚠️ curl not found - skipping health check"
fi

print_step "Setup completed!"
echo ""
echo "================================================================"
echo -e "${GREEN}SERVICES AVAILABLE${NC}"
echo "================================================================"
echo "- Gateway: http://localhost:${GATEWAY_PORT}"
echo "- API Docs: http://localhost:${GATEWAY_PORT}/docs"
echo "- Dashboard: http://localhost:${DASHBOARD_PORT} (if frontend profile enabled)"
echo "- Grafana: http://localhost:${GRAFANA_PORT} (${GRAFANA_ADMIN_USER}/${GRAFANA_ADMIN_PASSWORD})"
echo "- pgAdmin: http://localhost:${PGADMIN_PORT} (${PGADMIN_EMAIL}/${PGADMIN_PASSWORD})"
echo "- RabbitMQ: http://localhost:${RABBITMQ_MANAGEMENT_PORT} (${RABBITMQ_USER}/${RABBITMQ_PASSWORD})"
echo "- Redis Commander: http://localhost:${REDIS_COMMANDER_PORT} (${REDIS_COMMANDER_USER}/${REDIS_COMMANDER_PASSWORD})"
echo ""
echo "================================================================"
echo -e "${GREEN}NEXT STEPS${NC}"
echo "================================================================"
echo "1. Check service health:"
echo "   ./scripts/docker-health-check.sh"
echo ""
echo "2. View logs:"
echo "   docker-compose logs -f"
echo ""
echo "3. Check specific service:"
echo "   docker-compose logs -f auth-service"
echo ""
echo "If you see any errors, check the logs for more details."
984
scripts/setup.sh
984
scripts/setup.sh
@@ -1,984 +0,0 @@
|
||||
#!/bin/bash
# ================================================================
# UPDATED SETUP SCRIPT
# scripts/setup.sh
# ================================================================
#
# Bakery Forecasting Platform - Microservices Setup Script
# This script sets up the complete development environment.
#
# NOTE(review): the shebang originally appeared below this banner; a
# shebang is only honored on the very first line, so it was moved up.

# Abort on the first unhandled error.
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging helpers: each one prefixes its message with a colored tag.
print_step()    { echo -e "${BLUE}[STEP]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error()   { echo -e "${RED}[ERROR]${NC} $1"; }

# Section banner: blank line, rule, colored title, rule.
print_header() {
    echo ""
    echo "================================================================"
    echo -e "${BLUE}$1${NC}"
    echo "================================================================"
}
# Check prerequisites
# Verifies docker / docker-compose are installed and the daemon is
# running (exits on failure), then warns about ports already in use.
check_prerequisites() {
    print_header "CHECKING PREREQUISITES"

    # Check Docker
    if ! command -v docker &> /dev/null; then
        print_error "Docker is not installed. Please install Docker first."
        exit 1
    fi
    print_success "Docker is installed"

    # Check Docker Compose
    if ! command -v docker-compose &> /dev/null; then
        print_error "Docker Compose is not installed. Please install Docker Compose first."
        exit 1
    fi
    print_success "Docker Compose is installed"

    # Check if Docker is running
    if ! docker info &> /dev/null; then
        print_error "Docker is not running. Please start Docker first."
        exit 1
    fi
    print_success "Docker is running"

    # Check available ports
    local ports=(8000 8001 8002 8003 8004 8005 8006 3000 3001 3002 5432 6379 5672 15672 9090)
    local used_ports=()
    local port

    # The original silently reported "all ports available" when netstat
    # was missing (its stderr was discarded); surface that case instead.
    if ! command -v netstat &> /dev/null; then
        print_warning "netstat not found - skipping port availability check"
        return 0
    fi

    for port in "${ports[@]}"; do
        if netstat -tuln 2>/dev/null | grep -q ":$port "; then
            used_ports+=("$port")   # quoted append: no splitting/globbing
        fi
    done

    if [ ${#used_ports[@]} -gt 0 ]; then
        print_warning "The following ports are in use: ${used_ports[*]}"
        print_warning "You may need to stop other services or change port configurations"
    else
        print_success "All required ports are available"
    fi
}
# Create directory structure
# Builds the full project tree: shared libraries, the gateway, one
# subtree per microservice, both frontends, infrastructure, tests,
# docs, and runtime directories.  Existing directories are left alone.
create_directory_structure() {
    print_header "CREATING DIRECTORY STRUCTURE"

    local svc dir

    # Core directories
    local dirs=(
        "shared/config"
        "shared/auth"
        "shared/database"
        "shared/messaging"
        "shared/monitoring"
        "shared/utils"
        "gateway/app/core"
        "gateway/app/middleware"
        "gateway/app/routes"
        "gateway/tests"
    )

    # Service directories (same layout for every microservice)
    local services=("auth" "training" "forecasting" "data" "tenant" "notification")
    for svc in "${services[@]}"; do
        dirs+=(
            "services/$svc/app/core"
            "services/$svc/app/models"
            "services/$svc/app/schemas"
            "services/$svc/app/services"
            "services/$svc/app/api"
            "services/$svc/migrations/versions"
            "services/$svc/tests"
        )
    done

    # Additional directories
    dirs+=(
        "frontend/dashboard/src/components"
        "frontend/dashboard/src/pages"
        "frontend/dashboard/src/services"
        "frontend/dashboard/src/hooks"
        "frontend/dashboard/src/utils"
        "frontend/marketing/src/components"
        "frontend/marketing/src/pages"
        "infrastructure/docker"
        "infrastructure/kubernetes/base"
        "infrastructure/terraform/modules"
        "deployment/nginx"
        "tests/integration"
        "tests/e2e"
        "tests/performance"
        "docs/architecture"
        "docs/api"
        "docs/deployment"
        "scripts"
        "logs"
        "models"
        "templates/email"
        "templates/whatsapp"
    )

    for dir in "${dirs[@]}"; do
        if [ ! -d "$dir" ]; then
            mkdir -p "$dir"
            print_success "Created directory: $dir"
        fi
    done
}
# Create shared base configuration
# Seeds shared/config with a package marker and a base.py placeholder
# that the operator must fill in from the artifact.  Never overwrites.
create_shared_config() {
    print_step "Creating shared configuration..."

    [ -f "shared/config/__init__.py" ] || touch "shared/config/__init__.py"

    # Nothing more to do when the template is already present.
    if [ -f "shared/config/base.py" ]; then
        return 0
    fi

    cat > "shared/config/base.py" << 'EOF'
"""
Base configuration for all microservices
This file should contain the BaseServiceSettings class
"""

# Import the base configuration from the artifact
# The complete base.py content should be copied here from the artifact
EOF
    print_success "Created shared base configuration template"
    print_warning "Please copy the BaseServiceSettings class from the artifact to shared/config/base.py"
}
# Create service configurations
# Generates config.py / database.py / auth.py / requirements.txt /
# Dockerfile for each microservice.  Existing files are never
# overwritten.
create_service_configs() {
    print_header "CREATING SERVICE CONFIGURATIONS"

    local services=("auth" "training" "forecasting" "data" "tenant" "notification")
    local service service_dir config_file db_config_file auth_config_file
    local requirements_file dockerfile
    # Position of the current service in the list; doubles as its Redis
    # DB number.  (Replaces the original `echo | grep -n | cut` subshell
    # pipeline that spawned several processes per service just to
    # recover the loop index — the generated values are identical.)
    local redis_db=0

    for service in "${services[@]}"; do
        print_step "Creating configuration for $service service..."

        service_dir="services/$service"
        config_file="$service_dir/app/core/config.py"

        # Service settings module (unquoted heredoc: ${service…} and
        # $redis_db expand at generation time).
        if [ ! -f "$config_file" ]; then
            cat > "$config_file" << EOF
"""
$service service configuration
"""

from shared.config.base import BaseServiceSettings
import os

class ${service^}Settings(BaseServiceSettings):
    """$service service specific settings"""

    # Service Identity
    APP_NAME: str = "${service^} Service"
    SERVICE_NAME: str = "$service-service"
    DESCRIPTION: str = "$service microservice for bakery platform"

    # Database Configuration
    DATABASE_URL: str = os.getenv("${service^^}_DATABASE_URL",
        "postgresql+asyncpg://${service}_user:${service}_pass123@${service}-db:5432/${service}_db")

    # Redis Database (each service gets its own DB number)
    REDIS_DB: int = $redis_db

settings = ${service^}Settings()
EOF
            print_success "Created: $config_file"
        fi

        # Create database configuration
        db_config_file="$service_dir/app/core/database.py"
        if [ ! -f "$db_config_file" ]; then
            cat > "$db_config_file" << EOF
"""
Database configuration for $service service
"""

from shared.database.base import DatabaseManager
from app.core.config import settings

# Initialize database manager
database_manager = DatabaseManager(settings.DATABASE_URL)

# Alias for convenience
get_db = database_manager.get_db
EOF
            print_success "Created: $db_config_file"
        fi

        # Create auth configuration
        auth_config_file="$service_dir/app/core/auth.py"
        if [ ! -f "$auth_config_file" ]; then
            cat > "$auth_config_file" << EOF
"""
Authentication configuration for $service service
"""

from shared.auth.jwt_handler import JWTHandler
from shared.auth.decorators import require_auth, require_role
from app.core.config import settings

# Initialize JWT handler
jwt_handler = JWTHandler(
    secret_key=settings.JWT_SECRET_KEY,
    algorithm=settings.JWT_ALGORITHM,
    access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES
)

# Export commonly used functions
verify_token = jwt_handler.verify_token
create_access_token = jwt_handler.create_access_token
get_current_user = jwt_handler.get_current_user

# Export decorators
__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role']
EOF
            print_success "Created: $auth_config_file"
        fi

        # Create requirements.txt (quoted heredoc: written verbatim)
        requirements_file="$service_dir/requirements.txt"
        if [ ! -f "$requirements_file" ]; then
            cat > "$requirements_file" << 'EOF'
# Core FastAPI dependencies
fastapi==0.104.1
uvicorn[standard]==0.24.0
pydantic==2.5.0
pydantic-settings==2.1.0

# Database
sqlalchemy==2.0.23
asyncpg==0.29.0
alembic==1.12.1

# HTTP client
httpx==0.25.2

# Caching and messaging
redis==5.0.1
aio-pika==9.3.0

# Monitoring and logging
prometheus-client==0.17.1
python-json-logger==2.0.4

# Utilities
pytz==2023.3
python-multipart==0.0.6

# Security
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4
python-dateutil==2.8.2

# ML dependencies (for training and forecasting services)
pandas==2.1.3
numpy==1.25.2
scikit-learn==1.3.2
prophet==1.1.4

# Spanish localization
babel==2.13.1
EOF
            print_success "Created: $requirements_file"
        fi

        # Create Dockerfile (quoted heredoc so $PYTHONPATH stays literal)
        dockerfile="$service_dir/Dockerfile"
        if [ ! -f "$dockerfile" ]; then
            cat > "$dockerfile" << 'EOF'
FROM python:3.11-slim

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    curl \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy shared libraries first
COPY shared/ /app/shared/

# Copy application code
COPY . .

# Add shared libraries to Python path
ENV PYTHONPATH="/app:/app/shared:$PYTHONPATH"

# Create non-root user
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

# Expose port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

# Run application
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
EOF
            print_success "Created: $dockerfile"
        fi

        redis_db=$((redis_db + 1))
    done
}
# Create gateway configuration
# Writes gateway/app/core/config.py unless it already exists.
create_gateway_config() {
    print_step "Creating gateway configuration..."

    if [ -f "gateway/app/core/config.py" ]; then
        return 0
    fi

    cat > "gateway/app/core/config.py" << 'EOF'
"""
Gateway service configuration
"""

from shared.config.base import BaseServiceSettings
import os
from typing import Dict, List

class GatewaySettings(BaseServiceSettings):
    """Gateway-specific settings"""

    # Service Identity
    APP_NAME: str = "Bakery Forecasting Gateway"
    SERVICE_NAME: str = "gateway"
    DESCRIPTION: str = "API Gateway for Bakery Forecasting Platform"

    # Gateway-specific Redis database
    REDIS_DB: int = 6

    # Gateway doesn't need a database
    DATABASE_URL: str = ""

    # Service Discovery
    CONSUL_URL: str = os.getenv("CONSUL_URL", "http://consul:8500")
    ENABLE_SERVICE_DISCOVERY: bool = os.getenv("ENABLE_SERVICE_DISCOVERY", "false").lower() == "true"

settings = GatewaySettings()
EOF
    print_success "Created gateway configuration"
}
# Create environment file
# Writes a placeholder .env the operator must fill in from the
# artifact.  An existing .env is never touched.
create_environment_file() {
    print_header "CREATING ENVIRONMENT CONFIGURATION"

    if [ -f ".env" ]; then
        print_warning ".env file already exists - skipping creation"
        return 0
    fi

    print_step "Creating .env file from template..."

    # Copy the environment template from the artifact
    cat > ".env" << 'EOF'
# Copy the complete .env content from the artifact here
# This should include all the environment variables defined in the artifact
EOF
    print_success "Created .env file"
    print_warning "Please update the .env file with your actual configuration values"
    print_warning "Especially change JWT_SECRET_KEY, database passwords, and API keys"
}
# Create Docker Compose configuration
# Writes the default docker-compose.yml (per-service Postgres, Redis,
# RabbitMQ, the seven application services, and the monitoring stack)
# unless one already exists.
create_docker_compose() {
    print_header "CREATING DOCKER COMPOSE CONFIGURATION"

    if [ -f "docker-compose.yml" ]; then
        return 0
    fi

    print_step "Creating docker-compose.yml..."

    cat > "docker-compose.yml" << 'EOF'
version: '3.8'

services:
  # ============================================================
  # INFRASTRUCTURE SERVICES
  # ============================================================

  # PostgreSQL Databases (one per service)
  auth-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: auth_db
      POSTGRES_USER: auth_user
      POSTGRES_PASSWORD: auth_pass123
    volumes:
      - auth_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U auth_user -d auth_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  training-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: training_db
      POSTGRES_USER: training_user
      POSTGRES_PASSWORD: training_pass123
    volumes:
      - training_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U training_user -d training_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  forecasting-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: forecasting_db
      POSTGRES_USER: forecasting_user
      POSTGRES_PASSWORD: forecasting_pass123
    volumes:
      - forecasting_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U forecasting_user -d forecasting_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  data-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: data_db
      POSTGRES_USER: data_user
      POSTGRES_PASSWORD: data_pass123
    volumes:
      - data_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U data_user -d data_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  tenant-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: tenant_db
      POSTGRES_USER: tenant_user
      POSTGRES_PASSWORD: tenant_pass123
    volumes:
      - tenant_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U tenant_user -d tenant_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  notification-db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: notification_db
      POSTGRES_USER: notification_user
      POSTGRES_PASSWORD: notification_pass123
    volumes:
      - notification_db_data:/var/lib/postgresql/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U notification_user -d notification_db"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis Cache
  redis:
    image: redis:7-alpine
    command: redis-server --appendonly yes --requirepass redis_pass123
    volumes:
      - redis_data:/data
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  # RabbitMQ Message Broker
  rabbitmq:
    image: rabbitmq:3-management-alpine
    environment:
      RABBITMQ_DEFAULT_USER: bakery
      RABBITMQ_DEFAULT_PASS: forecast123
    ports:
      - "15672:15672" # Management UI
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
    networks:
      - bakery-network
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "ping"]
      interval: 30s
      timeout: 10s
      retries: 5

  # ============================================================
  # MICROSERVICES
  # ============================================================

  # API Gateway
  gateway:
    build: ./gateway
    ports:
      - "8000:8000"
    env_file: .env
    depends_on:
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Auth Service
  auth-service:
    build: ./services/auth
    ports:
      - "8001:8000"
    env_file: .env
    depends_on:
      - auth-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Training Service
  training-service:
    build: ./services/training
    ports:
      - "8002:8000"
    env_file: .env
    depends_on:
      - training-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
      - ./models:/app/models
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Forecasting Service
  forecasting-service:
    build: ./services/forecasting
    ports:
      - "8003:8000"
    env_file: .env
    depends_on:
      - forecasting-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
      - ./models:/app/models
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Data Service
  data-service:
    build: ./services/data
    ports:
      - "8004:8000"
    env_file: .env
    depends_on:
      - data-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Tenant Service
  tenant-service:
    build: ./services/tenant
    ports:
      - "8005:8000"
    env_file: .env
    depends_on:
      - tenant-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Notification Service
  notification-service:
    build: ./services/notification
    ports:
      - "8006:8000"
    env_file: .env
    depends_on:
      - notification-db
      - redis
      - rabbitmq
    networks:
      - bakery-network
    volumes:
      - ./logs:/app/logs
      - ./templates:/app/templates
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ============================================================
  # MONITORING STACK
  # ============================================================

  # Prometheus
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./infrastructure/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    networks:
      - bakery-network
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'

  # Grafana
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3002:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: admin123
    volumes:
      - grafana_data:/var/lib/grafana
      - ./infrastructure/monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards
      - ./infrastructure/monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
    networks:
      - bakery-network

networks:
  bakery-network:
    driver: bridge

volumes:
  # Database volumes
  auth_db_data:
  training_db_data:
  forecasting_db_data:
  data_db_data:
  tenant_db_data:
  notification_db_data:

  # Cache and messaging volumes
  redis_data:
  rabbitmq_data:

  # Monitoring volumes
  prometheus_data:
  grafana_data:
EOF
    print_success "Created docker-compose.yml"
}
# Create utility scripts
# Drops test / deploy / health-check helpers into scripts/ (each only
# when missing) and marks them executable.
create_utility_scripts() {
    print_header "CREATING UTILITY SCRIPTS"

    # Test script
    if [ ! -f "scripts/test.sh" ]; then
        cat > "scripts/test.sh" << 'EOF'
#!/bin/bash

# Run tests for all services

set -e

echo "Running tests for all microservices..."

services=("auth" "training" "forecasting" "data" "tenant" "notification")

for service in "${services[@]}"; do
    echo "Testing $service service..."
    docker-compose exec ${service}-service python -m pytest tests/ -v
done

echo "Running integration tests..."
docker-compose exec gateway python -m pytest ../tests/integration/ -v

echo "All tests completed!"
EOF
        chmod +x "scripts/test.sh"
        print_success "Created test script"
    fi

    # Deployment script
    if [ ! -f "scripts/deploy.sh" ]; then
        cat > "scripts/deploy.sh" << 'EOF'
#!/bin/bash

# Deploy services to production

set -e

echo "Building and deploying services..."

# Build all services
docker-compose build

# Deploy with zero downtime
docker-compose up -d --no-deps --force-recreate

# Wait for health checks
echo "Waiting for services to be healthy..."
sleep 30

# Verify deployment
./scripts/health-check.sh

echo "Deployment completed successfully!"
EOF
        chmod +x "scripts/deploy.sh"
        print_success "Created deployment script"
    fi

    # Health check script
    if [ ! -f "scripts/health-check.sh" ]; then
        cat > "scripts/health-check.sh" << 'EOF'
#!/bin/bash

# Check health of all services

services=(
    "gateway:8000"
    "auth-service:8001"
    "training-service:8002"
    "forecasting-service:8003"
    "data-service:8004"
    "tenant-service:8005"
    "notification-service:8006"
)

echo "Checking service health..."

all_healthy=true

for service_port in "${services[@]}"; do
    service=$(echo $service_port | cut -d: -f1)
    port=$(echo $service_port | cut -d: -f2)

    if curl -f -s "http://localhost:$port/health" > /dev/null; then
        echo "✅ $service is healthy"
    else
        echo "❌ $service is unhealthy"
        all_healthy=false
    fi
done

if $all_healthy; then
    echo "🎉 All services are healthy!"
    exit 0
else
    echo "⚠️ Some services are unhealthy"
    exit 1
fi
EOF
        chmod +x "scripts/health-check.sh"
        print_success "Created health check script"
    fi
}
# Create monitoring configuration
# Writes a minimal Prometheus scrape config covering every service,
# unless one already exists.
create_monitoring_config() {
    print_step "Creating monitoring configuration..."

    # Prometheus configuration
    if [ -f "infrastructure/monitoring/prometheus.yml" ]; then
        return 0
    fi

    mkdir -p infrastructure/monitoring
    cat > "infrastructure/monitoring/prometheus.yml" << 'EOF'
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'gateway'
    static_configs:
      - targets: ['gateway:8000']

  - job_name: 'auth-service'
    static_configs:
      - targets: ['auth-service:8000']

  - job_name: 'training-service'
    static_configs:
      - targets: ['training-service:8000']

  - job_name: 'forecasting-service'
    static_configs:
      - targets: ['forecasting-service:8000']

  - job_name: 'data-service'
    static_configs:
      - targets: ['data-service:8000']

  - job_name: 'tenant-service'
    static_configs:
      - targets: ['tenant-service:8000']

  - job_name: 'notification-service'
    static_configs:
      - targets: ['notification-service:8000']
EOF
    print_success "Created Prometheus configuration"
}
# Final setup steps
# Marks scripts executable, ensures runtime directories exist, and
# prints the post-setup checklist.
final_setup() {
    print_header "FINAL SETUP STEPS"

    # Make scripts executable
    chmod +x scripts/*.sh

    # Create logs directory
    mkdir -p logs models

    print_success "Setup completed successfully!"

    # Horizontal rule used by the summary below.
    rule() { echo "================================================================"; }

    echo ""
    rule
    echo -e "${GREEN}NEXT STEPS${NC}"
    rule
    echo "1. Update .env file with your configuration:"
    echo "   - Change JWT_SECRET_KEY"
    echo "   - Add AEMET and Madrid Open Data API keys"
    echo "   - Configure email settings"
    echo ""
    echo "2. Copy the configuration classes from artifacts:"
    echo "   - Copy BaseServiceSettings to shared/config/base.py"
    echo "   - Copy service-specific settings to respective config files"
    echo ""
    echo "3. Start the services:"
    echo "   docker-compose up -d"
    echo ""
    echo "4. Check service health:"
    echo "   ./scripts/health-check.sh"
    echo ""
    echo "5. Access the services:"
    echo "   - Gateway: http://localhost:8000"
    echo "   - API Docs: http://localhost:8000/docs"
    echo "   - Grafana: http://localhost:3002 (admin/admin123)"
    echo "   - RabbitMQ: http://localhost:15672 (bakery/forecast123)"
    echo ""
    rule
}
# Main execution
# Runs every setup phase in order; `set -e` (declared at the top of the
# script) aborts on the first failing phase.
main() {
    print_header "BAKERY FORECASTING PLATFORM - MICROSERVICES SETUP"

    local phase
    for phase in \
        check_prerequisites \
        create_directory_structure \
        create_shared_config \
        create_service_configs \
        create_gateway_config \
        create_environment_file \
        create_docker_compose \
        create_utility_scripts \
        create_monitoring_config \
        final_setup; do
        "$phase"
    done
}

# Run main function
main "$@"
EOF
|
||||
chmod +x "scripts/setup.sh"
|
||||
print_success "Created setup script"
|
||||
fi
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
#
# Run the pytest suite inside every running service container.
# FIX: the original appended `|| echo "Tests failed..."` to the exec,
# so the script always exited 0 and CI never saw failures. Failures
# are now collected and reported via a non-zero exit code.

echo "🧪 Running tests for all services..."

failed_services=()

# Run tests for each service whose container is currently up.
for service in auth training forecasting data tenant notification; do
    echo "Testing $service service..."
    if docker-compose ps | grep -q "${service}-service.*Up"; then
        # -T: disable TTY allocation (required for non-interactive exec).
        if ! docker-compose exec -T "${service}-service" python -m pytest tests/ -v; then
            echo "Tests failed for $service"
            failed_services+=("$service")
        fi
    else
        echo "Service $service is not running, skipping tests"
    fi
done

if [ ${#failed_services[@]} -gt 0 ]; then
    echo "❌ Tests failed for: ${failed_services[*]}"
    exit 1
fi

echo "✅ Test run completed"
|
||||
@@ -1,297 +0,0 @@
|
||||
#!/bin/bash
# ================================================================
# CONFIGURATION VALIDATION SCRIPT
# scripts/validate-config.sh
# ================================================================
#
# Validates environment variables, service/shared configuration,
# docker-compose definitions, directory layout and utility scripts
# before the platform is started.
#
# FIX: the shebang must be the very first line of the file to be
# honoured by the kernel; it previously appeared below the header.

# Abort on the first unhandled command failure.
set -e

# ANSI color codes used by the print_* helpers below.
readonly GREEN='\033[0;32m'
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'  # reset / "no color"
|
||||
|
||||
# Print a section banner: blank line, ruler, colored title, ruler.
# Arguments: $1 - banner title (backslash escapes are interpreted).
print_header() {
    local ruler="================================================================"
    printf '\n%s\n' "$ruler"
    printf '%b\n' "${GREEN}$1${NC}"   # %b == echo -e escape handling
    printf '%s\n' "$ruler"
}
|
||||
|
||||
# Print a success line: green check mark followed by the message.
# Arguments: $1 - message text (backslash escapes are interpreted).
print_success() {
    printf '%b\n' "${GREEN}[✓]${NC} $1"
}
|
||||
|
||||
# Print a failure line: red cross mark followed by the message.
# Arguments: $1 - message text (backslash escapes are interpreted).
print_error() {
    printf '%b\n' "${RED}[✗]${NC} $1"
}
|
||||
|
||||
# Print a warning line: yellow exclamation mark followed by the message.
# Arguments: $1 - message text (backslash escapes are interpreted).
print_warning() {
    printf '%b\n' "${YELLOW}[!]${NC} $1"
}
|
||||
|
||||
# Validate the .env file: every critical variable must be set, must not
# look like a placeholder, and the JWT secret must be long enough.
# Globals:   sources .env into the current shell; reads ENVIRONMENT,
#            JWT_SECRET_KEY and the critical variables listed below.
# Outputs:   per-variable status lines via the print_* helpers.
# Returns:   exits 1 when .env is missing or any critical check fails.
validate_env_file() {
    print_header "VALIDATING ENVIRONMENT CONFIGURATION"

    if [ ! -f ".env" ]; then
        print_error ".env file not found"
        exit 1
    fi

    # Load environment variables into the current shell.
    # shellcheck disable=SC1091
    source .env

    # Variables the platform cannot start without.
    local critical_vars=(
        "JWT_SECRET_KEY"
        "AUTH_DATABASE_URL"
        "TRAINING_DATABASE_URL"
        "FORECASTING_DATABASE_URL"
        "DATA_DATABASE_URL"
        "TENANT_DATABASE_URL"
        "NOTIFICATION_DATABASE_URL"
        "REDIS_URL"
        "RABBITMQ_URL"
    )

    local all_good=true
    local var

    for var in "${critical_vars[@]}"; do
        # ${!var} is bash indirect expansion: the value of the variable
        # whose name is stored in $var.
        if [ -z "${!var}" ]; then
            print_error "$var is not set"
            all_good=false
        elif [[ "${!var}" == *"change"* ]] || [[ "${!var}" == *"default"* ]]; then
            print_warning "$var appears to use default/placeholder value"
        else
            print_success "$var is configured"
        fi
    done

    # A short JWT secret is brute-forceable; enforce a minimum length.
    # (Quoted length expansion; unset secret yields length 0 → error.)
    if [ "${#JWT_SECRET_KEY}" -lt 32 ]; then
        print_error "JWT_SECRET_KEY must be at least 32 characters long"
        all_good=false
    fi

    # Production deployments additionally need external API / SMTP creds
    # (warn only — they are not fatal for a local run).
    if [ "$ENVIRONMENT" = "production" ]; then
        local production_vars=("AEMET_API_KEY" "MADRID_OPENDATA_API_KEY" "SMTP_USER" "SMTP_PASSWORD")
        for var in "${production_vars[@]}"; do
            if [ -z "${!var}" ]; then
                print_warning "$var should be configured for production"
            fi
        done
    fi

    # FIX: the original ran `if $all_good; then`, which executes the
    # variable's value as a command; compare the string explicitly.
    if [ "$all_good" = true ]; then
        print_success "Environment configuration is valid"
    else
        print_error "Environment configuration has issues"
        exit 1
    fi
}
|
||||
|
||||
# Check that every microservice ships an app/core/config.py following
# the shared configuration conventions, and that the gateway has one too.
# Outputs status lines via the print_* helpers; never exits.
validate_service_configs() {
    print_header "VALIDATING SERVICE CONFIGURATIONS"

    local svc cfg
    for svc in auth training forecasting data tenant notification; do
        cfg="services/$svc/app/core/config.py"

        if [ ! -f "$cfg" ]; then
            print_error "$svc configuration missing"
            continue
        fi

        print_success "$svc configuration exists"

        # Convention: settings class inherits from BaseServiceSettings.
        if grep -q "BaseServiceSettings" "$cfg"; then
            print_success "$svc uses BaseServiceSettings"
        else
            print_warning "$svc doesn't inherit from BaseServiceSettings"
        fi

        # Convention: each service declares its own DATABASE_URL.
        if grep -q "DATABASE_URL" "$cfg"; then
            print_success "$svc has database configuration"
        else
            print_warning "$svc missing database configuration"
        fi
    done

    # The gateway keeps its config outside services/.
    if [ -f "gateway/app/core/config.py" ]; then
        print_success "Gateway configuration exists"
    else
        print_error "Gateway configuration missing"
    fi
}
|
||||
|
||||
# Verify shared/config/base.py exists (and defines BaseServiceSettings),
# then check that each shared helper module directory is present.
# Outputs status lines via the print_* helpers; never exits.
validate_shared_config() {
    print_header "VALIDATING SHARED CONFIGURATION"

    local base="shared/config/base.py"
    if [ -f "$base" ]; then
        print_success "Base configuration exists"

        if grep -q "BaseServiceSettings" "$base"; then
            print_success "BaseServiceSettings class found"
        else
            print_error "BaseServiceSettings class missing"
        fi
    else
        print_error "Base configuration missing"
    fi

    local mod
    for mod in auth database messaging monitoring utils; do
        if [ -d "shared/$mod" ]; then
            print_success "Shared $mod module exists"
        else
            print_warning "Shared $mod module missing"
        fi
    done
}
|
||||
|
||||
# Internal helper: report whether "$1:" appears as a key in
# docker-compose.yml, using print_error when $2 is "error" and
# print_warning otherwise. Factored out of three copy-pasted loops.
_check_compose_entry() {
    local entry=$1 severity=$2
    if grep -q "$entry:" docker-compose.yml; then
        print_success "$entry defined in docker-compose.yml"
    elif [ "$severity" = "error" ]; then
        print_error "$entry missing from docker-compose.yml"
    else
        print_warning "$entry missing from docker-compose.yml"
    fi
}

# Validate docker-compose.yml (application services, per-service
# databases, infrastructure) and the per-service Dockerfiles.
# Outputs status lines via the print_* helpers; never exits.
validate_docker_config() {
    print_header "VALIDATING DOCKER CONFIGURATION"

    local entry
    if [ -f "docker-compose.yml" ]; then
        print_success "Docker Compose configuration exists"

        # Application services must be present (error if missing).
        for entry in gateway auth-service training-service forecasting-service data-service tenant-service notification-service; do
            _check_compose_entry "$entry" error
        done

        # One database per service (error if missing).
        for entry in auth-db training-db forecasting-db data-db tenant-db notification-db; do
            _check_compose_entry "$entry" error
        done

        # Infrastructure is recommended but not fatal (warning only).
        for entry in redis rabbitmq prometheus grafana; do
            _check_compose_entry "$entry" warning
        done
    else
        print_error "Docker Compose configuration missing"
    fi

    # Each service (and the gateway) should ship its own Dockerfile.
    local service dockerfile
    for service in gateway auth training forecasting data tenant notification; do
        if [ "$service" = "gateway" ]; then
            dockerfile="gateway/Dockerfile"
        else
            dockerfile="services/$service/Dockerfile"
        fi

        if [ -f "$dockerfile" ]; then
            print_success "$service Dockerfile exists"
        else
            print_warning "$service Dockerfile missing"
        fi
    done
}
|
||||
|
||||
# Check that the expected directory layout exists and create anything
# that is missing, so later file-level checks don't fail on absent paths.
# FIX: loop variables and accumulators are now `local` instead of
# leaking into the global scope of the script.
# Outputs status lines via the print_* helpers; never exits.
validate_directory_structure() {
    print_header "VALIDATING DIRECTORY STRUCTURE"

    local required_dirs=(
        "shared/config"
        "shared/auth"
        "shared/database"
        "shared/messaging"
        "gateway/app/core"
        "services/auth/app/core"
        "services/training/app/core"
        "services/forecasting/app/core"
        "services/data/app/core"
        "services/tenant/app/core"
        "services/notification/app/core"
        "scripts"
        "logs"
        "models"
        "templates"
    )

    local missing_dirs=()
    local dir

    # First pass: report status and collect what is absent.
    for dir in "${required_dirs[@]}"; do
        if [ -d "$dir" ]; then
            print_success "$dir exists"
        else
            print_warning "$dir missing"
            missing_dirs+=("$dir")
        fi
    done

    # Second pass: self-heal by creating whatever was missing.
    if [ ${#missing_dirs[@]} -gt 0 ]; then
        print_warning "Creating missing directories..."
        for dir in "${missing_dirs[@]}"; do
            mkdir -p "$dir"
            print_success "Created $dir"
        done
    fi
}
|
||||
|
||||
# Confirm each utility script exists and is executable, repairing the
# executable bit in place when it is missing.
# Outputs status lines via the print_* helpers; never exits.
validate_scripts() {
    print_header "VALIDATING UTILITY SCRIPTS"

    local name path
    for name in setup.sh test.sh deploy.sh health-check.sh validate-config.sh; do
        path="scripts/$name"

        if [ ! -f "$path" ]; then
            print_warning "$name missing"
            continue
        fi

        print_success "$name exists"

        if [ -x "$path" ]; then
            print_success "$name is executable"
        else
            print_warning "$name is not executable - fixing..."
            chmod +x "$path"
        fi
    done
}
|
||||
|
||||
# Main validation function.
# Runs every check in dependency order: the directory check runs first
# (it also creates missing directories) so later file checks operate on
# a complete tree. validate_env_file may exit 1 and abort the run.
main() {
    print_header "CONFIGURATION VALIDATION"

    # Order matters: structure is validated (and repaired) before the
    # file-level checks that depend on it.
    validate_directory_structure
    validate_shared_config
    validate_service_configs
    validate_env_file       # exits 1 on missing/invalid critical vars
    validate_docker_config
    validate_scripts

    print_header "VALIDATION COMPLETE"
    echo "If all validations passed, you're ready to start the services!"
    echo ""
    echo "Next steps:"
    echo "1. docker-compose up -d"
    echo "2. ./scripts/health-check.sh"
}
|
||||
|
||||
# Run validation
|
||||
main "$@"
|
||||
Reference in New Issue
Block a user