diff --git a/.env.sample b/.env.sample index cac80cdc..be94b57e 100644 --- a/.env.sample +++ b/.env.sample @@ -1,59 +1,118 @@ # ================================================================ -# UPDATED .env.example FILE -# .env.example +# BAKERY FORECASTING PLATFORM - ENVIRONMENT CONFIGURATION +# Single source of truth - no duplication with docker-compose.yml # ================================================================ # ================================================================ -# ENVIRONMENT CONFIGURATION +# ENVIRONMENT & BUILD SETTINGS # ================================================================ # Environment: development, staging, production, testing ENVIRONMENT=development DEBUG=true LOG_LEVEL=INFO + +# Build configuration +BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") +VCS_REF=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown") +IMAGE_TAG=latest +DOMAIN=localhost + +# ================================================================ +# SERVICE PORTS (used by Docker Compose) +# ================================================================ + +# Core services +GATEWAY_PORT=8000 +AUTH_SERVICE_PORT=8001 +TRAINING_SERVICE_PORT=8002 +FORECASTING_SERVICE_PORT=8003 +DATA_SERVICE_PORT=8004 +TENANT_SERVICE_PORT=8005 +NOTIFICATION_SERVICE_PORT=8006 + +# Frontend +DASHBOARD_PORT=3000 +MARKETING_PORT=3001 + +# Infrastructure +REDIS_PORT=6379 +RABBITMQ_PORT=5672 +RABBITMQ_MANAGEMENT_PORT=15672 + +# Monitoring +PROMETHEUS_PORT=9090 +GRAFANA_PORT=3002 + +# Development tools +PGADMIN_PORT=5050 +REDIS_COMMANDER_PORT=8081 + +# ================================================================ +# CORE SERVICE SETTINGS (used by applications) +# ================================================================ + +# Application metadata SERVICE_VERSION=1.0.0 +TIMEZONE=Europe/Madrid +LOCALE=es_ES.UTF-8 +CURRENCY=EUR # ================================================================ # DATABASE CONFIGURATION -# Each service has its own dedicated database # 
================================================================ -# Auth Service Database +# PostgreSQL common settings +POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=es_ES.UTF-8 --lc-ctype=es_ES.UTF-8 + +# Auth Database +AUTH_DB_NAME=auth_db +AUTH_DB_USER=auth_user +AUTH_DB_PASSWORD=auth_pass123 AUTH_DATABASE_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db -# Training Service Database +# Training Database +TRAINING_DB_NAME=training_db +TRAINING_DB_USER=training_user +TRAINING_DB_PASSWORD=training_pass123 TRAINING_DATABASE_URL=postgresql+asyncpg://training_user:training_pass123@training-db:5432/training_db -# Forecasting Service Database +# Forecasting Database +FORECASTING_DB_NAME=forecasting_db +FORECASTING_DB_USER=forecasting_user +FORECASTING_DB_PASSWORD=forecasting_pass123 FORECASTING_DATABASE_URL=postgresql+asyncpg://forecasting_user:forecasting_pass123@forecasting-db:5432/forecasting_db -# Data Service Database +# Data Database +DATA_DB_NAME=data_db +DATA_DB_USER=data_user +DATA_DB_PASSWORD=data_pass123 DATA_DATABASE_URL=postgresql+asyncpg://data_user:data_pass123@data-db:5432/data_db -# Tenant Service Database +# Tenant Database +TENANT_DB_NAME=tenant_db +TENANT_DB_USER=tenant_user +TENANT_DB_PASSWORD=tenant_pass123 TENANT_DATABASE_URL=postgresql+asyncpg://tenant_user:tenant_pass123@tenant-db:5432/tenant_db -# Notification Service Database +# Notification Database +NOTIFICATION_DB_NAME=notification_db +NOTIFICATION_DB_USER=notification_user +NOTIFICATION_DB_PASSWORD=notification_pass123 NOTIFICATION_DATABASE_URL=postgresql+asyncpg://notification_user:notification_pass123@notification-db:5432/notification_db -# Database Connection Pool Settings -DB_POOL_SIZE=10 -DB_MAX_OVERFLOW=20 -DB_POOL_TIMEOUT=30 -DB_POOL_RECYCLE=3600 -DB_ECHO=false - # ================================================================ # REDIS CONFIGURATION -# Each service uses a different Redis database # 
================================================================ -REDIS_URL=redis://redis:6379 +REDIS_PASSWORD=redis_pass123 +REDIS_MAX_MEMORY=512mb +REDIS_URL=redis://:redis_pass123@redis:6379 REDIS_MAX_CONNECTIONS=50 -# Redis Database Assignments: +# Redis Database Assignments (used by standardized config) # 0 - Auth Service -# 1 - Training Service +# 1 - Training Service # 2 - Forecasting Service # 3 - Data Service # 4 - Tenant Service @@ -64,18 +123,20 @@ REDIS_MAX_CONNECTIONS=50 # RABBITMQ CONFIGURATION # ================================================================ +RABBITMQ_USER=bakery +RABBITMQ_PASSWORD=forecast123 +RABBITMQ_VHOST=/ +RABBITMQ_ERLANG_COOKIE=bakery-secret-cookie-change-in-production RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ RABBITMQ_EXCHANGE=bakery_events RABBITMQ_QUEUE_PREFIX=bakery -RABBITMQ_RETRY_ATTEMPTS=3 -RABBITMQ_RETRY_DELAY=5 # ================================================================ # AUTHENTICATION & SECURITY # ================================================================ # JWT Configuration (CHANGE IN PRODUCTION!) 
-JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-very-long-and-secure +JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-min-32-characters-long JWT_ALGORITHM=HS256 JWT_ACCESS_TOKEN_EXPIRE_MINUTES=30 JWT_REFRESH_TOKEN_EXPIRE_DAYS=7 @@ -100,7 +161,7 @@ LOCKOUT_DURATION_MINUTES=30 # CORS & API CONFIGURATION # ================================================================ -CORS_ORIGINS=http://localhost:3000,http://localhost:3001,http://127.0.0.1:3000 +CORS_ORIGINS=http://localhost:3000,http://localhost:3001,http://127.0.0.1:3000,https://panaderia.vercel.app CORS_ALLOW_CREDENTIALS=true # Rate Limiting @@ -113,15 +174,15 @@ RATE_LIMIT_BURST=10 API_DOCS_ENABLED=true # ================================================================ -# SERVICE URLS +# SERVICE DISCOVERY URLS (used by standardized config) # ================================================================ GATEWAY_URL=http://gateway:8000 AUTH_SERVICE_URL=http://auth-service:8000 +TENANT_SERVICE_URL=http://tenant-service:8000 TRAINING_SERVICE_URL=http://training-service:8000 FORECASTING_SERVICE_URL=http://forecasting-service:8000 DATA_SERVICE_URL=http://data-service:8000 -TENANT_SERVICE_URL=http://tenant-service:8000 NOTIFICATION_SERVICE_URL=http://notification-service:8000 # HTTP Client Settings @@ -130,29 +191,37 @@ HTTP_RETRIES=3 HTTP_RETRY_DELAY=1.0 # ================================================================ -# EXTERNAL APIS & INTEGRATIONS +# EXTERNAL APIS # ================================================================ # Spanish Weather Service (AEMET) -AEMET_API_KEY=your-aemet-api-key-here +# SECURITY: never commit a real API key to the sample env file. The previous +# revision of this hunk embedded a live AEMET JWT; that token must be rotated. +AEMET_API_KEY=your-aemet-api-key-here +AEMET_BASE_URL=https://opendata.aemet.es/opendata AEMET_TIMEOUT=30 AEMET_RETRY_ATTEMPTS=3 
# Madrid Open Data Platform MADRID_OPENDATA_API_KEY=your-madrid-opendata-key-here +MADRID_OPENDATA_BASE_URL=https://datos.madrid.es MADRID_OPENDATA_TIMEOUT=30 -# Email Configuration (Gmail example) +# ================================================================ +# EMAIL CONFIGURATION +# ================================================================ + SMTP_HOST=smtp.gmail.com SMTP_PORT=587 SMTP_USER=your-email@gmail.com -SMTP_PASSWORD=your-email-app-password +SMTP_PASSWORD=your-app-specific-password SMTP_TLS=true SMTP_SSL=false DEFAULT_FROM_EMAIL=noreply@bakeryforecast.es DEFAULT_FROM_NAME=Bakery Forecast -# WhatsApp API (Twilio example) +# ================================================================ +# WHATSAPP/TWILIO CONFIGURATION +# ================================================================ + WHATSAPP_API_KEY=your-whatsapp-api-key-here WHATSAPP_BASE_URL=https://api.twilio.com WHATSAPP_FROM_NUMBER=whatsapp:+14155238886 @@ -173,6 +242,10 @@ MAX_CONCURRENT_TRAINING_JOBS=3 MIN_TRAINING_DATA_DAYS=30 TRAINING_BATCH_SIZE=1000 +# Resource Limits (used by Docker Compose) +TRAINING_MEMORY_LIMIT=2G +TRAINING_CPU_LIMIT=1.5 + # Prophet Configuration PROPHET_SEASONALITY_MODE=additive PROPHET_CHANGEPOINT_PRIOR_SCALE=0.05 @@ -194,9 +267,6 @@ MIN_HISTORICAL_DAYS=60 PREDICTION_CONFIDENCE_THRESHOLD=0.8 # Spanish Business Context -TIMEZONE=Europe/Madrid -LOCALE=es_ES.UTF-8 -CURRENCY=EUR BUSINESS_HOUR_START=7 BUSINESS_HOUR_END=20 @@ -260,23 +330,23 @@ LOG_RETENTION_DAYS=30 # Metrics & Monitoring PROMETHEUS_ENABLED=true -PROMETHEUS_PORT=9090 - -# Tracing (disabled by default) -JAEGER_ENABLED=false -JAEGER_AGENT_HOST=localhost -JAEGER_AGENT_PORT=6831 +PROMETHEUS_RETENTION=200h # Health Checks HEALTH_CHECK_TIMEOUT=30 HEALTH_CHECK_INTERVAL=30 +# Grafana Configuration +GRAFANA_ADMIN_USER=admin +GRAFANA_ADMIN_PASSWORD=admin123 +GRAFANA_SECRET_KEY=grafana-secret-key-change-in-production +GRAFANA_ROOT_URL=http://localhost:3002/ + # 
================================================================ # DATA RETENTION & CLEANUP # ================================================================ DATA_RETENTION_DAYS=365 -LOG_RETENTION_DAYS=90 METRIC_RETENTION_DAYS=90 TEMP_FILE_CLEANUP_HOURS=24 @@ -347,6 +417,26 @@ DELIVERY_TRACKING_ENABLED=true OPEN_TRACKING_ENABLED=true CLICK_TRACKING_ENABLED=true +# ================================================================ +# FRONTEND CONFIGURATION +# ================================================================ + +# Frontend URLs (used by Docker Compose build args) +FRONTEND_API_URL=http://localhost:8000/api/v1 +FRONTEND_WS_URL=ws://localhost:8000/ws + +# ================================================================ +# DEVELOPMENT TOOLS CONFIGURATION +# ================================================================ + +# pgAdmin +PGADMIN_EMAIL=admin@bakery.local +PGADMIN_PASSWORD=admin123 + +# Redis Commander +REDIS_COMMANDER_USER=admin +REDIS_COMMANDER_PASSWORD=admin123 + # ================================================================ # COMPLIANCE & GDPR # ================================================================ @@ -419,4 +509,23 @@ TRAINING_WORKER_COUNT=1 # Support & Contact SUPPORT_EMAIL=soporte@bakeryforecast.es -INVOICE_LANGUAGE=es \ No newline at end of file +INVOICE_LANGUAGE=es + +# ================================================================ +# NOTES FOR CONFIGURATION MANAGEMENT +# ================================================================ + +# This .env file is the SINGLE SOURCE OF TRUTH for all configuration. +# Docker Compose uses these variables via ${VARIABLE_NAME} substitution. +# Application services load these via env_file: .env in docker-compose.yml +# No duplication between .env and docker-compose.yml environment sections. + +# To override for different environments: +# 1. Copy this file: cp .env .env.production +# 2. Modify values in .env.production +# 3. 
Use: docker-compose --env-file .env.production up -d + +# For sensitive values in production: +# 1. Use Docker secrets or external secret management +# 2. Override via environment variables: REDIS_PASSWORD=secret docker-compose up +# 3. Use .env.local (gitignored) for local overrides \ No newline at end of file diff --git a/deployment/nginx/docker-compose.test.yml b/deployment/nginx/docker-compose.test.yml new file mode 100644 index 00000000..4edab757 --- /dev/null +++ b/deployment/nginx/docker-compose.test.yml @@ -0,0 +1,133 @@ +# ================================================================ +# TESTING DOCKER COMPOSE FILE +# docker-compose.test.yml +# ================================================================ + +version: '3.8' + +# Testing-specific configuration +# Usage: docker-compose -f docker-compose.yml -f docker-compose.test.yml up -d + +services: + # Test database services (separate from development/production) + test-auth-db: + image: postgres:15-alpine + environment: + - POSTGRES_DB=test_auth_db + - POSTGRES_USER=test_user + - POSTGRES_PASSWORD=test_pass + tmpfs: + - /var/lib/postgresql/data # Use tmpfs for faster tests + + test-training-db: + image: postgres:15-alpine + environment: + - POSTGRES_DB=test_training_db + - POSTGRES_USER=test_user + - POSTGRES_PASSWORD=test_pass + tmpfs: + - /var/lib/postgresql/data + + test-forecasting-db: + image: postgres:15-alpine + environment: + - POSTGRES_DB=test_forecasting_db + - POSTGRES_USER=test_user + - POSTGRES_PASSWORD=test_pass + tmpfs: + - /var/lib/postgresql/data + + test-data-db: + image: postgres:15-alpine + environment: + - POSTGRES_DB=test_data_db + - POSTGRES_USER=test_user + - POSTGRES_PASSWORD=test_pass + tmpfs: + - /var/lib/postgresql/data + + test-tenant-db: + image: postgres:15-alpine + environment: + - POSTGRES_DB=test_tenant_db + - POSTGRES_USER=test_user + - POSTGRES_PASSWORD=test_pass + tmpfs: + - /var/lib/postgresql/data + + test-notification-db: + image: postgres:15-alpine + 
environment: + - POSTGRES_DB=test_notification_db + - POSTGRES_USER=test_user + - POSTGRES_PASSWORD=test_pass + tmpfs: + - /var/lib/postgresql/data + + # Test Redis + test-redis: + image: redis:7-alpine + command: redis-server --appendonly no --save "" + tmpfs: + - /data + + # Override services to use test databases + auth-service: + environment: + - TESTING=true + - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-auth-db:5432/test_auth_db + - REDIS_URL=redis://test-redis:6379 + - MOCK_EXTERNAL_APIS=true + depends_on: + - test-auth-db + - test-redis + + training-service: + environment: + - TESTING=true + - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-training-db:5432/test_training_db + - REDIS_URL=redis://test-redis:6379 + - MOCK_EXTERNAL_APIS=true + depends_on: + - test-training-db + - test-redis + + forecasting-service: + environment: + - TESTING=true + - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-forecasting-db:5432/test_forecasting_db + - REDIS_URL=redis://test-redis:6379 + - MOCK_EXTERNAL_APIS=true + depends_on: + - test-forecasting-db + - test-redis + + data-service: + environment: + - TESTING=true + - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-data-db:5432/test_data_db + - REDIS_URL=redis://test-redis:6379 + - MOCK_EXTERNAL_APIS=true + depends_on: + - test-data-db + - test-redis + + tenant-service: + environment: + - TESTING=true + - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-tenant-db:5432/test_tenant_db + - REDIS_URL=redis://test-redis:6379 + - MOCK_EXTERNAL_APIS=true + depends_on: + - test-tenant-db + - test-redis + + notification-service: + environment: + - TESTING=true + - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-notification-db:5432/test_notification_db + - REDIS_URL=redis://test-redis:6379 + - MOCK_EXTERNAL_APIS=true + depends_on: + - test-notification-db + - test-redis \ No newline at end of file diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml 
new file mode 100644 index 00000000..e69de29b diff --git a/docker-compose.yml b/docker-compose.yml index b8d7bab7..5d517246 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,60 +1,83 @@ # ================================================================ -# UPDATED DOCKER COMPOSE - PROPER AUTHENTICATION ARCHITECTURE +# OPTIMIZED DOCKER COMPOSE - NO ENVIRONMENT DUPLICATION +# Single source of truth: .env file only # ================================================================ + +# ================================================================ +# NETWORKS & VOLUMES (same as before) +# ================================================================ networks: bakery-network: driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 volumes: - postgres_auth_data: - postgres_training_data: - postgres_forecasting_data: - postgres_data_data: - postgres_tenant_data: - postgres_notification_data: + auth_db_data: + training_db_data: + forecasting_db_data: + data_db_data: + tenant_db_data: + notification_db_data: redis_data: rabbitmq_data: prometheus_data: grafana_data: + model_storage: + log_storage: +# ================================================================ +# SERVICES - USING ONLY .env FILE +# ================================================================ services: + # ================================================================ - # INFRASTRUCTURE SERVICES + # INFRASTRUCTURE - NO DUPLICATION # ================================================================ - # Redis - For caching, sessions, and rate limiting redis: image: redis:7-alpine container_name: bakery-redis - command: redis-server --appendonly yes --requirepass redis_pass123 + restart: unless-stopped + # ONLY use environment substitution from .env + command: > + redis-server + --appendonly yes + --requirepass ${REDIS_PASSWORD} + --maxmemory ${REDIS_MAX_MEMORY:-512mb} + --databases 16 ports: - - "6379:6379" + - "${REDIS_PORT}:6379" volumes: - redis_data:/data networks: - - 
bakery-network + bakery-network: + ipv4_address: 172.20.0.10 healthcheck: - test: ["CMD", "redis-cli", "--raw", "incr", "ping"] + test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"] interval: 30s timeout: 10s retries: 3 - # RabbitMQ - Message broker rabbitmq: - image: rabbitmq:3-management + image: rabbitmq:3.12-management-alpine container_name: bakery-rabbitmq + restart: unless-stopped + # ONLY use environment substitution from .env environment: - - RABBITMQ_DEFAULT_USER=bakery - - RABBITMQ_DEFAULT_PASS=forecast123 - - RABBITMQ_DEFAULT_VHOST=/ + - RABBITMQ_DEFAULT_USER=${RABBITMQ_USER} + - RABBITMQ_DEFAULT_PASS=${RABBITMQ_PASSWORD} + - RABBITMQ_DEFAULT_VHOST=${RABBITMQ_VHOST} ports: - - "5672:5672" - - "15672:15672" + - "${RABBITMQ_PORT}:5672" + - "${RABBITMQ_MANAGEMENT_PORT}:15672" volumes: - rabbitmq_data:/var/lib/rabbitmq networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.11 healthcheck: test: ["CMD", "rabbitmq-diagnostics", "ping"] interval: 30s @@ -62,167 +85,164 @@ services: retries: 3 # ================================================================ - # DATABASE SERVICES + # DATABASES - NO DUPLICATION # ================================================================ - # Auth Database auth-db: - image: postgres:15 + image: postgres:15-alpine container_name: bakery-auth-db + restart: unless-stopped + # ONLY reference .env variables environment: - - POSTGRES_DB=auth_db - - POSTGRES_USER=auth_user - - POSTGRES_PASSWORD=auth_pass123 + - POSTGRES_DB=${AUTH_DB_NAME} + - POSTGRES_USER=${AUTH_DB_USER} + - POSTGRES_PASSWORD=${AUTH_DB_PASSWORD} + - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS} + - PGDATA=/var/lib/postgresql/data/pgdata volumes: - - postgres_auth_data:/var/lib/postgresql/data + - auth_db_data:/var/lib/postgresql/data networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.20 healthcheck: - test: ["CMD-SHELL", "pg_isready -U auth_user -d auth_db"] + test: ["CMD-SHELL", "pg_isready -U ${AUTH_DB_USER} -d 
${AUTH_DB_NAME}"] interval: 10s timeout: 5s retries: 5 - # Tenant Database - tenant-db: - image: postgres:15 - container_name: bakery-tenant-db - environment: - - POSTGRES_DB=tenant_db - - POSTGRES_USER=tenant_user - - POSTGRES_PASSWORD=tenant_pass123 - volumes: - - postgres_tenant_data:/var/lib/postgresql/data - networks: - - bakery-network - healthcheck: - test: ["CMD-SHELL", "pg_isready -U tenant_user -d tenant_db"] - interval: 10s - timeout: 5s - retries: 5 - - # Training Database training-db: - image: postgres:15 + image: postgres:15-alpine container_name: bakery-training-db + restart: unless-stopped environment: - - POSTGRES_DB=training_db - - POSTGRES_USER=training_user - - POSTGRES_PASSWORD=training_pass123 + - POSTGRES_DB=${TRAINING_DB_NAME} + - POSTGRES_USER=${TRAINING_DB_USER} + - POSTGRES_PASSWORD=${TRAINING_DB_PASSWORD} + - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS} + - PGDATA=/var/lib/postgresql/data/pgdata volumes: - - postgres_training_data:/var/lib/postgresql/data + - training_db_data:/var/lib/postgresql/data networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.21 healthcheck: - test: ["CMD-SHELL", "pg_isready -U training_user -d training_db"] + test: ["CMD-SHELL", "pg_isready -U ${TRAINING_DB_USER} -d ${TRAINING_DB_NAME}"] interval: 10s timeout: 5s retries: 5 - # Forecasting Database forecasting-db: - image: postgres:15 + image: postgres:15-alpine container_name: bakery-forecasting-db + restart: unless-stopped environment: - - POSTGRES_DB=forecasting_db - - POSTGRES_USER=forecasting_user - - POSTGRES_PASSWORD=forecasting_pass123 + - POSTGRES_DB=${FORECASTING_DB_NAME} + - POSTGRES_USER=${FORECASTING_DB_USER} + - POSTGRES_PASSWORD=${FORECASTING_DB_PASSWORD} + - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS} + - PGDATA=/var/lib/postgresql/data/pgdata volumes: - - postgres_forecasting_data:/var/lib/postgresql/data + - forecasting_db_data:/var/lib/postgresql/data networks: - - bakery-network + bakery-network: + ipv4_address: 
172.20.0.22 healthcheck: - test: ["CMD-SHELL", "pg_isready -U forecasting_user -d forecasting_db"] + test: ["CMD-SHELL", "pg_isready -U ${FORECASTING_DB_USER} -d ${FORECASTING_DB_NAME}"] interval: 10s timeout: 5s retries: 5 - # Data Database data-db: - image: postgres:15 + image: postgres:15-alpine container_name: bakery-data-db + restart: unless-stopped environment: - - POSTGRES_DB=data_db - - POSTGRES_USER=data_user - - POSTGRES_PASSWORD=data_pass123 + - POSTGRES_DB=${DATA_DB_NAME} + - POSTGRES_USER=${DATA_DB_USER} + - POSTGRES_PASSWORD=${DATA_DB_PASSWORD} + - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS} + - PGDATA=/var/lib/postgresql/data/pgdata volumes: - - postgres_data_data:/var/lib/postgresql/data + - data_db_data:/var/lib/postgresql/data networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.23 healthcheck: - test: ["CMD-SHELL", "pg_isready -U data_user -d data_db"] + test: ["CMD-SHELL", "pg_isready -U ${DATA_DB_USER} -d ${DATA_DB_NAME}"] + interval: 10s + timeout: 5s + retries: 5 + + tenant-db: + image: postgres:15-alpine + container_name: bakery-tenant-db + restart: unless-stopped + environment: + - POSTGRES_DB=${TENANT_DB_NAME} + - POSTGRES_USER=${TENANT_DB_USER} + - POSTGRES_PASSWORD=${TENANT_DB_PASSWORD} + - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS} + - PGDATA=/var/lib/postgresql/data/pgdata + volumes: + - tenant_db_data:/var/lib/postgresql/data + networks: + bakery-network: + ipv4_address: 172.20.0.24 + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${TENANT_DB_USER} -d ${TENANT_DB_NAME}"] interval: 10s timeout: 5s retries: 5 - # Notification Database notification-db: - image: postgres:15 + image: postgres:15-alpine container_name: bakery-notification-db + restart: unless-stopped environment: - - POSTGRES_DB=notification_db - - POSTGRES_USER=notification_user - - POSTGRES_PASSWORD=notification_pass123 + - POSTGRES_DB=${NOTIFICATION_DB_NAME} + - POSTGRES_USER=${NOTIFICATION_DB_USER} + - 
POSTGRES_PASSWORD=${NOTIFICATION_DB_PASSWORD} + - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS} + - PGDATA=/var/lib/postgresql/data/pgdata volumes: - - postgres_notification_data:/var/lib/postgresql/data + - notification_db_data:/var/lib/postgresql/data networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.25 healthcheck: - test: ["CMD-SHELL", "pg_isready -U notification_user -d notification_db"] + test: ["CMD-SHELL", "pg_isready -U ${NOTIFICATION_DB_USER} -d ${NOTIFICATION_DB_NAME}"] interval: 10s timeout: 5s retries: 5 # ================================================================ - # MICROSERVICES + # MICROSERVICES - CLEAN APPROACH # ================================================================ - # API Gateway - Enhanced with Redis caching gateway: build: context: . dockerfile: ./gateway/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/gateway:${IMAGE_TAG} container_name: bakery-gateway - environment: - # Service Discovery - - AUTH_SERVICE_URL=http://auth-service:8000 - - TENANT_SERVICE_URL=http://tenant-service:8000 - - TRAINING_SERVICE_URL=http://training-service:8000 - - FORECASTING_SERVICE_URL=http://forecasting-service:8000 - - DATA_SERVICE_URL=http://data-service:8000 - - NOTIFICATION_SERVICE_URL=http://notification-service:8000 - - # Authentication & Caching - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - REDIS_URL=redis://:redis_pass123@redis:6379/0 - - # CORS Configuration - - CORS_ORIGINS=http://localhost:3000,http://localhost:3001,https://panaderia.vercel.app - - # Service Configuration - - SERVICE_NAME=gateway - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - - # Rate Limiting - - RATE_LIMIT_CALLS_PER_MINUTE=60 - - RATE_LIMIT_BURST=10 - + restart: unless-stopped + # ONLY load from .env file - no duplication + env_file: .env ports: - - "8000:8000" + - "${GATEWAY_PORT}:8000" depends_on: redis: condition: service_healthy 
rabbitmq: condition: service_healthy - auth-service: - condition: service_healthy - tenant-service: - condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.100 volumes: + - log_storage:/app/logs - ./gateway:/app - ./shared:/app/shared healthcheck: @@ -231,40 +251,20 @@ services: timeout: 10s retries: 3 - # Auth Service - Enhanced with proper JWT handling auth-service: build: context: . dockerfile: ./services/auth/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/auth-service:${IMAGE_TAG} container_name: bakery-auth-service - environment: - # Database - - DATABASE_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db - - # Redis for sessions and rate limiting - - REDIS_URL=redis://:redis_pass123@redis:6379/1 - - # Message Queue - - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - - # JWT Configuration - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - JWT_ACCESS_TOKEN_EXPIRE_MINUTES=30 - - JWT_REFRESH_TOKEN_EXPIRE_DAYS=7 - - # Security Configuration - - PASSWORD_MIN_LENGTH=8 - - MAX_LOGIN_ATTEMPTS=5 - - LOCKOUT_DURATION_MINUTES=30 - - # Service Configuration - - SERVICE_NAME=auth-service - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - + restart: unless-stopped + # ONLY load from .env file - no duplication + env_file: .env ports: - - "8001:8000" + - "${AUTH_SERVICE_PORT}:8000" depends_on: auth-db: condition: service_healthy @@ -273,8 +273,10 @@ services: rabbitmq: condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.101 volumes: + - log_storage:/app/logs - ./services/auth:/app - ./shared:/app/shared healthcheck: @@ -283,36 +285,19 @@ services: timeout: 10s retries: 3 - # Tenant Service - New enhanced service tenant-service: build: context: . 
dockerfile: ./services/tenant/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/tenant-service:${IMAGE_TAG} container_name: bakery-tenant-service - environment: - # Database - - DATABASE_URL=postgresql+asyncpg://tenant_user:tenant_pass123@tenant-db:5432/tenant_db - - # Redis for caching - - REDIS_URL=redis://:redis_pass123@redis:6379/2 - - # Message Queue - - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - - # Service Discovery - - AUTH_SERVICE_URL=http://auth-service:8000 - - # JWT Configuration (for token verification) - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - # Service Configuration - - SERVICE_NAME=tenant-service - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - + restart: unless-stopped + env_file: .env ports: - - "8005:8000" + - "${TENANT_SERVICE_PORT}:8000" depends_on: tenant-db: condition: service_healthy @@ -323,8 +308,10 @@ services: auth-service: condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.105 volumes: + - log_storage:/app/logs - ./services/tenant:/app - ./shared:/app/shared healthcheck: @@ -333,44 +320,19 @@ services: timeout: 10s retries: 3 - # Training Service - Enhanced with tenant isolation training-service: build: context: . 
dockerfile: ./services/training/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/training-service:${IMAGE_TAG} container_name: bakery-training-service - environment: - # Database - - DATABASE_URL=postgresql+asyncpg://training_user:training_pass123@training-db:5432/training_db - - # Redis for job queuing and caching - - REDIS_URL=redis://:redis_pass123@redis:6379/3 - - # Message Queue - - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - - # Service Discovery - - AUTH_SERVICE_URL=http://auth-service:8000 - - DATA_SERVICE_URL=http://data-service:8000 - - TENANT_SERVICE_URL=http://tenant-service:8000 - - # JWT Configuration - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - # ML Configuration - - MODEL_STORAGE_PATH=/app/models - - MAX_TRAINING_TIME_MINUTES=30 - - MIN_TRAINING_DATA_DAYS=30 - - PROPHET_SEASONALITY_MODE=additive - - # Service Configuration - - SERVICE_NAME=training-service - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - + restart: unless-stopped + env_file: .env ports: - - "8002:8000" + - "${TRAINING_SERVICE_PORT}:8000" depends_on: training-db: condition: service_healthy @@ -383,53 +345,37 @@ services: data-service: condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.102 volumes: + - log_storage:/app/logs + - model_storage:/app/models - ./services/training:/app - ./shared:/app/shared - - ./models:/app/models # Persistent model storage healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/health"] interval: 30s timeout: 10s retries: 3 + deploy: + resources: + limits: + memory: ${TRAINING_MEMORY_LIMIT:-2G} + cpus: '${TRAINING_CPU_LIMIT:-1.5}' - # Forecasting Service - Enhanced with proper auth forecasting-service: build: context: . 
dockerfile: ./services/forecasting/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/forecasting-service:${IMAGE_TAG} container_name: bakery-forecasting-service - environment: - # Database - - DATABASE_URL=postgresql+asyncpg://forecasting_user:forecasting_pass123@forecasting-db:5432/forecasting_db - - # Redis for caching predictions - - REDIS_URL=redis://:redis_pass123@redis:6379/4 - - # Message Queue - - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - - # Service Discovery - - AUTH_SERVICE_URL=http://auth-service:8000 - - TRAINING_SERVICE_URL=http://training-service:8000 - - DATA_SERVICE_URL=http://data-service:8000 - - # JWT Configuration - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - # ML Configuration - - MODEL_STORAGE_PATH=/app/models - - PREDICTION_CACHE_TTL_HOURS=6 - - # Service Configuration - - SERVICE_NAME=forecasting-service - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - + restart: unless-stopped + env_file: .env ports: - - "8003:8000" + - "${FORECASTING_SERVICE_PORT}:8000" depends_on: forecasting-db: condition: service_healthy @@ -442,57 +388,32 @@ services: training-service: condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.103 volumes: + - log_storage:/app/logs + - model_storage:/app/models - ./services/forecasting:/app - ./shared:/app/shared - - ./models:/app/models # Shared model storage healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/health"] interval: 30s timeout: 10s retries: 3 - # Data Service - Enhanced with external API integration data-service: build: context: . 
dockerfile: ./services/data/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/data-service:${IMAGE_TAG} container_name: bakery-data-service - environment: - # Database - - DATABASE_URL=postgresql+asyncpg://data_user:data_pass123@data-db:5432/data_db - - # Redis for API caching - - REDIS_URL=redis://:redis_pass123@redis:6379/5 - - # Message Queue - - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - - # Service Discovery - - AUTH_SERVICE_URL=http://auth-service:8000 - - TENANT_SERVICE_URL=http://tenant-service:8000 - - # External API Keys - - AEMET_API_KEY=${AEMET_API_KEY:-eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1YWxmYXJvQGdtYWlsLmNvbSIsImp0aSI6ImRjZWVmNTEwLTdmYzEtNGMxNy1hODZkLWQ4NzdlZDc5ZDllNyIsImlzcyI6IkFFTUVUIiwiaWF0IjoxNzUyODMwMDg3LCJ1c2VySWQiOiJkY2VlZjUxMC03ZmMxLTRjMTctYTg2ZC1kODc3ZWQ3OWQ5ZTciLCJyb2xlIjoiIn0.C047gaiEhWhH4ItDgkHSwg8HzKTzw87TOPRTRf8j-2w} - - MADRID_OPENDATA_API_KEY=${MADRID_OPENDATA_API_KEY:-your-madrid-opendata-key} - - # JWT Configuration - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - # Data Configuration - - WEATHER_CACHE_TTL_HOURS=1 - - TRAFFIC_CACHE_TTL_HOURS=1 - - DATA_RETENTION_DAYS=365 - - # Service Configuration - - SERVICE_NAME=data-service - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - + restart: unless-stopped + env_file: .env ports: - - "8004:8000" + - "${DATA_SERVICE_PORT}:8000" depends_on: data-db: condition: service_healthy @@ -503,8 +424,10 @@ services: auth-service: condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.104 volumes: + - log_storage:/app/logs - ./services/data:/app - ./shared:/app/shared healthcheck: @@ -513,49 +436,19 @@ services: timeout: 10s retries: 3 - # Notification Service - Enhanced with WhatsApp and Email notification-service: build: context: . 
dockerfile: ./services/notification/Dockerfile + args: + - ENVIRONMENT=${ENVIRONMENT} + - BUILD_DATE=${BUILD_DATE} + image: bakery/notification-service:${IMAGE_TAG} container_name: bakery-notification-service - environment: - # Database - - DATABASE_URL=postgresql+asyncpg://notification_user:notification_pass123@notification-db:5432/notification_db - - # Redis for queue management - - REDIS_URL=redis://:redis_pass123@redis:6379/6 - - # Message Queue - - RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - - # Service Discovery - - AUTH_SERVICE_URL=http://auth-service:8000 - - TENANT_SERVICE_URL=http://tenant-service:8000 - - # Email Configuration - - SMTP_HOST=${SMTP_HOST:-smtp.gmail.com} - - SMTP_PORT=${SMTP_PORT:-587} - - SMTP_USER=${SMTP_USER:-your-email@gmail.com} - - SMTP_PASSWORD=${SMTP_PASSWORD:-your-app-password} - - SMTP_FROM_NAME=Bakery Forecast - - # WhatsApp Configuration (Twilio) - - WHATSAPP_ACCOUNT_SID=${WHATSAPP_ACCOUNT_SID:-your-twilio-sid} - - WHATSAPP_AUTH_TOKEN=${WHATSAPP_AUTH_TOKEN:-your-twilio-token} - - WHATSAPP_FROM_NUMBER=${WHATSAPP_FROM_NUMBER:-whatsapp:+14155238886} - - # JWT Configuration - - JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production} - - JWT_ALGORITHM=HS256 - - # Service Configuration - - SERVICE_NAME=notification-service - - SERVICE_VERSION=1.0.0 - - LOG_LEVEL=INFO - + restart: unless-stopped + env_file: .env ports: - - "8006:8000" + - "${NOTIFICATION_SERVICE_PORT}:8000" depends_on: notification-db: condition: service_healthy @@ -566,8 +459,10 @@ services: auth-service: condition: service_healthy networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.106 volumes: + - log_storage:/app/logs - ./services/notification:/app - ./shared:/app/shared healthcheck: @@ -577,98 +472,119 @@ services: retries: 3 # ================================================================ - # MONITORING SERVICES + # MONITORING - SIMPLE APPROACH # 
================================================================ - # Prometheus - Metrics collection prometheus: - image: prom/prometheus:latest + image: prom/prometheus:v2.45.0 container_name: bakery-prometheus + restart: unless-stopped command: - '--config.file=/etc/prometheus/prometheus.yml' - '--storage.tsdb.path=/prometheus' - - '--web.console.libraries=/etc/prometheus/console_libraries' - - '--web.console.templates=/etc/prometheus/consoles' - - '--storage.tsdb.retention.time=200h' + - '--storage.tsdb.retention.time=${PROMETHEUS_RETENTION:-200h}' - '--web.enable-lifecycle' ports: - - "9090:9090" + - "${PROMETHEUS_PORT}:9090" volumes: - - ./infrastructure/monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml + - ./infrastructure/monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro - prometheus_data:/prometheus networks: - - bakery-network - depends_on: - - gateway - - auth-service - - tenant-service - - training-service - - forecasting-service - - data-service - - notification-service + bakery-network: + ipv4_address: 172.20.0.200 + profiles: + - monitoring - # Grafana - Metrics visualization grafana: - image: grafana/grafana:latest + image: grafana/grafana:10.0.0 container_name: bakery-grafana + restart: unless-stopped environment: - - GF_SECURITY_ADMIN_PASSWORD=admin123 + - GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER} + - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD} - GF_USERS_ALLOW_SIGN_UP=false + - GF_DEFAULT_TIMEZONE=${TIMEZONE} + - GF_SERVER_ROOT_URL=${GRAFANA_ROOT_URL} ports: - - "3002:3000" + - "${GRAFANA_PORT}:3000" volumes: - grafana_data:/var/lib/grafana - - ./infrastructure/monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards - - ./infrastructure/monitoring/grafana/datasources:/etc/grafana/provisioning/datasources + - ./infrastructure/monitoring/grafana:/etc/grafana/provisioning:ro networks: - - bakery-network + bakery-network: + ipv4_address: 172.20.0.201 depends_on: - prometheus + profiles: + - 
monitoring # ================================================================ - # FRONTEND SERVICES (Optional for development) + # FRONTEND - CLEAN CONFIG # ================================================================ - # React Dashboard dashboard: build: context: ./frontend - dockerfile: Dockerfile.dev - container_name: bakery-frontend - environment: - - REACT_APP_API_URL=http://localhost:8000/api/v1 - - REACT_APP_WS_URL=ws://localhost:8000/ws + dockerfile: Dockerfile.${ENVIRONMENT} + args: + - REACT_APP_API_URL=${FRONTEND_API_URL} + - REACT_APP_WS_URL=${FRONTEND_WS_URL} + - REACT_APP_ENVIRONMENT=${ENVIRONMENT} + image: bakery/dashboard:${IMAGE_TAG} + container_name: bakery-dashboard + restart: unless-stopped ports: - - "3000:3000" + - "${DASHBOARD_PORT}:3000" depends_on: - - gateway + gateway: + condition: service_healthy + networks: + bakery-network: + ipv4_address: 172.20.0.110 + profiles: + - frontend + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:3000/"] + interval: 30s + timeout: 10s + retries: 3 + + # ================================================================ + # DEVELOPMENT TOOLS - OPTIONAL + # ================================================================ + + pgadmin: + image: dpage/pgadmin4:7.4 + container_name: bakery-pgadmin + restart: unless-stopped + environment: + - PGADMIN_DEFAULT_EMAIL=${PGADMIN_EMAIL} + - PGADMIN_DEFAULT_PASSWORD=${PGADMIN_PASSWORD} + - PGADMIN_CONFIG_SERVER_MODE=False + ports: + - "${PGADMIN_PORT}:80" + volumes: + - ./infrastructure/pgadmin/servers.json:/pgadmin4/servers.json:ro networks: - bakery-network - volumes: - - ./frontend:/app - - /app/node_modules - command: npm start + profiles: + - development + - admin -# ================================================================ -# ENVIRONMENT VARIABLES TEMPLATE -# ================================================================ - -# Create a .env file with these variables: -# -# # JWT Secret (CHANGE IN PRODUCTION!) 
-# JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-min-32-chars -# -# # External API Keys -# AEMET_API_KEY=your-aemet-api-key-here -# MADRID_OPENDATA_API_KEY=your-madrid-opendata-key-here -# -# # Email Configuration (Gmail example) -# SMTP_HOST=smtp.gmail.com -# SMTP_PORT=587 -# SMTP_USER=your-email@gmail.com -# SMTP_PASSWORD=your-app-specific-password -# -# # WhatsApp/Twilio Configuration -# WHATSAPP_ACCOUNT_SID=your-twilio-account-sid -# WHATSAPP_AUTH_TOKEN=your-twilio-auth-token -# WHATSAPP_FROM_NUMBER=whatsapp:+14155238886 \ No newline at end of file + redis-commander: + image: rediscommander/redis-commander:latest + container_name: bakery-redis-commander + restart: unless-stopped + environment: + - REDIS_HOSTS=local:redis:6379:0:${REDIS_PASSWORD} + - HTTP_USER=${REDIS_COMMANDER_USER} + - HTTP_PASSWORD=${REDIS_COMMANDER_PASSWORD} + ports: + - "${REDIS_COMMANDER_PORT}:8081" + networks: + - bakery-network + profiles: + - development + - admin + depends_on: + - redis \ No newline at end of file diff --git a/frontend/Dockerfile.dev b/frontend/Dockerfile.development similarity index 100% rename from frontend/Dockerfile.dev rename to frontend/Dockerfile.development diff --git a/infrastructure/monitoring/grafana/dashboards/dashboard.yml b/infrastructure/monitoring/grafana/dashboards/dashboard.yml index cc47a55f..e1248ea9 100644 --- a/infrastructure/monitoring/grafana/dashboards/dashboard.yml +++ b/infrastructure/monitoring/grafana/dashboards/dashboard.yml @@ -1,11 +1,15 @@ +# infrastructure/monitoring/grafana/dashboards/dashboard.yml +# Grafana dashboard provisioning + apiVersion: 1 providers: - - name: 'Bakery Forecasting' + - name: 'bakery-dashboards' orgId: 1 - folder: '' + folder: 'Bakery Forecasting' type: file disableDeletion: false updateIntervalSeconds: 10 + allowUiUpdates: true options: path: /etc/grafana/provisioning/dashboards \ No newline at end of file diff --git a/infrastructure/monitoring/grafana/datasources/prometheus.yml 
b/infrastructure/monitoring/grafana/datasources/prometheus.yml index f88db84c..10f4fa55 100644 --- a/infrastructure/monitoring/grafana/datasources/prometheus.yml +++ b/infrastructure/monitoring/grafana/datasources/prometheus.yml @@ -1,3 +1,6 @@ +# infrastructure/monitoring/grafana/datasources/prometheus.yml +# Grafana Prometheus datasource configuration + apiVersion: 1 datasources: @@ -6,4 +9,20 @@ datasources: access: proxy url: http://prometheus:9090 isDefault: true + version: 1 + editable: true + jsonData: + timeInterval: "15s" + queryTimeout: "60s" + httpMethod: "POST" + exemplarTraceIdDestinations: + - name: trace_id + datasourceUid: jaeger + + - name: Jaeger + type: jaeger + access: proxy + url: http://jaeger:16686 + uid: jaeger + version: 1 editable: true \ No newline at end of file diff --git a/infrastructure/monitoring/prometheus/prometheus.yml b/infrastructure/monitoring/prometheus/prometheus.yml index b699b724..6b68d705 100644 --- a/infrastructure/monitoring/prometheus/prometheus.yml +++ b/infrastructure/monitoring/prometheus/prometheus.yml @@ -1,17 +1,30 @@ ---- +# infrastructure/monitoring/prometheus/prometheus.yml +# Prometheus configuration + global: scrape_interval: 15s evaluation_interval: 15s + external_labels: + cluster: 'bakery-forecasting' + replica: 'prometheus-01' rule_files: - - "alerts.yml" + - "/etc/prometheus/rules/*.yml" + +alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 scrape_configs: + # Service discovery for microservices - job_name: 'gateway' static_configs: - targets: ['gateway:8000'] metrics_path: '/metrics' scrape_interval: 30s + scrape_timeout: 10s - job_name: 'auth-service' static_configs: @@ -49,11 +62,21 @@ scrape_configs: metrics_path: '/metrics' scrape_interval: 30s + # Infrastructure monitoring - job_name: 'redis' static_configs: - targets: ['redis:6379'] + metrics_path: '/metrics' + scrape_interval: 30s - job_name: 'rabbitmq' static_configs: - targets: ['rabbitmq:15692'] + metrics_path: 
'/metrics' + scrape_interval: 30s + # Database monitoring (requires postgres_exporter) + - job_name: 'postgres' + static_configs: + - targets: ['postgres-exporter:9187'] + scrape_interval: 30s \ No newline at end of file diff --git a/infrastructure/monitoring/prometheus/rules/alerts.yml b/infrastructure/monitoring/prometheus/rules/alerts.yml new file mode 100644 index 00000000..9fbf233b --- /dev/null +++ b/infrastructure/monitoring/prometheus/rules/alerts.yml @@ -0,0 +1,86 @@ +# infrastructure/monitoring/prometheus/rules/alerts.yml +# Prometheus alerting rules + +groups: + - name: bakery_services + rules: + # Service availability alerts + - alert: ServiceDown + expr: up == 0 + for: 2m + labels: + severity: critical + annotations: + summary: "Service {{ $labels.job }} is down" + description: "Service {{ $labels.job }} has been down for more than 2 minutes." + + # High error rate alerts + - alert: HighErrorRate + expr: rate(http_requests_total{status=~"5.."}[5m]) > 0.1 + for: 5m + labels: + severity: warning + annotations: + summary: "High error rate on {{ $labels.job }}" + description: "Error rate is {{ $value }} errors per second on {{ $labels.job }}." + + # High response time alerts + - alert: HighResponseTime + expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m])) > 1 + for: 5m + labels: + severity: warning + annotations: + summary: "High response time on {{ $labels.job }}" + description: "95th percentile response time is {{ $value }}s on {{ $labels.job }}." + + # Memory usage alerts + - alert: HighMemoryUsage + expr: process_resident_memory_bytes / 1024 / 1024 > 500 + for: 5m + labels: + severity: warning + annotations: + summary: "High memory usage on {{ $labels.job }}" + description: "Memory usage is {{ $value }}MB on {{ $labels.job }}." 
+ + # Database connection alerts + - alert: DatabaseConnectionHigh + expr: pg_stat_activity_count > 80 + for: 5m + labels: + severity: warning + annotations: + summary: "High database connections" + description: "Database has {{ $value }} active connections." + + - name: bakery_business + rules: + # Training job alerts + - alert: TrainingJobFailed + expr: increase(training_jobs_failed_total[1h]) > 0 + labels: + severity: warning + annotations: + summary: "Training job failed" + description: "{{ $value }} training jobs have failed in the last hour." + + # Prediction accuracy alerts + - alert: LowPredictionAccuracy + expr: prediction_accuracy < 0.7 + for: 15m + labels: + severity: warning + annotations: + summary: "Low prediction accuracy" + description: "Prediction accuracy is {{ $value }} for tenant {{ $labels.tenant_id }}." + + # API rate limit alerts + - alert: APIRateLimitHit + expr: increase(rate_limit_hits_total[5m]) > 10 + for: 5m + labels: + severity: warning + annotations: + summary: "API rate limit hit frequently" + description: "Rate limit has been hit {{ $value }} times in 5 minutes." 
\ No newline at end of file diff --git a/infrastructure/pgadmin/pgpass b/infrastructure/pgadmin/pgpass new file mode 100644 index 00000000..7f672fcb --- /dev/null +++ b/infrastructure/pgadmin/pgpass @@ -0,0 +1,6 @@ +auth-db:5432:auth_db:auth_user:auth_pass123 +training-db:5432:training_db:training_user:training_pass123 +forecasting-db:5432:forecasting_db:forecasting_user:forecasting_pass123 +data-db:5432:data_db:data_user:data_pass123 +tenant-db:5432:tenant_db:tenant_user:tenant_pass123 +notification-db:5432:notification_db:notification_user:notification_pass123 \ No newline at end of file diff --git a/infrastructure/pgadmin/servers.json b/infrastructure/pgadmin/servers.json new file mode 100644 index 00000000..140abfe9 --- /dev/null +++ b/infrastructure/pgadmin/servers.json @@ -0,0 +1,64 @@ +{ + "Servers": { + "1": { + "Name": "Auth Database", + "Group": "Bakery Services", + "Host": "auth-db", + "Port": 5432, + "MaintenanceDB": "auth_db", + "Username": "auth_user", + "PassFile": "/pgadmin4/pgpass", + "SSLMode": "prefer" + }, + "2": { + "Name": "Training Database", + "Group": "Bakery Services", + "Host": "training-db", + "Port": 5432, + "MaintenanceDB": "training_db", + "Username": "training_user", + "PassFile": "/pgadmin4/pgpass", + "SSLMode": "prefer" + }, + "3": { + "Name": "Forecasting Database", + "Group": "Bakery Services", + "Host": "forecasting-db", + "Port": 5432, + "MaintenanceDB": "forecasting_db", + "Username": "forecasting_user", + "PassFile": "/pgadmin4/pgpass", + "SSLMode": "prefer" + }, + "4": { + "Name": "Data Database", + "Group": "Bakery Services", + "Host": "data-db", + "Port": 5432, + "MaintenanceDB": "data_db", + "Username": "data_user", + "PassFile": "/pgadmin4/pgpass", + "SSLMode": "prefer" + }, + "5": { + "Name": "Tenant Database", + "Group": "Bakery Services", + "Host": "tenant-db", + "Port": 5432, + "MaintenanceDB": "tenant_db", + "Username": "tenant_user", + "PassFile": "/pgadmin4/pgpass", + "SSLMode": "prefer" + }, + "6": { + "Name": 
"Notification Database", + "Group": "Bakery Services", + "Host": "notification-db", + "Port": 5432, + "MaintenanceDB": "notification_db", + "Username": "notification_user", + "PassFile": "/pgadmin4/pgpass", + "SSLMode": "prefer" + } + } +} \ No newline at end of file diff --git a/infrastructure/postgres/init-scripts/init.sql b/infrastructure/postgres/init-scripts/init.sql new file mode 100644 index 00000000..29456946 --- /dev/null +++ b/infrastructure/postgres/init-scripts/init.sql @@ -0,0 +1,26 @@ +-- Create extensions for all databases +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; +CREATE EXTENSION IF NOT EXISTS "pg_stat_statements"; +CREATE EXTENSION IF NOT EXISTS "pg_trgm"; + +-- Create Spanish collation for proper text sorting +-- This will be used for bakery names, product names, etc. +-- CREATE COLLATION IF NOT EXISTS spanish (provider = icu, locale = 'es-ES'); + +-- Set timezone to Madrid +SET timezone = 'Europe/Madrid'; + +-- Performance tuning for small to medium databases +ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements'; +ALTER SYSTEM SET max_connections = 100; +ALTER SYSTEM SET shared_buffers = '256MB'; +ALTER SYSTEM SET effective_cache_size = '1GB'; +ALTER SYSTEM SET maintenance_work_mem = '64MB'; +ALTER SYSTEM SET checkpoint_completion_target = 0.9; +ALTER SYSTEM SET wal_buffers = '16MB'; +ALTER SYSTEM SET default_statistics_target = 100; +ALTER SYSTEM SET random_page_cost = 1.1; +ALTER SYSTEM SET effective_io_concurrency = 200; + +-- Reload configuration +SELECT pg_reload_conf(); \ No newline at end of file diff --git a/infrastructure/rabbitmq/definitions.json b/infrastructure/rabbitmq/definitions.json new file mode 100644 index 00000000..e8e7507f --- /dev/null +++ b/infrastructure/rabbitmq/definitions.json @@ -0,0 +1,94 @@ +{ + "rabbit_version": "3.12.0", + "rabbitmq_version": "3.12.0", + "product_name": "RabbitMQ", + "product_version": "3.12.0", + "users": [ + { + "name": "bakery", + "password_hash": "hash_of_forecast123", + 
"hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": ["administrator"] + } + ], + "vhosts": [ + { + "name": "/" + } + ], + "permissions": [ + { + "user": "bakery", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + } + ], + "exchanges": [ + { + "name": "bakery_events", + "vhost": "/", + "type": "topic", + "durable": true, + "auto_delete": false, + "internal": false, + "arguments": {} + } + ], + "queues": [ + { + "name": "training_events", + "vhost": "/", + "durable": true, + "auto_delete": false, + "arguments": { + "x-message-ttl": 86400000 + } + }, + { + "name": "forecasting_events", + "vhost": "/", + "durable": true, + "auto_delete": false, + "arguments": { + "x-message-ttl": 86400000 + } + }, + { + "name": "notification_events", + "vhost": "/", + "durable": true, + "auto_delete": false, + "arguments": { + "x-message-ttl": 86400000 + } + } + ], + "bindings": [ + { + "source": "bakery_events", + "vhost": "/", + "destination": "training_events", + "destination_type": "queue", + "routing_key": "training.*", + "arguments": {} + }, + { + "source": "bakery_events", + "vhost": "/", + "destination": "forecasting_events", + "destination_type": "queue", + "routing_key": "forecasting.*", + "arguments": {} + }, + { + "source": "bakery_events", + "vhost": "/", + "destination": "notification_events", + "destination_type": "queue", + "routing_key": "notification.*", + "arguments": {} + } + ] +} \ No newline at end of file diff --git a/infrastructure/rabbitmq/rabbitmq.conf b/infrastructure/rabbitmq/rabbitmq.conf new file mode 100644 index 00000000..3fc48ad6 --- /dev/null +++ b/infrastructure/rabbitmq/rabbitmq.conf @@ -0,0 +1,26 @@ +# infrastructure/rabbitmq/rabbitmq.conf +# RabbitMQ configuration file + +# Network settings +listeners.tcp.default = 5672 +management.tcp.port = 15672 + +# Memory and disk thresholds +vm_memory_high_watermark.relative = 0.6 +disk_free_limit.relative = 2.0 + +# Default user (will be overridden by environment variables) 
+default_user = bakery +default_pass = forecast123 +default_vhost = / + +# Management plugin +management.load_definitions = /etc/rabbitmq/definitions.json + +# Logging +log.console = true +log.console.level = info +log.file = false + +# Queue settings +queue_master_locator = min-masters \ No newline at end of file diff --git a/infrastructure/redis/redis.conf b/infrastructure/redis/redis.conf new file mode 100644 index 00000000..2868a157 --- /dev/null +++ b/infrastructure/redis/redis.conf @@ -0,0 +1,51 @@ +# infrastructure/redis/redis.conf +# Redis configuration file + +# Network settings +bind 0.0.0.0 +port 6379 +timeout 300 +tcp-keepalive 300 + +# General settings +daemonize no +supervised no +pidfile /var/run/redis_6379.pid +loglevel notice +logfile "" + +# Persistence settings +save 900 1 +save 300 10 +save 60 10000 +stop-writes-on-bgsave-error yes +rdbcompression yes +rdbchecksum yes +dbfilename dump.rdb +dir ./ + +# Append only file settings +appendonly yes +appendfilename "appendonly.aof" +appendfsync everysec +no-appendfsync-on-rewrite no +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb +aof-load-truncated yes + +# Memory management +maxmemory 512mb +maxmemory-policy allkeys-lru +maxmemory-samples 5 + +# Security +requirepass redis_pass123 + +# Slow log +slowlog-log-slower-than 10000 +slowlog-max-len 128 + +# Client output buffer limits +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 diff --git a/scripts/docker-logs.sh b/scripts/docker-logs.sh new file mode 100755 index 00000000..90235308 --- /dev/null +++ b/scripts/docker-logs.sh @@ -0,0 +1,14 @@ +# scripts/docker-logs.sh +#!/bin/bash + +# View logs for specific service or all services + +SERVICE=${1:-"all"} + +if [ "$SERVICE" = "all" ]; then + echo "๐Ÿ“‹ Showing logs for all services..." + docker-compose logs -f --tail=100 +else + echo "๐Ÿ“‹ Showing logs for $SERVICE..." 
+ docker-compose logs -f --tail=100 $SERVICE +fi \ No newline at end of file diff --git a/scripts/docker-setup.sh b/scripts/docker-setup.sh new file mode 100755 index 00000000..5b5092a7 --- /dev/null +++ b/scripts/docker-setup.sh @@ -0,0 +1,214 @@ +# ================================================================ +# FIXED SETUP SCRIPT +# scripts/docker-setup.sh +# ================================================================ + +#!/bin/bash + +# Fixed setup script with proper error handling + +set -e + +ENVIRONMENT=${1:-development} +PROFILES=${2:-"development,frontend"} + +# Colors for output +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' + +# Logging functions +print_step() { + echo -e "${GREEN}[STEP]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +print_step "Setting up Bakery Forecasting Platform" +echo "Environment: $ENVIRONMENT" +echo "Profiles: $PROFILES" + +# Check if .env file exists +if [ ! -f ".env" ]; then + print_error ".env file not found!" + echo "Please create .env file with the content from the artifact." + echo "Run: cp .env.example .env" + exit 1 +fi + +# Validate critical environment variables +print_step "Validating environment variables..." 
+ +# Source the .env file to check variables +set -a # automatically export all variables +source .env +set +a + +# Check critical variables +critical_vars=( + "IMAGE_TAG" + "AUTH_DB_NAME" + "AUTH_DB_USER" + "AUTH_DB_PASSWORD" + "REDIS_PASSWORD" + "RABBITMQ_USER" + "RABBITMQ_PASSWORD" + "GATEWAY_PORT" + "AUTH_SERVICE_PORT" +) + +missing_vars=() + +for var in "${critical_vars[@]}"; do + if [ -z "${!var}" ]; then + missing_vars+=("$var") + fi +done + +if [ ${#missing_vars[@]} -gt 0 ]; then + print_error "Missing required environment variables:" + printf '%s\n' "${missing_vars[@]}" + exit 1 +fi + +print_step "Environment variables validated successfully" + +# Create necessary directories +print_step "Creating necessary directories..." +mkdir -p infrastructure/{redis,rabbitmq,postgres/init-scripts,monitoring/{prometheus/rules,grafana/{dashboards,datasources}},pgadmin} +mkdir -p backups logs models templates/{email,whatsapp} +mkdir -p shared/{config,auth,database,messaging,monitoring,utils} + +# Create basic monitoring configs if they don't exist +if [ ! -f "infrastructure/monitoring/prometheus/prometheus.yml" ]; then + print_step "Creating basic Prometheus configuration..." 
+ cat > infrastructure/monitoring/prometheus/prometheus.yml << 'EOF' +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'gateway' + static_configs: + - targets: ['gateway:8000'] + + - job_name: 'auth-service' + static_configs: + - targets: ['auth-service:8000'] + + - job_name: 'training-service' + static_configs: + - targets: ['training-service:8000'] + + - job_name: 'forecasting-service' + static_configs: + - targets: ['forecasting-service:8000'] + + - job_name: 'data-service' + static_configs: + - targets: ['data-service:8000'] + + - job_name: 'tenant-service' + static_configs: + - targets: ['tenant-service:8000'] + + - job_name: 'notification-service' + static_configs: + - targets: ['notification-service:8000'] +EOF +fi + +# Set proper permissions +chmod 644 infrastructure/monitoring/prometheus/prometheus.yml 2>/dev/null || true + +# Stop any existing containers +print_step "Stopping existing containers..." +docker-compose down --remove-orphans 2>/dev/null || true + +# Build and start services based on environment +case $ENVIRONMENT in + "development") + print_step "Starting development environment..." + IFS=',' read -ra PROFILE_ARRAY <<< "$PROFILES" + PROFILE_ARGS="" + for profile in "${PROFILE_ARRAY[@]}"; do + PROFILE_ARGS="$PROFILE_ARGS --profile $profile" + done + + # Build first to catch any build errors + print_step "Building services..." + docker-compose $PROFILE_ARGS build + + # Then start + print_step "Starting services..." + docker-compose $PROFILE_ARGS up -d + ;; + "production") + print_step "Starting production environment..." + docker-compose -f docker-compose.yml -f docker-compose.prod.yml --profile production --profile monitoring up -d --build + ;; + "testing") + print_step "Starting testing environment..." 
+ docker-compose -f docker-compose.yml -f docker-compose.test.yml up -d --build + ;; + *) + print_step "Starting with custom profiles: $PROFILES" + IFS=',' read -ra PROFILE_ARRAY <<< "$PROFILES" + PROFILE_ARGS="" + for profile in "${PROFILE_ARRAY[@]}"; do + PROFILE_ARGS="$PROFILE_ARGS --profile $profile" + done + docker-compose $PROFILE_ARGS build + docker-compose $PROFILE_ARGS up -d + ;; +esac + +# Wait a moment for services to start +print_step "Waiting for services to start..." +sleep 10 + +# Check service status +print_step "Checking service status..." +if command -v curl &> /dev/null; then + # Check if gateway is responding + if curl -f -s "http://localhost:${GATEWAY_PORT}/health" > /dev/null 2>&1; then + echo "โœ… Gateway is responding" + else + echo "โš ๏ธ Gateway is not yet responding (this is normal during first startup)" + fi +else + echo "โš ๏ธ curl not found - skipping health check" +fi + +print_step "Setup completed!" +echo "" +echo "================================================================" +echo -e "${GREEN}SERVICES AVAILABLE${NC}" +echo "================================================================" +echo "- Gateway: http://localhost:${GATEWAY_PORT}" +echo "- API Docs: http://localhost:${GATEWAY_PORT}/docs" +echo "- Dashboard: http://localhost:${DASHBOARD_PORT} (if frontend profile enabled)" +echo "- Grafana: http://localhost:${GRAFANA_PORT} (${GRAFANA_ADMIN_USER}/${GRAFANA_ADMIN_PASSWORD})" +echo "- pgAdmin: http://localhost:${PGADMIN_PORT} (${PGADMIN_EMAIL}/${PGADMIN_PASSWORD})" +echo "- RabbitMQ: http://localhost:${RABBITMQ_MANAGEMENT_PORT} (${RABBITMQ_USER}/${RABBITMQ_PASSWORD})" +echo "- Redis Commander: http://localhost:${REDIS_COMMANDER_PORT} (${REDIS_COMMANDER_USER}/${REDIS_COMMANDER_PASSWORD})" +echo "" +echo "================================================================" +echo -e "${GREEN}NEXT STEPS${NC}" +echo "================================================================" +echo "1. 
Check service health:" +echo " ./scripts/docker-health-check.sh" +echo "" +echo "2. View logs:" +echo " docker-compose logs -f" +echo "" +echo "3. Check specific service:" +echo " docker-compose logs -f auth-service" +echo "" +echo "If you see any errors, check the logs for more details." diff --git a/scripts/health-check.sh b/scripts/health-check.sh new file mode 100755 index 00000000..b7f90e7f --- /dev/null +++ b/scripts/health-check.sh @@ -0,0 +1,90 @@ +# scripts/docker-health-check.sh +#!/bin/bash + +# Comprehensive health check for all services + +services=( + "bakery-redis:6379" + "bakery-rabbitmq:15672" + "bakery-gateway:8000" + "bakery-auth-service:8000" + "bakery-tenant-service:8000" + "bakery-training-service:8000" + "bakery-forecasting-service:8000" + "bakery-data-service:8000" + "bakery-notification-service:8000" +) + +echo "๐Ÿฅ Checking service health..." + +for service_port in "${services[@]}"; do + service=$(echo $service_port | cut -d: -f1) + port=$(echo $service_port | cut -d: -f2) + + if docker ps --format "table {{.Names}}" | grep -q "^$service$"; then + if [ "$service" = "bakery-redis" ]; then + # Redis health check + if docker exec $service redis-cli -a redis_pass123 ping > /dev/null 2>&1; then + echo "โœ… $service is healthy" + else + echo "โŒ $service is unhealthy" + fi + elif [ "$service" = "bakery-rabbitmq" ]; then + # RabbitMQ health check + if curl -s -u bakery:forecast123 http://localhost:$port/api/health/checks/alarms > /dev/null; then + echo "โœ… $service is healthy" + else + echo "โŒ $service is unhealthy" + fi + else + # HTTP service health check + container_ip=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $service) + if curl -f -s "http://$container_ip:8000/health" > /dev/null; then + echo "โœ… $service is healthy" + else + echo "โŒ $service is unhealthy" + fi + fi + else + echo "โš ๏ธ $service is not running" + fi +done + +echo "" +echo "๐Ÿ” Checking database connections..." 
+ +databases=("auth-db" "training-db" "forecasting-db" "data-db" "tenant-db" "notification-db") + +for db in "${databases[@]}"; do + if docker ps --format "table {{.Names}}" | grep -q "^bakery-$db$"; then + db_name=$(echo $db | sed 's/-/_/g') + user=$(echo $db | sed 's/-db//' | sed 's/-/_/g')_user + + if docker exec bakery-$db pg_isready -U $user -d $db_name > /dev/null 2>&1; then + echo "โœ… bakery-$db is ready" + else + echo "โŒ bakery-$db is not ready" + fi + else + echo "โš ๏ธ bakery-$db is not running" + fi +done + +echo "" +echo "๐Ÿ“Š Service resource usage:" +docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" $(docker ps --format "{{.Names}}" | grep "^bakery-") + +# scripts/docker-logs.sh +#!/bin/bash + +# View logs for specific service or all services + +SERVICE=${1:-"all"} + +if [ "$SERVICE" = "all" ]; then + echo "๐Ÿ“‹ Showing logs for all services..." + docker-compose logs -f --tail=100 +else + echo "๐Ÿ“‹ Showing logs for $SERVICE..." + docker-compose logs -f --tail=100 $SERVICE +fi \ No newline at end of file diff --git a/scripts/restart-service.sh b/scripts/restart-service.sh new file mode 100755 index 00000000..75c5a025 --- /dev/null +++ b/scripts/restart-service.sh @@ -0,0 +1,106 @@ +# ================================================================ +# SERVICE RESTART SCRIPT +# scripts/restart-service.sh +# ================================================================ + +#!/bin/bash + +# Restart individual service script + +set -e + +GREEN='\033[0;32m' +BLUE='\033[0;34m' +RED='\033[0;31m' +NC='\033[0m' + +print_step() { + echo -e "${BLUE}[RESTART]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +restart_service() { + local service=$1 + + print_step "Restarting $service..." 
+ + # Build the service + docker-compose build $service + + # Restart with zero downtime + docker-compose up -d --no-deps --force-recreate $service + + # Wait a bit for the service to start + sleep 5 + + # Check health + local port + case $service in + "gateway") port=8000 ;; + "auth-service") port=8001 ;; + "training-service") port=8002 ;; + "forecasting-service") port=8003 ;; + "data-service") port=8004 ;; + "tenant-service") port=8005 ;; + "notification-service") port=8006 ;; + *) + print_error "Unknown service: $service" + exit 1 + ;; + esac + + # Health check with timeout + local attempts=0 + local max_attempts=12 # 60 seconds total + + while [ $attempts -lt $max_attempts ]; do + if curl -f -s "http://localhost:$port/health" > /dev/null 2>&1; then + print_success "$service is healthy and ready" + return 0 + fi + + attempts=$((attempts + 1)) + echo "Waiting for $service to be healthy... ($attempts/$max_attempts)" + sleep 5 + done + + print_error "$service failed to become healthy within 60 seconds" + return 1 +} + +# Main function +main() { + if [ $# -eq 0 ]; then + echo "Usage: $0 " + echo "" + echo "Available services:" + echo " gateway" + echo " auth-service" + echo " training-service" + echo " forecasting-service" + echo " data-service" + echo " tenant-service" + echo " notification-service" + echo "" + echo "Example: $0 auth-service" + exit 1 + fi + + local service=$1 + + echo "================================================================" + echo "RESTARTING SERVICE: $service" + echo "================================================================" + + restart_service $service +} + +# Run main function +main "$@" \ No newline at end of file diff --git a/scripts/setup.sh b/scripts/setup.sh index cf1c64ab..889ab8ee 100755 --- a/scripts/setup.sh +++ b/scripts/setup.sh @@ -1,13 +1,15 @@ +# ================================================================ +# UPDATED SETUP SCRIPT +# scripts/setup.sh +# 
================================================================ + #!/bin/bash -# scripts/setup.sh -# Intelligent Setup Script - Extract artifacts and create microservices structure +# Bakery Forecasting Platform - Microservices Setup Script +# This script sets up the complete development environment set -e -echo "๐Ÿš€ Setting up Bakery Forecasting Microservices Platform" -echo "========================================================" - # Colors for output RED='\033[0;31m' GREEN='\033[0;32m' @@ -15,336 +17,207 @@ YELLOW='\033[1;33m' BLUE='\033[0;34m' NC='\033[0m' # No Color +# Logging functions print_step() { - echo -e "${BLUE}โžค${NC} $1" + echo -e "${BLUE}[STEP]${NC} $1" } print_success() { - echo -e "${GREEN}โœ“${NC} $1" + echo -e "${GREEN}[SUCCESS]${NC} $1" } print_warning() { - echo -e "${YELLOW}โš ${NC} $1" + echo -e "${YELLOW}[WARNING]${NC} $1" } print_error() { - echo -e "${RED}โœ—${NC} $1" + echo -e "${RED}[ERROR]${NC} $1" +} + +print_header() { + echo "" + echo "================================================================" + echo -e "${BLUE}$1${NC}" + echo "================================================================" } # Check prerequisites -print_step "Checking prerequisites..." - -command -v docker >/dev/null 2>&1 || { - print_error "Docker is required but not installed. Please install Docker first." - exit 1 -} - -command -v docker-compose >/dev/null 2>&1 || { - print_error "Docker Compose is required but not installed. Please install Docker Compose first." - exit 1 -} - -print_success "Prerequisites check passed" - -# Function to extract files from artifact files -extract_artifact_files() { - local artifact_file="$1" - local description="$2" +check_prerequisites() { + print_header "CHECKING PREREQUISITES" - print_step "Processing $description..." - - if [ ! -f "$artifact_file" ]; then - print_warning "Artifact file $artifact_file not found, skipping..." - return + # Check Docker + if ! 
command -v docker &> /dev/null; then + print_error "Docker is not installed. Please install Docker first." + exit 1 fi + print_success "Docker is installed" - # Read the artifact file and extract individual files - local current_file="" - local current_content="" - local in_file=false + # Check Docker Compose + if ! command -v docker-compose &> /dev/null; then + print_error "Docker Compose is not installed. Please install Docker Compose first." + exit 1 + fi + print_success "Docker Compose is installed" - while IFS= read -r line; do - # Check if line starts with a file path (contains .py, .yml, .md, .sh, etc.) - if [[ "$line" =~ ^#[[:space:]]*(.*\.(py|yml|yaml|md|sh|txt|js|json|html|css|Dockerfile|requirements\.txt))$ ]]; then - # Save previous file if we were processing one - if [ "$in_file" = true ] && [ -n "$current_file" ]; then - # Create directory if it doesn't exist - local dir=$(dirname "$current_file") - mkdir -p "$dir" - - # Write content to file - echo "$current_content" > "$current_file" - print_success "Created: $current_file" - fi - - # Start new file - current_file=$(echo "$line" | sed 's/^#[[:space:]]*//') - current_content="" - in_file=true - - elif [ "$in_file" = true ]; then - # Add line to current file content - if [ -n "$current_content" ]; then - current_content="$current_content\n$line" - else - current_content="$line" - fi + # Check if Docker is running + if ! docker info &> /dev/null; then + print_error "Docker is not running. Please start Docker first." 
+ exit 1 + fi + print_success "Docker is running" + + # Check available ports + local ports=(8000 8001 8002 8003 8004 8005 8006 3000 3001 3002 5432 6379 5672 15672 9090) + local used_ports=() + + for port in "${ports[@]}"; do + if netstat -tuln 2>/dev/null | grep -q ":$port "; then + used_ports+=($port) fi - done < "$artifact_file" + done - # Save the last file - if [ "$in_file" = true ] && [ -n "$current_file" ]; then - local dir=$(dirname "$current_file") - mkdir -p "$dir" - echo -e "$current_content" > "$current_file" - print_success "Created: $current_file" + if [ ${#used_ports[@]} -gt 0 ]; then + print_warning "The following ports are in use: ${used_ports[*]}" + print_warning "You may need to stop other services or change port configurations" + else + print_success "All required ports are available" fi } -# Function to extract Python files with multiple file markers -extract_python_artifact() { - local artifact_file="$1" - local description="$2" +# Create directory structure +create_directory_structure() { + print_header "CREATING DIRECTORY STRUCTURE" - print_step "Processing $description..." + # Core directories + local dirs=( + "shared/config" + "shared/auth" + "shared/database" + "shared/messaging" + "shared/monitoring" + "shared/utils" + "gateway/app/core" + "gateway/app/middleware" + "gateway/app/routes" + "gateway/tests" + ) - if [ ! -f "$artifact_file" ]; then - print_warning "Artifact file $artifact_file not found, skipping..." 
- return - fi + # Service directories + local services=("auth" "training" "forecasting" "data" "tenant" "notification") + for service in "${services[@]}"; do + dirs+=( + "services/$service/app/core" + "services/$service/app/models" + "services/$service/app/schemas" + "services/$service/app/services" + "services/$service/app/api" + "services/$service/migrations/versions" + "services/$service/tests" + ) + done - # Use Python to parse the multi-file artifact - python3 << EOF -import re -import os - -def extract_files(filename): - with open('$artifact_file', 'r') as f: - content = f.read() + # Additional directories + dirs+=( + "frontend/dashboard/src/components" + "frontend/dashboard/src/pages" + "frontend/dashboard/src/services" + "frontend/dashboard/src/hooks" + "frontend/dashboard/src/utils" + "frontend/marketing/src/components" + "frontend/marketing/src/pages" + "infrastructure/docker" + "infrastructure/kubernetes/base" + "infrastructure/terraform/modules" + "deployment/nginx" + "tests/integration" + "tests/e2e" + "tests/performance" + "docs/architecture" + "docs/api" + "docs/deployment" + "scripts" + "logs" + "models" + "templates/email" + "templates/whatsapp" + ) - # Split by file markers (lines starting with # and containing file paths) - files = {} - current_file = None - current_content = [] - - for line in content.split('\n'): - # Check for file path markers - if re.match(r'^#\s+\S+\.(py|yml|yaml|txt|sh|json|html|css|js|Dockerfile)', line): - # Save previous file - if current_file and current_content: - files[current_file] = '\n'.join(current_content) - - # Start new file - current_file = re.sub(r'^#\s+', '', line) - current_content = [] - elif current_file: - current_content.append(line) - - # Save last file - if current_file and current_content: - files[current_file] = '\n'.join(current_content) - - # Write files - for filepath, file_content in files.items(): - # Clean up the content (remove leading/trailing quotes if present) - file_content = 
file_content.strip() - if file_content.startswith('"""') and file_content.endswith('"""'): - file_content = file_content[3:-3] - elif file_content.startswith("'''") and file_content.endswith("'''"): - file_content = file_content[3:-3] - - # Create directory - os.makedirs(os.path.dirname(filepath) if os.path.dirname(filepath) else '.', exist_ok=True) - - # Write file - with open(filepath, 'w') as f: - f.write(file_content) - - print(f"โœ“ Created: {filepath}") - -extract_files('$artifact_file') -EOF + for dir in "${dirs[@]}"; do + if [ ! -d "$dir" ]; then + mkdir -p "$dir" + print_success "Created directory: $dir" + fi + done } -# Create base project structure first -print_step "Creating base project structure..." - -# Create main directories -mkdir -p {gateway,services/{auth,training,forecasting,data,tenant,notification},shared,frontend/{dashboard,marketing},infrastructure,deployment,tests,docs,scripts} - -# Create subdirectories for each service -for service in auth training forecasting data tenant notification; do - mkdir -p services/$service/{app/{core,models,schemas,services,api,ml},migrations/versions,tests} - touch services/$service/app/__init__.py - touch services/$service/app/core/__init__.py - touch services/$service/app/models/__init__.py - touch services/$service/app/schemas/__init__.py - touch services/$service/app/services/__init__.py - touch services/$service/app/api/__init__.py - if [ "$service" = "training" ]; then - touch services/$service/app/ml/__init__.py +# Create shared base configuration +create_shared_config() { + print_step "Creating shared configuration..." + + if [ ! 
-f "shared/config/__init__.py" ]; then + touch "shared/config/__init__.py" fi -done - -# Create gateway structure -mkdir -p gateway/{app/{core,middleware,routes},tests} -touch gateway/app/__init__.py -touch gateway/app/core/__init__.py -touch gateway/app/middleware/__init__.py -touch gateway/app/routes/__init__.py - -# Create shared library structure -mkdir -p shared/{auth,database,messaging,monitoring,utils} -for lib in auth database messaging monitoring utils; do - touch shared/$lib/__init__.py -done - -# Create infrastructure directories -mkdir -p infrastructure/{docker,kubernetes,terraform,monitoring}/{base,dev,staging,production} -mkdir -p infrastructure/monitoring/{prometheus,grafana,logstash} - -print_success "Base project structure created" - -# Extract files from artifacts -print_step "Extracting files from artifacts..." - -# Process shared libraries -if [ -f "shared_libraries.py" ]; then - extract_python_artifact "shared_libraries.py" "Shared Libraries" -fi - -# Process gateway service -if [ -f "gateway_service.py" ]; then - extract_python_artifact "gateway_service.py" "Gateway Service" -fi - -# Process auth service -if [ -f "auth_service.py" ]; then - extract_python_artifact "auth_service.py" "Authentication Service" -fi - -# Process training service -if [ -f "training_service.py" ]; then - extract_python_artifact "training_service.py" "Training Service" -fi - - -print_step "Creating missing service files..." - -# Create remaining service files that might not be in artifacts -for service in forecasting data tenant notification; do - service_dir="services/$service" - # Create main.py if it doesn't exist - if [ ! -f "$service_dir/app/main.py" ]; then - cat > "$service_dir/app/main.py" << EOF + if [ ! 
-f "shared/config/base.py" ]; then + cat > "shared/config/base.py" << 'EOF' """ -$(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') Service +Base configuration for all microservices +This file should contain the BaseServiceSettings class """ -import logging -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware - -from app.core.config import settings -from app.core.database import database_manager -from shared.monitoring.logging import setup_logging -from shared.monitoring.metrics import MetricsCollector - -# Setup logging -setup_logging("$service-service", "INFO") -logger = logging.getLogger(__name__) - -# Create FastAPI app -app = FastAPI( - title="$(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') Service", - description="$(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') service for bakery forecasting", - version="1.0.0" -) - -# Initialize metrics collector -metrics_collector = MetricsCollector("$service-service") - -# CORS middleware -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -@app.on_event("startup") -async def startup_event(): - """Application startup""" - logger.info("Starting $(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') Service") - - # Create database tables - await database_manager.create_tables() - - # Start metrics server - metrics_collector.start_metrics_server(8080) - - logger.info("$(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') Service started successfully") - -@app.get("/health") -async def health_check(): - """Health check endpoint""" - return { - "status": "healthy", - "service": "$service-service", - "version": "1.0.0" - } - -if __name__ == "__main__": - import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8000) +# Import the base configuration from the artifact +# The complete base.py content should be copied here from the artifact EOF - print_success "Created: $service_dir/app/main.py" + print_success "Created 
shared base configuration template" + print_warning "Please copy the BaseServiceSettings class from the artifact to shared/config/base.py" fi +} + +# Create service configurations +create_service_configs() { + print_header "CREATING SERVICE CONFIGURATIONS" - # Create config.py if it doesn't exist - if [ ! -f "$service_dir/app/core/config.py" ]; then - cat > "$service_dir/app/core/config.py" << EOF + local services=("auth" "training" "forecasting" "data" "tenant" "notification") + + for service in "${services[@]}"; do + print_step "Creating configuration for $service service..." + + local service_dir="services/$service" + local config_file="$service_dir/app/core/config.py" + + if [ ! -f "$config_file" ]; then + cat > "$config_file" << EOF """ -$(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') service configuration +$service service configuration """ +from shared.config.base import BaseServiceSettings import os -from pydantic_settings import BaseSettings -class Settings(BaseSettings): - """Application settings""" +class ${service^}Settings(BaseServiceSettings): + """$service service specific settings""" - # Basic settings - APP_NAME: str = "$(echo $service | sed 's/.*/\L&/; s/[a-z]*/\u&/g') Service" - VERSION: str = "1.0.0" - DEBUG: bool = os.getenv("DEBUG", "False").lower() == "true" - LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO") + # Service Identity + APP_NAME: str = "${service^} Service" + SERVICE_NAME: str = "$service-service" + DESCRIPTION: str = "$service microservice for bakery platform" - # Database settings - DATABASE_URL: str = os.getenv("DATABASE_URL", "postgresql+asyncpg://${service}_user:${service}_pass123@${service}-db:5432/${service}_db") + # Database Configuration + DATABASE_URL: str = os.getenv("${service^^}_DATABASE_URL", + "postgresql+asyncpg://${service}_user:${service}_pass123@${service}-db:5432/${service}_db") - # Redis settings - REDIS_URL: str = os.getenv("REDIS_URL", "redis://redis:6379/0") - - # RabbitMQ settings - RABBITMQ_URL: str = 
os.getenv("RABBITMQ_URL", "amqp://bakery:forecast123@rabbitmq:5672/") - - # Service URLs - AUTH_SERVICE_URL: str = os.getenv("AUTH_SERVICE_URL", "http://auth-service:8000") - - class Config: - env_file = ".env" + # Redis Database (each service gets its own DB number) + REDIS_DB: int = $(( $(echo "${services[@]}" | tr ' ' '\n' | grep -n "^$service$" | cut -d: -f1) - 1 )) -settings = Settings() +settings = ${service^}Settings() EOF - print_success "Created: $service_dir/app/core/config.py" - fi - - # Create database.py if it doesn't exist - if [ ! -f "$service_dir/app/core/database.py" ]; then - cat > "$service_dir/app/core/database.py" << EOF + print_success "Created: $config_file" + fi + + # Create database configuration + local db_config_file="$service_dir/app/core/database.py" + if [ ! -f "$db_config_file" ]; then + cat > "$db_config_file" << EOF """ Database configuration for $service service """ @@ -358,32 +231,90 @@ database_manager = DatabaseManager(settings.DATABASE_URL) # Alias for convenience get_db = database_manager.get_db EOF - print_success "Created: $service_dir/app/core/database.py" - fi - - # Create requirements.txt if it doesn't exist - if [ ! -f "$service_dir/requirements.txt" ]; then - cat > "$service_dir/requirements.txt" << 'EOF' + print_success "Created: $db_config_file" + fi + + # Create auth configuration + local auth_config_file="$service_dir/app/core/auth.py" + if [ ! 
-f "$auth_config_file" ]; then + cat > "$auth_config_file" << EOF +""" +Authentication configuration for $service service +""" + +from shared.auth.jwt_handler import JWTHandler +from shared.auth.decorators import require_auth, require_role +from app.core.config import settings + +# Initialize JWT handler +jwt_handler = JWTHandler( + secret_key=settings.JWT_SECRET_KEY, + algorithm=settings.JWT_ALGORITHM, + access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES +) + +# Export commonly used functions +verify_token = jwt_handler.verify_token +create_access_token = jwt_handler.create_access_token +get_current_user = jwt_handler.get_current_user + +# Export decorators +__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role'] +EOF + print_success "Created: $auth_config_file" + fi + + # Create requirements.txt + local requirements_file="$service_dir/requirements.txt" + if [ ! -f "$requirements_file" ]; then + cat > "$requirements_file" << 'EOF' +# Core FastAPI dependencies fastapi==0.104.1 uvicorn[standard]==0.24.0 +pydantic==2.5.0 +pydantic-settings==2.1.0 + +# Database sqlalchemy==2.0.23 asyncpg==0.29.0 alembic==1.12.1 -pydantic==2.5.0 -pydantic-settings==2.1.0 + +# HTTP client httpx==0.25.2 + +# Caching and messaging redis==5.0.1 aio-pika==9.3.0 + +# Monitoring and logging prometheus-client==0.17.1 python-json-logger==2.0.4 + +# Utilities pytz==2023.3 +python-multipart==0.0.6 + +# Security +python-jose[cryptography]==3.3.0 +passlib[bcrypt]==1.7.4 +python-dateutil==2.8.2 + +# ML dependencies (for training and forecasting services) +pandas==2.1.3 +numpy==1.25.2 +scikit-learn==1.3.2 +prophet==1.1.4 + +# Spanish localization +babel==2.13.1 EOF - print_success "Created: $service_dir/requirements.txt" - fi - - # Create Dockerfile if it doesn't exist - if [ ! 
-f "$service_dir/Dockerfile" ]; then - cat > "$service_dir/Dockerfile" << 'EOF' + print_success "Created: $requirements_file" + fi + + # Create Dockerfile + local dockerfile="$service_dir/Dockerfile" + if [ ! -f "$dockerfile" ]; then + cat > "$dockerfile" << 'EOF' FROM python:3.11-slim WORKDIR /app @@ -391,7 +322,9 @@ WORKDIR /app # Install system dependencies RUN apt-get update && apt-get install -y \ gcc \ + g++ \ curl \ + libpq-dev \ && rm -rf /var/lib/apt/lists/* # Copy requirements @@ -400,12 +333,19 @@ COPY requirements.txt . # Install Python dependencies RUN pip install --no-cache-dir -r requirements.txt +# Copy shared libraries first +COPY shared/ /app/shared/ + # Copy application code COPY . . # Add shared libraries to Python path ENV PYTHONPATH="/app:/app/shared:$PYTHONPATH" +# Create non-root user +RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app +USER appuser + # Expose port EXPOSE 8000 @@ -416,464 +356,629 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ # Run application CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] EOF - print_success "Created: $service_dir/Dockerfile" + print_success "Created: $dockerfile" + fi + done +} + +# Create gateway configuration +create_gateway_config() { + print_step "Creating gateway configuration..." + + if [ ! 
-f "gateway/app/core/config.py" ]; then + cat > "gateway/app/core/config.py" << 'EOF' +""" +Gateway service configuration +""" + +from shared.config.base import BaseServiceSettings +import os +from typing import Dict, List + +class GatewaySettings(BaseServiceSettings): + """Gateway-specific settings""" + + # Service Identity + APP_NAME: str = "Bakery Forecasting Gateway" + SERVICE_NAME: str = "gateway" + DESCRIPTION: str = "API Gateway for Bakery Forecasting Platform" + + # Gateway-specific Redis database + REDIS_DB: int = 6 + + # Gateway doesn't need a database + DATABASE_URL: str = "" + + # Service Discovery + CONSUL_URL: str = os.getenv("CONSUL_URL", "http://consul:8500") + ENABLE_SERVICE_DISCOVERY: bool = os.getenv("ENABLE_SERVICE_DISCOVERY", "false").lower() == "true" + +settings = GatewaySettings() +EOF + print_success "Created gateway configuration" + fi +} + +# Create environment file +create_environment_file() { + print_header "CREATING ENVIRONMENT CONFIGURATION" + + if [ ! -f ".env" ]; then + print_step "Creating .env file from template..." + + # Copy the environment template from the artifact + cat > ".env" << 'EOF' +# Copy the complete .env content from the artifact here +# This should include all the environment variables defined in the artifact +EOF + print_success "Created .env file" + print_warning "Please update the .env file with your actual configuration values" + print_warning "Especially change JWT_SECRET_KEY, database passwords, and API keys" + else + print_warning ".env file already exists - skipping creation" + fi +} + +# Create Docker Compose configuration +create_docker_compose() { + print_header "CREATING DOCKER COMPOSE CONFIGURATION" + + if [ ! -f "docker-compose.yml" ]; then + print_step "Creating docker-compose.yml..." 
+ + cat > "docker-compose.yml" << 'EOF' +version: '3.8' + +services: + # ============================================================ + # INFRASTRUCTURE SERVICES + # ============================================================ + + # PostgreSQL Databases (one per service) + auth-db: + image: postgres:15-alpine + environment: + POSTGRES_DB: auth_db + POSTGRES_USER: auth_user + POSTGRES_PASSWORD: auth_pass123 + volumes: + - auth_db_data:/var/lib/postgresql/data + networks: + - bakery-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U auth_user -d auth_db"] + interval: 10s + timeout: 5s + retries: 5 + + training-db: + image: postgres:15-alpine + environment: + POSTGRES_DB: training_db + POSTGRES_USER: training_user + POSTGRES_PASSWORD: training_pass123 + volumes: + - training_db_data:/var/lib/postgresql/data + networks: + - bakery-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U training_user -d training_db"] + interval: 10s + timeout: 5s + retries: 5 + + forecasting-db: + image: postgres:15-alpine + environment: + POSTGRES_DB: forecasting_db + POSTGRES_USER: forecasting_user + POSTGRES_PASSWORD: forecasting_pass123 + volumes: + - forecasting_db_data:/var/lib/postgresql/data + networks: + - bakery-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U forecasting_user -d forecasting_db"] + interval: 10s + timeout: 5s + retries: 5 + + data-db: + image: postgres:15-alpine + environment: + POSTGRES_DB: data_db + POSTGRES_USER: data_user + POSTGRES_PASSWORD: data_pass123 + volumes: + - data_db_data:/var/lib/postgresql/data + networks: + - bakery-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U data_user -d data_db"] + interval: 10s + timeout: 5s + retries: 5 + + tenant-db: + image: postgres:15-alpine + environment: + POSTGRES_DB: tenant_db + POSTGRES_USER: tenant_user + POSTGRES_PASSWORD: tenant_pass123 + volumes: + - tenant_db_data:/var/lib/postgresql/data + networks: + - bakery-network + healthcheck: + test: ["CMD-SHELL", "pg_isready 
-U tenant_user -d tenant_db"] + interval: 10s + timeout: 5s + retries: 5 + + notification-db: + image: postgres:15-alpine + environment: + POSTGRES_DB: notification_db + POSTGRES_USER: notification_user + POSTGRES_PASSWORD: notification_pass123 + volumes: + - notification_db_data:/var/lib/postgresql/data + networks: + - bakery-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U notification_user -d notification_db"] + interval: 10s + timeout: 5s + retries: 5 + + # Redis Cache + redis: + image: redis:7-alpine + command: redis-server --appendonly yes --requirepass redis_pass123 + volumes: + - redis_data:/data + networks: + - bakery-network + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + + # RabbitMQ Message Broker + rabbitmq: + image: rabbitmq:3-management-alpine + environment: + RABBITMQ_DEFAULT_USER: bakery + RABBITMQ_DEFAULT_PASS: forecast123 + ports: + - "15672:15672" # Management UI + volumes: + - rabbitmq_data:/var/lib/rabbitmq + networks: + - bakery-network + healthcheck: + test: ["CMD", "rabbitmq-diagnostics", "ping"] + interval: 30s + timeout: 10s + retries: 5 + + # ============================================================ + # MICROSERVICES + # ============================================================ + + # API Gateway + gateway: + build: ./gateway + ports: + - "8000:8000" + env_file: .env + depends_on: + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Auth Service + auth-service: + build: ./services/auth + ports: + - "8001:8000" + env_file: .env + depends_on: + - auth-db + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Training Service + training-service: + build: 
./services/training + ports: + - "8002:8000" + env_file: .env + depends_on: + - training-db + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + - ./models:/app/models + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Forecasting Service + forecasting-service: + build: ./services/forecasting + ports: + - "8003:8000" + env_file: .env + depends_on: + - forecasting-db + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + - ./models:/app/models + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Data Service + data-service: + build: ./services/data + ports: + - "8004:8000" + env_file: .env + depends_on: + - data-db + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Tenant Service + tenant-service: + build: ./services/tenant + ports: + - "8005:8000" + env_file: .env + depends_on: + - tenant-db + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Notification Service + notification-service: + build: ./services/notification + ports: + - "8006:8000" + env_file: .env + depends_on: + - notification-db + - redis + - rabbitmq + networks: + - bakery-network + volumes: + - ./logs:/app/logs + - ./templates:/app/templates + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # ============================================================ + # MONITORING STACK + # ============================================================ + + # Prometheus + prometheus: + image: 
prom/prometheus:latest + ports: + - "9090:9090" + volumes: + - ./infrastructure/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml + - prometheus_data:/prometheus + networks: + - bakery-network + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + + # Grafana + grafana: + image: grafana/grafana:latest + ports: + - "3002:3000" + environment: + GF_SECURITY_ADMIN_PASSWORD: admin123 + volumes: + - grafana_data:/var/lib/grafana + - ./infrastructure/monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards + - ./infrastructure/monitoring/grafana/datasources:/etc/grafana/provisioning/datasources + networks: + - bakery-network + +networks: + bakery-network: + driver: bridge + +volumes: + # Database volumes + auth_db_data: + training_db_data: + forecasting_db_data: + data_db_data: + tenant_db_data: + notification_db_data: + + # Cache and messaging volumes + redis_data: + rabbitmq_data: + + # Monitoring volumes + prometheus_data: + grafana_data: +EOF + print_success "Created docker-compose.yml" + fi +} + +# Create utility scripts +create_utility_scripts() { + print_header "CREATING UTILITY SCRIPTS" + + # Test script + if [ ! -f "scripts/test.sh" ]; then + cat > "scripts/test.sh" << 'EOF' +#!/bin/bash + +# Run tests for all services + +set -e + +echo "Running tests for all microservices..." + +services=("auth" "training" "forecasting" "data" "tenant" "notification") + +for service in "${services[@]}"; do + echo "Testing $service service..." + docker-compose exec ${service}-service python -m pytest tests/ -v +done + +echo "Running integration tests..." +docker-compose exec gateway python -m pytest ../tests/integration/ -v + +echo "All tests completed!" +EOF + chmod +x "scripts/test.sh" + print_success "Created test script" + fi + + # Deployment script + if [ ! 
-f "scripts/deploy.sh" ]; then + cat > "scripts/deploy.sh" << 'EOF' +#!/bin/bash + +# Deploy services to production + +set -e + +echo "Building and deploying services..." + +# Build all services +docker-compose build + +# Deploy with zero downtime +docker-compose up -d --no-deps --force-recreate + +# Wait for health checks +echo "Waiting for services to be healthy..." +sleep 30 + +# Verify deployment +./scripts/health-check.sh + +echo "Deployment completed successfully!" +EOF + chmod +x "scripts/deploy.sh" + print_success "Created deployment script" + fi + + # Health check script + if [ ! -f "scripts/health-check.sh" ]; then + cat > "scripts/health-check.sh" << 'EOF' +#!/bin/bash + +# Check health of all services + +services=( + "gateway:8000" + "auth-service:8001" + "training-service:8002" + "forecasting-service:8003" + "data-service:8004" + "tenant-service:8005" + "notification-service:8006" +) + +echo "Checking service health..." + +all_healthy=true + +for service_port in "${services[@]}"; do + service=$(echo $service_port | cut -d: -f1) + port=$(echo $service_port | cut -d: -f2) + + if curl -f -s "http://localhost:$port/health" > /dev/null; then + echo "โœ… $service is healthy" + else + echo "โŒ $service is unhealthy" + all_healthy=false fi done -# Create .env file -print_step "Creating environment configuration..." - -if [ ! 
-f ".env" ]; then - cat > .env << 'EOF' -# Environment -ENVIRONMENT=development -DEBUG=true -LOG_LEVEL=INFO - -# Database URLs -AUTH_DB_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db -TRAINING_DB_URL=postgresql+asyncpg://training_user:training_pass123@training-db:5432/training_db -FORECASTING_DB_URL=postgresql+asyncpg://forecasting_user:forecasting_pass123@forecasting-db:5432/forecasting_db -DATA_DB_URL=postgresql+asyncpg://data_user:data_pass123@data-db:5432/data_db -TENANT_DB_URL=postgresql+asyncpg://tenant_user:tenant_pass123@tenant-db:5432/tenant_db -NOTIFICATION_DB_URL=postgresql+asyncpg://notification_user:notification_pass123@notification-db:5432/notification_db - -# Redis -REDIS_URL=redis://redis:6379 - -# RabbitMQ -RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/ - -# JWT -JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-please -JWT_ALGORITHM=HS256 -JWT_ACCESS_TOKEN_EXPIRE_MINUTES=30 -JWT_REFRESH_TOKEN_EXPIRE_DAYS=7 - -# External APIs -AEMET_API_KEY=your-aemet-api-key-here -MADRID_OPENDATA_API_KEY=your-madrid-opendata-key-here - -# CORS -CORS_ORIGINS=http://localhost:3000,http://localhost:3001 - -# Email -SMTP_HOST=smtp.gmail.com -SMTP_PORT=587 -SMTP_USER=your-email@gmail.com -SMTP_PASSWORD=your-email-password - -# WhatsApp -WHATSAPP_API_KEY=your-whatsapp-api-key-here - -# Monitoring -PROMETHEUS_URL=http://prometheus:9090 -GRAFANA_URL=http://grafana:3000 -EOF - print_success "Environment configuration created" +if $all_healthy; then + echo "๐ŸŽ‰ All services are healthy!" + exit 0 +else + echo "โš ๏ธ Some services are unhealthy" + exit 1 fi +EOF + chmod +x "scripts/health-check.sh" + print_success "Created health check script" + fi +} # Create monitoring configuration -print_step "Creating monitoring configuration..." - -if [ ! 
-f "infrastructure/monitoring/prometheus/prometheus.yml" ]; then - cat > infrastructure/monitoring/prometheus/prometheus.yml << 'EOF' +create_monitoring_config() { + print_step "Creating monitoring configuration..." + + # Prometheus configuration + if [ ! -f "infrastructure/monitoring/prometheus.yml" ]; then + mkdir -p infrastructure/monitoring + cat > "infrastructure/monitoring/prometheus.yml" << 'EOF' global: scrape_interval: 15s scrape_configs: - job_name: 'gateway' static_configs: - - targets: ['gateway:8080'] - + - targets: ['gateway:8000'] + - job_name: 'auth-service' static_configs: - - targets: ['auth-service:8080'] - + - targets: ['auth-service:8000'] + - job_name: 'training-service' static_configs: - - targets: ['training-service:8080'] - + - targets: ['training-service:8000'] + - job_name: 'forecasting-service' static_configs: - - targets: ['forecasting-service:8080'] - + - targets: ['forecasting-service:8000'] + - job_name: 'data-service' static_configs: - - targets: ['data-service:8080'] - + - targets: ['data-service:8000'] + - job_name: 'tenant-service' static_configs: - - targets: ['tenant-service:8080'] - + - targets: ['tenant-service:8000'] + - job_name: 'notification-service' static_configs: - - targets: ['notification-service:8080'] + - targets: ['notification-service:8000'] EOF - print_success "Prometheus configuration created" -fi - -# Create utility scripts -print_step "Creating utility scripts..." - -# Create test script -cat > scripts/test.sh << 'EOF' -#!/bin/bash - -echo "๐Ÿงช Running tests for all services..." - -# Run tests for each service -for service in auth training forecasting data tenant notification; do - echo "Testing $service service..." 
- if docker-compose ps | grep -q "${service}-service.*Up"; then - docker-compose exec -T ${service}-service python -m pytest tests/ -v || echo "Tests failed for $service" - else - echo "Service $service is not running, skipping tests" + print_success "Created Prometheus configuration" fi -done +} -echo "โœ… Test run completed" +# Final setup steps +final_setup() { + print_header "FINAL SETUP STEPS" + + # Make scripts executable + chmod +x scripts/*.sh + + # Create logs directory + mkdir -p logs models + + print_success "Setup completed successfully!" + + echo "" + echo "================================================================" + echo -e "${GREEN}NEXT STEPS${NC}" + echo "================================================================" + echo "1. Update .env file with your configuration:" + echo " - Change JWT_SECRET_KEY" + echo " - Add AEMET and Madrid Open Data API keys" + echo " - Configure email settings" + echo "" + echo "2. Copy the configuration classes from artifacts:" + echo " - Copy BaseServiceSettings to shared/config/base.py" + echo " - Copy service-specific settings to respective config files" + echo "" + echo "3. Start the services:" + echo " docker-compose up -d" + echo "" + echo "4. Check service health:" + echo " ./scripts/health-check.sh" + echo "" + echo "5. 
Access the services:" + echo " - Gateway: http://localhost:8000" + echo " - API Docs: http://localhost:8000/docs" + echo " - Grafana: http://localhost:3002 (admin/admin123)" + echo " - RabbitMQ: http://localhost:15672 (bakery/forecast123)" + echo "" + echo "================================================================" +} + +# Main execution +main() { + print_header "BAKERY FORECASTING PLATFORM - MICROSERVICES SETUP" + + check_prerequisites + create_directory_structure + create_shared_config + create_service_configs + create_gateway_config + create_environment_file + create_docker_compose + create_utility_scripts + create_monitoring_config + final_setup +} + +# Run main function +main "$@" EOF - -# Create deploy script -cat > scripts/deploy.sh << 'EOF' -#!/bin/bash - -echo "๐Ÿš€ Deploying Bakery Forecasting Platform..." - -# Build and deploy all services -docker-compose build -docker-compose up -d - -echo "Waiting for services to be healthy..." -sleep 30 - -# Check service health -echo "Checking service health..." -curl -f http://localhost:8000/health || echo "Gateway health check failed" - -echo "โœ… Deployment completed" -echo "Gateway: http://localhost:8000" -echo "API Docs: http://localhost:8000/docs" -EOF - -# Make scripts executable -chmod +x scripts/*.sh - -print_success "Utility scripts created" - -# Create .gitignore -print_step "Creating .gitignore..." - -if [ ! 
-f ".gitignore" ]; then - cat > .gitignore << 'EOF' -# Environment -.env -.env.local -.env.development.local -.env.test.local -.env.production.local - -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST -.pytest_cache/ -.coverage -.coverage.* -htmlcov/ -.tox/ -.nox/ -.hypothesis/ -.mypy_cache/ -.dmyp.json -dmyp.json -.pyre/ - -# Virtual Environment -venv/ -ENV/ -env/ -.venv - -# Node -node_modules/ -npm-debug.log* -yarn-debug.log* -yarn-error.log* -.pnpm-debug.log* -.npm -.eslintcache -.next -out/ -build/ -dist/ - -# IDE -.vscode/ -.idea/ -*.swp -*.swo -*~ -.DS_Store - -# Logs -logs/ -*.log - -# Database -*.db -*.sqlite -*.sqlite3 - -# ML Models -*.pkl -*.joblib -*.h5 -models/ - -# Data -data/external/ -data/processed/ -*.csv -*.xlsx - -# Docker -.docker/ - -# Infrastructure -*.tfstate -*.tfstate.backup -.terraform/ -.terraform.lock.hcl - -# Kubernetes -kubeconfig -*.yaml.bak - -# Monitoring -prometheus_data/ -grafana_data/ -elasticsearch_data/ - -# Artifacts (from Claude) -*_service.py -*_libraries.py -*.md -setup_scripts.sh -EOF - print_success ".gitignore created" -fi - -# Create README -print_step "Creating documentation..." - -if [ ! -f "README.md" ]; then - cat > README.md << 'EOF' -# Bakery Forecasting Platform - Microservices - -## Overview -AI-powered demand forecasting platform for bakeries in Madrid, Spain using microservices architecture. 
- -## Architecture -- **API Gateway**: Central entry point for all client requests -- **Auth Service**: User authentication and authorization -- **Training Service**: ML model training for demand forecasting -- **Forecasting Service**: Generate predictions using trained models -- **Data Service**: External data integration (weather, traffic, events) -- **Tenant Service**: Multi-tenant management -- **Notification Service**: Email and WhatsApp notifications - -## Quick Start - -### Prerequisites -- Docker and Docker Compose -- Python 3.11+ -- Node.js 18+ - -### Setup -```bash -# Run setup script (this script!) -./scripts/setup.sh - -# Start services -docker-compose up -d - -# Check service health -curl http://localhost:8000/health -``` - -### Services -- **Gateway**: http://localhost:8000 -- **API Docs**: http://localhost:8000/docs -- **Grafana**: http://localhost:3002 -- **Prometheus**: http://localhost:9090 -- **RabbitMQ Management**: http://localhost:15672 - -### Development - -#### Running Tests -```bash -./scripts/test.sh -``` - -#### Building Services -```bash -docker-compose build -``` - -#### Viewing Logs -```bash -# All services -docker-compose logs -f - -# Specific service -docker-compose logs -f auth-service -``` - -#### Service URLs (Development) -- Gateway: http://localhost:8000 -- Auth Service: http://localhost:8001 -- Training Service: http://localhost:8002 -- Forecasting Service: http://localhost:8003 -- Data Service: http://localhost:8004 -- Tenant Service: http://localhost:8005 -- Notification Service: http://localhost:8006 - -## Environment Variables - -Copy `.env.example` to `.env` and update the following: - -```bash -# External API Keys -AEMET_API_KEY=your-aemet-api-key -MADRID_OPENDATA_API_KEY=your-madrid-opendata-key - -# Email Configuration -SMTP_USER=your-email@gmail.com -SMTP_PASSWORD=your-email-password - -# WhatsApp API -WHATSAPP_API_KEY=your-whatsapp-api-key - -# JWT Secret (change in production!) 
-JWT_SECRET_KEY=your-super-secret-jwt-key -``` - -## Troubleshooting - -### Services won't start -```bash -# Check if ports are available -docker-compose ps -netstat -tulpn | grep :8000 - -# Restart services -docker-compose down -docker-compose up -d -``` - -### Database connection issues -```bash -# Check database containers -docker-compose logs auth-db -docker-compose logs training-db - -# Reset databases -docker-compose down -v -docker-compose up -d -``` - -### Service communication issues -```bash -# Check service health -curl http://localhost:8000/health -curl http://localhost:8001/health -curl http://localhost:8002/health - -# Check RabbitMQ -open http://localhost:15672 -# User: bakery, Password: forecast123 -``` - -## Next Steps - -1. **Configure External APIs**: Add your AEMET and Madrid Open Data API keys -2. **Test Authentication**: Register a user and test login -3. **Upload Sales Data**: Import historical sales data -4. **Train Models**: Start your first training job -5. **Generate Forecasts**: Create demand predictions - -## License -MIT License -EOF - print_success "Documentation created" -fi - -# Final steps -print_step "Final setup steps..." - -# Copy shared libraries to each service (for Docker builds) -for service in auth training forecasting data tenant notification; do - if [ -d "shared" ]; then - cp -r shared services/$service/ 2>/dev/null || true + chmod +x "scripts/setup.sh" + print_success "Created setup script" fi -done - -# Copy shared libraries to gateway -if [ -d "shared" ]; then - cp -r shared gateway/ 2>/dev/null || true -fi - -# Initialize Git repository if not exists -if [ ! -d ".git" ]; then - git init - git add . - git commit -m "Initial microservices setup from artifacts" - print_success "Git repository initialized" -fi - -echo -echo "๐ŸŽ‰ Setup completed successfully!" -echo "===============================================" -echo -echo "Next steps:" -echo "1. Update .env with your actual API keys" -echo "2. 
Start services: docker-compose up -d" -echo "3. Check health: curl http://localhost:8000/health" -echo "4. View API docs: http://localhost:8000/docs" -echo "5. Monitor services: http://localhost:3002 (Grafana)" -echo -echo "Services will be available at:" -echo "- Gateway: http://localhost:8000" -echo "- Auth Service: http://localhost:8001" -echo "- Training Service: http://localhost:8002" -echo "- Monitoring: http://localhost:3002" -echo "- RabbitMQ: http://localhost:15672" -echo -echo "Artifact files processed:" -[ -f "shared_libraries.py" ] && echo "โœ“ shared_libraries.py" -[ -f "gateway_service.py" ] && echo "โœ“ gateway_service.py" -[ -f "auth_service.py" ] && echo "โœ“ auth_service.py" -[ -f "training_service.py" ] && echo "โœ“ training_service.py" -[ -f "docker-compose.yml" ] && echo "โœ“ docker-compose.yml" -echo -echo "Happy coding! ๐Ÿš€" \ No newline at end of file +} diff --git a/scripts/validate-config.sh b/scripts/validate-config.sh new file mode 100755 index 00000000..adb8db46 --- /dev/null +++ b/scripts/validate-config.sh @@ -0,0 +1,297 @@ +# ================================================================ +# CONFIGURATION VALIDATION SCRIPT +# scripts/validate-config.sh +# ================================================================ + +#!/bin/bash + +# Configuration validation script + +set -e + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +NC='\033[0m' + +print_header() { + echo "" + echo "================================================================" + echo -e "${GREEN}$1${NC}" + echo "================================================================" +} + +print_success() { + echo -e "${GREEN}[โœ“]${NC} $1" +} + +print_error() { + echo -e "${RED}[โœ—]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[!]${NC} $1" +} + +validate_env_file() { + print_header "VALIDATING ENVIRONMENT CONFIGURATION" + + if [ ! 
-f ".env" ]; then + print_error ".env file not found" + exit 1 + fi + + # Load environment variables + source .env + + # Critical settings validation + critical_vars=( + "JWT_SECRET_KEY" + "AUTH_DATABASE_URL" + "TRAINING_DATABASE_URL" + "FORECASTING_DATABASE_URL" + "DATA_DATABASE_URL" + "TENANT_DATABASE_URL" + "NOTIFICATION_DATABASE_URL" + "REDIS_URL" + "RABBITMQ_URL" + ) + + all_good=true + + for var in "${critical_vars[@]}"; do + if [ -z "${!var}" ]; then + print_error "$var is not set" + all_good=false + elif [[ "${!var}" == *"change"* ]] || [[ "${!var}" == *"default"* ]]; then + print_warning "$var appears to use default/placeholder value" + else + print_success "$var is configured" + fi + done + + # Check JWT secret strength + if [ ${#JWT_SECRET_KEY} -lt 32 ]; then + print_error "JWT_SECRET_KEY must be at least 32 characters long" + all_good=false + fi + + # Check environment + if [ "$ENVIRONMENT" = "production" ]; then + production_vars=("AEMET_API_KEY" "MADRID_OPENDATA_API_KEY" "SMTP_USER" "SMTP_PASSWORD") + for var in "${production_vars[@]}"; do + if [ -z "${!var}" ]; then + print_warning "$var should be configured for production" + fi + done + fi + + if $all_good; then + print_success "Environment configuration is valid" + else + print_error "Environment configuration has issues" + exit 1 + fi +} + +validate_service_configs() { + print_header "VALIDATING SERVICE CONFIGURATIONS" + + services=("auth" "training" "forecasting" "data" "tenant" "notification") + + for service in "${services[@]}"; do + config_file="services/$service/app/core/config.py" + + if [ -f "$config_file" ]; then + print_success "$service configuration exists" + + # Check if configuration follows the standard + if grep -q "BaseServiceSettings" "$config_file"; then + print_success "$service uses BaseServiceSettings" + else + print_warning "$service doesn't inherit from BaseServiceSettings" + fi + + if grep -q "DATABASE_URL" "$config_file"; then + print_success "$service has database 
configuration" + else + print_warning "$service missing database configuration" + fi + else + print_error "$service configuration missing" + fi + done + + # Check gateway configuration + if [ -f "gateway/app/core/config.py" ]; then + print_success "Gateway configuration exists" + else + print_error "Gateway configuration missing" + fi +} + +validate_shared_config() { + print_header "VALIDATING SHARED CONFIGURATION" + + if [ -f "shared/config/base.py" ]; then + print_success "Base configuration exists" + + if grep -q "BaseServiceSettings" "shared/config/base.py"; then + print_success "BaseServiceSettings class found" + else + print_error "BaseServiceSettings class missing" + fi + else + print_error "Base configuration missing" + fi + + shared_modules=("auth" "database" "messaging" "monitoring" "utils") + for module in "${shared_modules[@]}"; do + if [ -d "shared/$module" ]; then + print_success "Shared $module module exists" + else + print_warning "Shared $module module missing" + fi + done +} + +validate_docker_config() { + print_header "VALIDATING DOCKER CONFIGURATION" + + if [ -f "docker-compose.yml" ]; then + print_success "Docker Compose configuration exists" + + # Check if all services are defined + services=("gateway" "auth-service" "training-service" "forecasting-service" "data-service" "tenant-service" "notification-service") + for service in "${services[@]}"; do + if grep -q "$service:" docker-compose.yml; then + print_success "$service defined in docker-compose.yml" + else + print_error "$service missing from docker-compose.yml" + fi + done + + # Check if all databases are defined + databases=("auth-db" "training-db" "forecasting-db" "data-db" "tenant-db" "notification-db") + for db in "${databases[@]}"; do + if grep -q "$db:" docker-compose.yml; then + print_success "$db defined in docker-compose.yml" + else + print_error "$db missing from docker-compose.yml" + fi + done + + # Check infrastructure services + infra=("redis" "rabbitmq" "prometheus" 
"grafana") + for service in "${infra[@]}"; do + if grep -q "$service:" docker-compose.yml; then + print_success "$service defined in docker-compose.yml" + else + print_warning "$service missing from docker-compose.yml" + fi + done + else + print_error "Docker Compose configuration missing" + fi + + # Check Dockerfiles + services=("gateway" "auth" "training" "forecasting" "data" "tenant" "notification") + for service in "${services[@]}"; do + if [ "$service" = "gateway" ]; then + dockerfile="gateway/Dockerfile" + else + dockerfile="services/$service/Dockerfile" + fi + + if [ -f "$dockerfile" ]; then + print_success "$service Dockerfile exists" + else + print_warning "$service Dockerfile missing" + fi + done +} + +validate_directory_structure() { + print_header "VALIDATING DIRECTORY STRUCTURE" + + required_dirs=( + "shared/config" + "shared/auth" + "shared/database" + "shared/messaging" + "gateway/app/core" + "services/auth/app/core" + "services/training/app/core" + "services/forecasting/app/core" + "services/data/app/core" + "services/tenant/app/core" + "services/notification/app/core" + "scripts" + "logs" + "models" + "templates" + ) + + missing_dirs=() + + for dir in "${required_dirs[@]}"; do + if [ -d "$dir" ]; then + print_success "$dir exists" + else + print_warning "$dir missing" + missing_dirs+=("$dir") + fi + done + + if [ ${#missing_dirs[@]} -gt 0 ]; then + print_warning "Creating missing directories..." + for dir in "${missing_dirs[@]}"; do + mkdir -p "$dir" + print_success "Created $dir" + done + fi +} + +validate_scripts() { + print_header "VALIDATING UTILITY SCRIPTS" + + scripts=("setup.sh" "test.sh" "deploy.sh" "health-check.sh" "validate-config.sh") + + for script in "${scripts[@]}"; do + script_path="scripts/$script" + if [ -f "$script_path" ]; then + print_success "$script exists" + + if [ -x "$script_path" ]; then + print_success "$script is executable" + else + print_warning "$script is not executable - fixing..." 
+ chmod +x "$script_path" + fi + else + print_warning "$script missing" + fi + done +} + +# Main validation function +main() { + print_header "CONFIGURATION VALIDATION" + + validate_directory_structure + validate_shared_config + validate_service_configs + validate_env_file + validate_docker_config + validate_scripts + + print_header "VALIDATION COMPLETE" + echo "If all validations passed, you're ready to start the services!" + echo "" + echo "Next steps:" + echo "1. docker-compose up -d" + echo "2. ./scripts/health-check.sh" +} + +# Run validation +main "$@" \ No newline at end of file diff --git a/services/forecasting/app/core/auth.py b/services/forecasting/app/core/auth.py new file mode 100644 index 00000000..b33f22ab --- /dev/null +++ b/services/forecasting/app/core/auth.py @@ -0,0 +1,22 @@ +""" +Authentication configuration for forecasting service +""" + +from shared.auth.jwt_handler import JWTHandler +from shared.auth.decorators import require_auth, require_role +from app.core.config import settings + +# Initialize JWT handler +jwt_handler = JWTHandler( + secret_key=settings.JWT_SECRET_KEY, + algorithm=settings.JWT_ALGORITHM, + access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES +) + +# Export commonly used functions +verify_token = jwt_handler.verify_token +create_access_token = jwt_handler.create_access_token +get_current_user = jwt_handler.get_current_user + +# Export decorators +__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role'] diff --git a/services/notification/app/core/auth.py b/services/notification/app/core/auth.py new file mode 100644 index 00000000..a76ed786 --- /dev/null +++ b/services/notification/app/core/auth.py @@ -0,0 +1,22 @@ +""" +Authentication configuration for notification service +""" + +from shared.auth.jwt_handler import JWTHandler +from shared.auth.decorators import require_auth, require_role +from app.core.config import settings + +# Initialize JWT handler 
+jwt_handler = JWTHandler( + secret_key=settings.JWT_SECRET_KEY, + algorithm=settings.JWT_ALGORITHM, + access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES +) + +# Export commonly used functions +verify_token = jwt_handler.verify_token +create_access_token = jwt_handler.create_access_token +get_current_user = jwt_handler.get_current_user + +# Export decorators +__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role'] diff --git a/services/tenant/app/core/auth.py b/services/tenant/app/core/auth.py new file mode 100644 index 00000000..314685f1 --- /dev/null +++ b/services/tenant/app/core/auth.py @@ -0,0 +1,22 @@ +""" +Authentication configuration for tenant service +""" + +from shared.auth.jwt_handler import JWTHandler +from shared.auth.decorators import require_auth, require_role +from app.core.config import settings + +# Initialize JWT handler +jwt_handler = JWTHandler( + secret_key=settings.JWT_SECRET_KEY, + algorithm=settings.JWT_ALGORITHM, + access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES +) + +# Export commonly used functions +verify_token = jwt_handler.verify_token +create_access_token = jwt_handler.create_access_token +get_current_user = jwt_handler.get_current_user + +# Export decorators +__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role'] diff --git a/services/training/app/main.py b/services/training/app/main.py index 35901cf8..edc1e377 100644 --- a/services/training/app/main.py +++ b/services/training/app/main.py @@ -108,10 +108,6 @@ app.add_middleware( allow_headers=["*"], ) -app.add_middleware( - TrustedHostMiddleware, - allowed_hosts=settings.ALLOWED_HOSTS -) # Request middleware for logging and metrics @app.middleware("http") diff --git a/shared/config/__init__.py b/shared/config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/shared/config/base.py b/shared/config/base.py index f773a4e2..27e839f4 
100644 --- a/shared/config/base.py +++ b/shared/config/base.py @@ -284,14 +284,6 @@ class BaseServiceSettings(BaseSettings): raise ValueError("JWT_SECRET_KEY must be at least 32 characters long") return v - @validator('DATABASE_URL') - def validate_database_url(cls, v): - if not v: - raise ValueError("DATABASE_URL is required") - if not v.startswith(('postgresql://', 'postgresql+asyncpg://')): - raise ValueError("DATABASE_URL must be a PostgreSQL URL") - return v - @validator('LOG_LEVEL') def validate_log_level(cls, v): valid_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']