Improve docker config

This commit is contained in:
Urtzi Alfaro
2025-07-20 02:16:51 +02:00
parent 9a67f3d175
commit 1c730c3c81
27 changed files with 2598 additions and 1161 deletions

View File

@@ -1,59 +1,118 @@
# ================================================================
# UPDATED .env.example FILE
# .env.example
# BAKERY FORECASTING PLATFORM - ENVIRONMENT CONFIGURATION
# Single source of truth - no duplication with docker-compose.yml
# ================================================================
# ================================================================
# ENVIRONMENT CONFIGURATION
# ENVIRONMENT & BUILD SETTINGS
# ================================================================
# Environment: development, staging, production, testing
ENVIRONMENT=development
DEBUG=true
LOG_LEVEL=INFO
# Build configuration
# NOTE: .env files are parsed literally — Docker Compose does NOT perform
# shell command substitution, so the $(...) values below are passed through
# as raw strings. Set these from a Makefile/CI step instead, e.g.:
#   export BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
VCS_REF=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
IMAGE_TAG=latest
DOMAIN=localhost
# ================================================================
# SERVICE PORTS (used by Docker Compose)
# ================================================================
# Core services
GATEWAY_PORT=8000
AUTH_SERVICE_PORT=8001
TRAINING_SERVICE_PORT=8002
FORECASTING_SERVICE_PORT=8003
DATA_SERVICE_PORT=8004
TENANT_SERVICE_PORT=8005
NOTIFICATION_SERVICE_PORT=8006
# Frontend
DASHBOARD_PORT=3000
MARKETING_PORT=3001
# Infrastructure
REDIS_PORT=6379
RABBITMQ_PORT=5672
RABBITMQ_MANAGEMENT_PORT=15672
# Monitoring
PROMETHEUS_PORT=9090
GRAFANA_PORT=3002
# Development tools
PGADMIN_PORT=5050
REDIS_COMMANDER_PORT=8081
# ================================================================
# CORE SERVICE SETTINGS (used by applications)
# ================================================================
# Application metadata
SERVICE_VERSION=1.0.0
TIMEZONE=Europe/Madrid
LOCALE=es_ES.UTF-8
CURRENCY=EUR
# ================================================================
# DATABASE CONFIGURATION
# Each service has its own dedicated database
# ================================================================
# Auth Service Database
# PostgreSQL common settings
POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=es_ES.UTF-8 --lc-ctype=es_ES.UTF-8
# Auth Database
AUTH_DB_NAME=auth_db
AUTH_DB_USER=auth_user
AUTH_DB_PASSWORD=auth_pass123
AUTH_DATABASE_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db
# Training Service Database
# Training Database
TRAINING_DB_NAME=training_db
TRAINING_DB_USER=training_user
TRAINING_DB_PASSWORD=training_pass123
TRAINING_DATABASE_URL=postgresql+asyncpg://training_user:training_pass123@training-db:5432/training_db
# Forecasting Service Database
# Forecasting Database
FORECASTING_DB_NAME=forecasting_db
FORECASTING_DB_USER=forecasting_user
FORECASTING_DB_PASSWORD=forecasting_pass123
FORECASTING_DATABASE_URL=postgresql+asyncpg://forecasting_user:forecasting_pass123@forecasting-db:5432/forecasting_db
# Data Service Database
# Data Database
DATA_DB_NAME=data_db
DATA_DB_USER=data_user
DATA_DB_PASSWORD=data_pass123
DATA_DATABASE_URL=postgresql+asyncpg://data_user:data_pass123@data-db:5432/data_db
# Tenant Service Database
# Tenant Database
TENANT_DB_NAME=tenant_db
TENANT_DB_USER=tenant_user
TENANT_DB_PASSWORD=tenant_pass123
TENANT_DATABASE_URL=postgresql+asyncpg://tenant_user:tenant_pass123@tenant-db:5432/tenant_db
# Notification Service Database
# Notification Database
NOTIFICATION_DB_NAME=notification_db
NOTIFICATION_DB_USER=notification_user
NOTIFICATION_DB_PASSWORD=notification_pass123
NOTIFICATION_DATABASE_URL=postgresql+asyncpg://notification_user:notification_pass123@notification-db:5432/notification_db
# Database Connection Pool Settings
DB_POOL_SIZE=10
DB_MAX_OVERFLOW=20
DB_POOL_TIMEOUT=30
DB_POOL_RECYCLE=3600
DB_ECHO=false
# ================================================================
# REDIS CONFIGURATION
# Each service uses a different Redis database
# ================================================================
REDIS_URL=redis://redis:6379
REDIS_PASSWORD=redis_pass123
REDIS_MAX_MEMORY=512mb
REDIS_URL=redis://:redis_pass123@redis:6379
REDIS_MAX_CONNECTIONS=50
# Redis Database Assignments:
# Redis Database Assignments (used by standardized config)
# 0 - Auth Service
# 1 - Training Service
# 1 - Training Service
# 2 - Forecasting Service
# 3 - Data Service
# 4 - Tenant Service
@@ -64,18 +123,20 @@ REDIS_MAX_CONNECTIONS=50
# RABBITMQ CONFIGURATION
# ================================================================
RABBITMQ_USER=bakery
RABBITMQ_PASSWORD=forecast123
RABBITMQ_VHOST=/
RABBITMQ_ERLANG_COOKIE=bakery-secret-cookie-change-in-production
RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
RABBITMQ_EXCHANGE=bakery_events
RABBITMQ_QUEUE_PREFIX=bakery
RABBITMQ_RETRY_ATTEMPTS=3
RABBITMQ_RETRY_DELAY=5
# ================================================================
# AUTHENTICATION & SECURITY
# ================================================================
# JWT Configuration (CHANGE IN PRODUCTION!)
JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-very-long-and-secure
JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-min-32-characters-long
JWT_ALGORITHM=HS256
JWT_ACCESS_TOKEN_EXPIRE_MINUTES=30
JWT_REFRESH_TOKEN_EXPIRE_DAYS=7
@@ -100,7 +161,7 @@ LOCKOUT_DURATION_MINUTES=30
# CORS & API CONFIGURATION
# ================================================================
CORS_ORIGINS=http://localhost:3000,http://localhost:3001,http://127.0.0.1:3000
CORS_ORIGINS=http://localhost:3000,http://localhost:3001,http://127.0.0.1:3000,https://panaderia.vercel.app
CORS_ALLOW_CREDENTIALS=true
# Rate Limiting
@@ -113,15 +174,15 @@ RATE_LIMIT_BURST=10
API_DOCS_ENABLED=true
# ================================================================
# SERVICE URLS
# SERVICE DISCOVERY URLS (used by standardized config)
# ================================================================
GATEWAY_URL=http://gateway:8000
AUTH_SERVICE_URL=http://auth-service:8000
TENANT_SERVICE_URL=http://tenant-service:8000
TRAINING_SERVICE_URL=http://training-service:8000
FORECASTING_SERVICE_URL=http://forecasting-service:8000
DATA_SERVICE_URL=http://data-service:8000
TENANT_SERVICE_URL=http://tenant-service:8000
NOTIFICATION_SERVICE_URL=http://notification-service:8000
# HTTP Client Settings
@@ -130,29 +191,37 @@ HTTP_RETRIES=3
HTTP_RETRY_DELAY=1.0
# ================================================================
# EXTERNAL APIS & INTEGRATIONS
# EXTERNAL APIS
# ================================================================
# Spanish Weather Service (AEMET)
AEMET_API_KEY=your-aemet-api-key-here
# SECURITY: a live AEMET JWT was previously committed on this line of the
# example file — rotate that key immediately and keep only a placeholder here.
AEMET_API_KEY=your-aemet-api-key-here
AEMET_BASE_URL=https://opendata.aemet.es/opendata
AEMET_TIMEOUT=30
AEMET_RETRY_ATTEMPTS=3
# Madrid Open Data Platform
MADRID_OPENDATA_API_KEY=your-madrid-opendata-key-here
MADRID_OPENDATA_BASE_URL=https://datos.madrid.es
MADRID_OPENDATA_TIMEOUT=30
# Email Configuration (Gmail example)
# ================================================================
# EMAIL CONFIGURATION
# ================================================================
SMTP_HOST=smtp.gmail.com
SMTP_PORT=587
SMTP_USER=your-email@gmail.com
SMTP_PASSWORD=your-email-app-password
SMTP_PASSWORD=your-app-specific-password
SMTP_TLS=true
SMTP_SSL=false
DEFAULT_FROM_EMAIL=noreply@bakeryforecast.es
DEFAULT_FROM_NAME=Bakery Forecast
# WhatsApp API (Twilio example)
# ================================================================
# WHATSAPP/TWILIO CONFIGURATION
# ================================================================
WHATSAPP_API_KEY=your-whatsapp-api-key-here
WHATSAPP_BASE_URL=https://api.twilio.com
WHATSAPP_FROM_NUMBER=whatsapp:+14155238886
@@ -173,6 +242,10 @@ MAX_CONCURRENT_TRAINING_JOBS=3
MIN_TRAINING_DATA_DAYS=30
TRAINING_BATCH_SIZE=1000
# Resource Limits (used by Docker Compose)
TRAINING_MEMORY_LIMIT=2G
TRAINING_CPU_LIMIT=1.5
# Prophet Configuration
PROPHET_SEASONALITY_MODE=additive
PROPHET_CHANGEPOINT_PRIOR_SCALE=0.05
@@ -194,9 +267,6 @@ MIN_HISTORICAL_DAYS=60
PREDICTION_CONFIDENCE_THRESHOLD=0.8
# Spanish Business Context
TIMEZONE=Europe/Madrid
LOCALE=es_ES.UTF-8
CURRENCY=EUR
BUSINESS_HOUR_START=7
BUSINESS_HOUR_END=20
@@ -260,23 +330,23 @@ LOG_RETENTION_DAYS=30
# Metrics & Monitoring
PROMETHEUS_ENABLED=true
PROMETHEUS_PORT=9090
# Tracing (disabled by default)
JAEGER_ENABLED=false
JAEGER_AGENT_HOST=localhost
JAEGER_AGENT_PORT=6831
PROMETHEUS_RETENTION=200h
# Health Checks
HEALTH_CHECK_TIMEOUT=30
HEALTH_CHECK_INTERVAL=30
# Grafana Configuration
GRAFANA_ADMIN_USER=admin
GRAFANA_ADMIN_PASSWORD=admin123
GRAFANA_SECRET_KEY=grafana-secret-key-change-in-production
GRAFANA_ROOT_URL=http://localhost:3002/
# ================================================================
# DATA RETENTION & CLEANUP
# ================================================================
DATA_RETENTION_DAYS=365
LOG_RETENTION_DAYS=90
METRIC_RETENTION_DAYS=90
TEMP_FILE_CLEANUP_HOURS=24
@@ -347,6 +417,26 @@ DELIVERY_TRACKING_ENABLED=true
OPEN_TRACKING_ENABLED=true
CLICK_TRACKING_ENABLED=true
# ================================================================
# FRONTEND CONFIGURATION
# ================================================================
# Frontend URLs (used by Docker Compose build args)
FRONTEND_API_URL=http://localhost:8000/api/v1
FRONTEND_WS_URL=ws://localhost:8000/ws
# ================================================================
# DEVELOPMENT TOOLS CONFIGURATION
# ================================================================
# pgAdmin
PGADMIN_EMAIL=admin@bakery.local
PGADMIN_PASSWORD=admin123
# Redis Commander
REDIS_COMMANDER_USER=admin
REDIS_COMMANDER_PASSWORD=admin123
# ================================================================
# COMPLIANCE & GDPR
# ================================================================
@@ -419,4 +509,23 @@ TRAINING_WORKER_COUNT=1
# Support & Contact
SUPPORT_EMAIL=soporte@bakeryforecast.es
INVOICE_LANGUAGE=es
INVOICE_LANGUAGE=es
# ================================================================
# NOTES FOR CONFIGURATION MANAGEMENT
# ================================================================
# This .env file is the SINGLE SOURCE OF TRUTH for all configuration.
# Docker Compose uses these variables via ${VARIABLE_NAME} substitution.
# Application services load these via env_file: .env in docker-compose.yml
# No duplication between .env and docker-compose.yml environment sections.
# To override for different environments:
# 1. Copy this file: cp .env.example .env.production
# 2. Modify values in .env.production
# 3. Use: docker-compose --env-file .env.production up -d
# For sensitive values in production:
# 1. Use Docker secrets or external secret management
# 2. Override via environment variables: REDIS_PASSWORD=secret docker-compose up
# 3. Use .env.local (gitignored) for local overrides

View File

@@ -0,0 +1,133 @@
# ================================================================
# TESTING DOCKER COMPOSE FILE
# docker-compose.test.yml
#
# Testing-specific overrides: throwaway tmpfs-backed databases and a
# persistence-free Redis, plus TESTING/MOCK flags on each service.
# Usage: docker-compose -f docker-compose.yml -f docker-compose.test.yml up -d
# ================================================================
version: '3.8'

services:
  # --------------------------------------------------------------
  # Test database services (separate from development/production).
  # tmpfs-backed data dirs: nothing persists between runs, and tests
  # run faster because writes never hit disk.
  # --------------------------------------------------------------
  test-auth-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=test_auth_db
      - POSTGRES_USER=test_user
      - POSTGRES_PASSWORD=test_pass
    tmpfs:
      - /var/lib/postgresql/data  # Use tmpfs for faster tests

  test-training-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=test_training_db
      - POSTGRES_USER=test_user
      - POSTGRES_PASSWORD=test_pass
    tmpfs:
      - /var/lib/postgresql/data

  test-forecasting-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=test_forecasting_db
      - POSTGRES_USER=test_user
      - POSTGRES_PASSWORD=test_pass
    tmpfs:
      - /var/lib/postgresql/data

  test-data-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=test_data_db
      - POSTGRES_USER=test_user
      - POSTGRES_PASSWORD=test_pass
    tmpfs:
      - /var/lib/postgresql/data

  test-tenant-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=test_tenant_db
      - POSTGRES_USER=test_user
      - POSTGRES_PASSWORD=test_pass
    tmpfs:
      - /var/lib/postgresql/data

  test-notification-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=test_notification_db
      - POSTGRES_USER=test_user
      - POSTGRES_PASSWORD=test_pass
    tmpfs:
      - /var/lib/postgresql/data

  # Test Redis: no AOF, no RDB snapshots — purely in-memory for tests.
  test-redis:
    image: redis:7-alpine
    command: redis-server --appendonly no --save ""
    tmpfs:
      - /data

  # --------------------------------------------------------------
  # Override application services to point at the test databases
  # and mock all external APIs.
  # --------------------------------------------------------------
  auth-service:
    environment:
      - TESTING=true
      - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-auth-db:5432/test_auth_db
      - REDIS_URL=redis://test-redis:6379
      - MOCK_EXTERNAL_APIS=true
    depends_on:
      - test-auth-db
      - test-redis

  training-service:
    environment:
      - TESTING=true
      - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-training-db:5432/test_training_db
      - REDIS_URL=redis://test-redis:6379
      - MOCK_EXTERNAL_APIS=true
    depends_on:
      - test-training-db
      - test-redis

  forecasting-service:
    environment:
      - TESTING=true
      - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-forecasting-db:5432/test_forecasting_db
      - REDIS_URL=redis://test-redis:6379
      - MOCK_EXTERNAL_APIS=true
    depends_on:
      - test-forecasting-db
      - test-redis

  data-service:
    environment:
      - TESTING=true
      - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-data-db:5432/test_data_db
      - REDIS_URL=redis://test-redis:6379
      - MOCK_EXTERNAL_APIS=true
    depends_on:
      - test-data-db
      - test-redis

  tenant-service:
    environment:
      - TESTING=true
      - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-tenant-db:5432/test_tenant_db
      - REDIS_URL=redis://test-redis:6379
      - MOCK_EXTERNAL_APIS=true
    depends_on:
      - test-tenant-db
      - test-redis

  notification-service:
    environment:
      - TESTING=true
      - DATABASE_URL=postgresql+asyncpg://test_user:test_pass@test-notification-db:5432/test_notification_db
      - REDIS_URL=redis://test-redis:6379
      - MOCK_EXTERNAL_APIS=true
    depends_on:
      - test-notification-db
      - test-redis

0
docker-compose.prod.yml Normal file
View File

View File

@@ -1,60 +1,83 @@
# ================================================================
# UPDATED DOCKER COMPOSE - PROPER AUTHENTICATION ARCHITECTURE
# OPTIMIZED DOCKER COMPOSE - NO ENVIRONMENT DUPLICATION
# Single source of truth: .env file only
# ================================================================
# ================================================================
# NETWORKS & VOLUMES (same as before)
# ================================================================
networks:
bakery-network:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16
volumes:
postgres_auth_data:
postgres_training_data:
postgres_forecasting_data:
postgres_data_data:
postgres_tenant_data:
postgres_notification_data:
auth_db_data:
training_db_data:
forecasting_db_data:
data_db_data:
tenant_db_data:
notification_db_data:
redis_data:
rabbitmq_data:
prometheus_data:
grafana_data:
model_storage:
log_storage:
# ================================================================
# SERVICES - USING ONLY .env FILE
# ================================================================
services:
# ================================================================
# INFRASTRUCTURE SERVICES
# INFRASTRUCTURE - NO DUPLICATION
# ================================================================
# Redis - For caching, sessions, and rate limiting
redis:
image: redis:7-alpine
container_name: bakery-redis
command: redis-server --appendonly yes --requirepass redis_pass123
restart: unless-stopped
# ONLY use environment substitution from .env
command: >
redis-server
--appendonly yes
--requirepass ${REDIS_PASSWORD}
--maxmemory ${REDIS_MAX_MEMORY:-512mb}
--databases 16
ports:
- "6379:6379"
- "${REDIS_PORT}:6379"
volumes:
- redis_data:/data
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.10
healthcheck:
test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"]
interval: 30s
timeout: 10s
retries: 3
# RabbitMQ - Message broker
rabbitmq:
image: rabbitmq:3-management
image: rabbitmq:3.12-management-alpine
container_name: bakery-rabbitmq
restart: unless-stopped
# ONLY use environment substitution from .env
environment:
- RABBITMQ_DEFAULT_USER=bakery
- RABBITMQ_DEFAULT_PASS=forecast123
- RABBITMQ_DEFAULT_VHOST=/
- RABBITMQ_DEFAULT_USER=${RABBITMQ_USER}
- RABBITMQ_DEFAULT_PASS=${RABBITMQ_PASSWORD}
- RABBITMQ_DEFAULT_VHOST=${RABBITMQ_VHOST}
ports:
- "5672:5672"
- "15672:15672"
- "${RABBITMQ_PORT}:5672"
- "${RABBITMQ_MANAGEMENT_PORT}:15672"
volumes:
- rabbitmq_data:/var/lib/rabbitmq
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.11
healthcheck:
test: ["CMD", "rabbitmq-diagnostics", "ping"]
interval: 30s
@@ -62,167 +85,164 @@ services:
retries: 3
# ================================================================
# DATABASE SERVICES
# DATABASES - NO DUPLICATION
# ================================================================
# Auth Database
auth-db:
image: postgres:15
image: postgres:15-alpine
container_name: bakery-auth-db
restart: unless-stopped
# ONLY reference .env variables
environment:
- POSTGRES_DB=auth_db
- POSTGRES_USER=auth_user
- POSTGRES_PASSWORD=auth_pass123
- POSTGRES_DB=${AUTH_DB_NAME}
- POSTGRES_USER=${AUTH_DB_USER}
- POSTGRES_PASSWORD=${AUTH_DB_PASSWORD}
- POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres_auth_data:/var/lib/postgresql/data
- auth_db_data:/var/lib/postgresql/data
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.20
healthcheck:
test: ["CMD-SHELL", "pg_isready -U auth_user -d auth_db"]
test: ["CMD-SHELL", "pg_isready -U ${AUTH_DB_USER} -d ${AUTH_DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
# Tenant Database
tenant-db:
image: postgres:15
container_name: bakery-tenant-db
environment:
- POSTGRES_DB=tenant_db
- POSTGRES_USER=tenant_user
- POSTGRES_PASSWORD=tenant_pass123
volumes:
- postgres_tenant_data:/var/lib/postgresql/data
networks:
- bakery-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U tenant_user -d tenant_db"]
interval: 10s
timeout: 5s
retries: 5
# Training Database
training-db:
image: postgres:15
image: postgres:15-alpine
container_name: bakery-training-db
restart: unless-stopped
environment:
- POSTGRES_DB=training_db
- POSTGRES_USER=training_user
- POSTGRES_PASSWORD=training_pass123
- POSTGRES_DB=${TRAINING_DB_NAME}
- POSTGRES_USER=${TRAINING_DB_USER}
- POSTGRES_PASSWORD=${TRAINING_DB_PASSWORD}
- POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres_training_data:/var/lib/postgresql/data
- training_db_data:/var/lib/postgresql/data
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.21
healthcheck:
test: ["CMD-SHELL", "pg_isready -U training_user -d training_db"]
test: ["CMD-SHELL", "pg_isready -U ${TRAINING_DB_USER} -d ${TRAINING_DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
# Forecasting Database
forecasting-db:
image: postgres:15
image: postgres:15-alpine
container_name: bakery-forecasting-db
restart: unless-stopped
environment:
- POSTGRES_DB=forecasting_db
- POSTGRES_USER=forecasting_user
- POSTGRES_PASSWORD=forecasting_pass123
- POSTGRES_DB=${FORECASTING_DB_NAME}
- POSTGRES_USER=${FORECASTING_DB_USER}
- POSTGRES_PASSWORD=${FORECASTING_DB_PASSWORD}
- POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres_forecasting_data:/var/lib/postgresql/data
- forecasting_db_data:/var/lib/postgresql/data
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.22
healthcheck:
test: ["CMD-SHELL", "pg_isready -U forecasting_user -d forecasting_db"]
test: ["CMD-SHELL", "pg_isready -U ${FORECASTING_DB_USER} -d ${FORECASTING_DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
# Data Database
data-db:
image: postgres:15
image: postgres:15-alpine
container_name: bakery-data-db
restart: unless-stopped
environment:
- POSTGRES_DB=data_db
- POSTGRES_USER=data_user
- POSTGRES_PASSWORD=data_pass123
- POSTGRES_DB=${DATA_DB_NAME}
- POSTGRES_USER=${DATA_DB_USER}
- POSTGRES_PASSWORD=${DATA_DB_PASSWORD}
- POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres_data_data:/var/lib/postgresql/data
- data_db_data:/var/lib/postgresql/data
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.23
healthcheck:
test: ["CMD-SHELL", "pg_isready -U data_user -d data_db"]
test: ["CMD-SHELL", "pg_isready -U ${DATA_DB_USER} -d ${DATA_DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
tenant-db:
image: postgres:15-alpine
container_name: bakery-tenant-db
restart: unless-stopped
environment:
- POSTGRES_DB=${TENANT_DB_NAME}
- POSTGRES_USER=${TENANT_DB_USER}
- POSTGRES_PASSWORD=${TENANT_DB_PASSWORD}
- POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- tenant_db_data:/var/lib/postgresql/data
networks:
bakery-network:
ipv4_address: 172.20.0.24
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${TENANT_DB_USER} -d ${TENANT_DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
# Notification Database
notification-db:
image: postgres:15
image: postgres:15-alpine
container_name: bakery-notification-db
restart: unless-stopped
environment:
- POSTGRES_DB=notification_db
- POSTGRES_USER=notification_user
- POSTGRES_PASSWORD=notification_pass123
- POSTGRES_DB=${NOTIFICATION_DB_NAME}
- POSTGRES_USER=${NOTIFICATION_DB_USER}
- POSTGRES_PASSWORD=${NOTIFICATION_DB_PASSWORD}
- POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres_notification_data:/var/lib/postgresql/data
- notification_db_data:/var/lib/postgresql/data
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.25
healthcheck:
test: ["CMD-SHELL", "pg_isready -U notification_user -d notification_db"]
test: ["CMD-SHELL", "pg_isready -U ${NOTIFICATION_DB_USER} -d ${NOTIFICATION_DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
# ================================================================
# MICROSERVICES
# MICROSERVICES - CLEAN APPROACH
# ================================================================
# API Gateway - Enhanced with Redis caching
gateway:
build:
context: .
dockerfile: ./gateway/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/gateway:${IMAGE_TAG}
container_name: bakery-gateway
environment:
# Service Discovery
- AUTH_SERVICE_URL=http://auth-service:8000
- TENANT_SERVICE_URL=http://tenant-service:8000
- TRAINING_SERVICE_URL=http://training-service:8000
- FORECASTING_SERVICE_URL=http://forecasting-service:8000
- DATA_SERVICE_URL=http://data-service:8000
- NOTIFICATION_SERVICE_URL=http://notification-service:8000
# Authentication & Caching
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
- REDIS_URL=redis://:redis_pass123@redis:6379/0
# CORS Configuration
- CORS_ORIGINS=http://localhost:3000,http://localhost:3001,https://panaderia.vercel.app
# Service Configuration
- SERVICE_NAME=gateway
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
# Rate Limiting
- RATE_LIMIT_CALLS_PER_MINUTE=60
- RATE_LIMIT_BURST=10
restart: unless-stopped
# ONLY load from .env file - no duplication
env_file: .env
ports:
- "8000:8000"
- "${GATEWAY_PORT}:8000"
depends_on:
redis:
condition: service_healthy
rabbitmq:
condition: service_healthy
auth-service:
condition: service_healthy
tenant-service:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.100
volumes:
- log_storage:/app/logs
- ./gateway:/app
- ./shared:/app/shared
healthcheck:
@@ -231,40 +251,20 @@ services:
timeout: 10s
retries: 3
# Auth Service - Enhanced with proper JWT handling
auth-service:
build:
context: .
dockerfile: ./services/auth/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/auth-service:${IMAGE_TAG}
container_name: bakery-auth-service
environment:
# Database
- DATABASE_URL=postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db
# Redis for sessions and rate limiting
- REDIS_URL=redis://:redis_pass123@redis:6379/1
# Message Queue
- RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
# JWT Configuration
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
- JWT_ACCESS_TOKEN_EXPIRE_MINUTES=30
- JWT_REFRESH_TOKEN_EXPIRE_DAYS=7
# Security Configuration
- PASSWORD_MIN_LENGTH=8
- MAX_LOGIN_ATTEMPTS=5
- LOCKOUT_DURATION_MINUTES=30
# Service Configuration
- SERVICE_NAME=auth-service
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
restart: unless-stopped
# ONLY load from .env file - no duplication
env_file: .env
ports:
- "8001:8000"
- "${AUTH_SERVICE_PORT}:8000"
depends_on:
auth-db:
condition: service_healthy
@@ -273,8 +273,10 @@ services:
rabbitmq:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.101
volumes:
- log_storage:/app/logs
- ./services/auth:/app
- ./shared:/app/shared
healthcheck:
@@ -283,36 +285,19 @@ services:
timeout: 10s
retries: 3
# Tenant Service - New enhanced service
tenant-service:
build:
context: .
dockerfile: ./services/tenant/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/tenant-service:${IMAGE_TAG}
container_name: bakery-tenant-service
environment:
# Database
- DATABASE_URL=postgresql+asyncpg://tenant_user:tenant_pass123@tenant-db:5432/tenant_db
# Redis for caching
- REDIS_URL=redis://:redis_pass123@redis:6379/2
# Message Queue
- RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
# Service Discovery
- AUTH_SERVICE_URL=http://auth-service:8000
# JWT Configuration (for token verification)
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
# Service Configuration
- SERVICE_NAME=tenant-service
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
restart: unless-stopped
env_file: .env
ports:
- "8005:8000"
- "${TENANT_SERVICE_PORT}:8000"
depends_on:
tenant-db:
condition: service_healthy
@@ -323,8 +308,10 @@ services:
auth-service:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.105
volumes:
- log_storage:/app/logs
- ./services/tenant:/app
- ./shared:/app/shared
healthcheck:
@@ -333,44 +320,19 @@ services:
timeout: 10s
retries: 3
# Training Service - Enhanced with tenant isolation
training-service:
build:
context: .
dockerfile: ./services/training/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/training-service:${IMAGE_TAG}
container_name: bakery-training-service
environment:
# Database
- DATABASE_URL=postgresql+asyncpg://training_user:training_pass123@training-db:5432/training_db
# Redis for job queuing and caching
- REDIS_URL=redis://:redis_pass123@redis:6379/3
# Message Queue
- RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
# Service Discovery
- AUTH_SERVICE_URL=http://auth-service:8000
- DATA_SERVICE_URL=http://data-service:8000
- TENANT_SERVICE_URL=http://tenant-service:8000
# JWT Configuration
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
# ML Configuration
- MODEL_STORAGE_PATH=/app/models
- MAX_TRAINING_TIME_MINUTES=30
- MIN_TRAINING_DATA_DAYS=30
- PROPHET_SEASONALITY_MODE=additive
# Service Configuration
- SERVICE_NAME=training-service
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
restart: unless-stopped
env_file: .env
ports:
- "8002:8000"
- "${TRAINING_SERVICE_PORT}:8000"
depends_on:
training-db:
condition: service_healthy
@@ -383,53 +345,37 @@ services:
data-service:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.102
volumes:
- log_storage:/app/logs
- model_storage:/app/models
- ./services/training:/app
- ./shared:/app/shared
- ./models:/app/models # Persistent model storage
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 30s
timeout: 10s
retries: 3
deploy:
resources:
limits:
memory: ${TRAINING_MEMORY_LIMIT:-2G}
cpus: '${TRAINING_CPU_LIMIT:-1.5}'
# Forecasting Service - Enhanced with proper auth
forecasting-service:
build:
context: .
dockerfile: ./services/forecasting/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/forecasting-service:${IMAGE_TAG}
container_name: bakery-forecasting-service
environment:
# Database
- DATABASE_URL=postgresql+asyncpg://forecasting_user:forecasting_pass123@forecasting-db:5432/forecasting_db
# Redis for caching predictions
- REDIS_URL=redis://:redis_pass123@redis:6379/4
# Message Queue
- RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
# Service Discovery
- AUTH_SERVICE_URL=http://auth-service:8000
- TRAINING_SERVICE_URL=http://training-service:8000
- DATA_SERVICE_URL=http://data-service:8000
# JWT Configuration
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
# ML Configuration
- MODEL_STORAGE_PATH=/app/models
- PREDICTION_CACHE_TTL_HOURS=6
# Service Configuration
- SERVICE_NAME=forecasting-service
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
restart: unless-stopped
env_file: .env
ports:
- "8003:8000"
- "${FORECASTING_SERVICE_PORT}:8000"
depends_on:
forecasting-db:
condition: service_healthy
@@ -442,57 +388,32 @@ services:
training-service:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.103
volumes:
- log_storage:/app/logs
- model_storage:/app/models
- ./services/forecasting:/app
- ./shared:/app/shared
- ./models:/app/models # Shared model storage
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 30s
timeout: 10s
retries: 3
# Data Service - Enhanced with external API integration
data-service:
build:
context: .
dockerfile: ./services/data/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/data-service:${IMAGE_TAG}
container_name: bakery-data-service
environment:
# Database
- DATABASE_URL=postgresql+asyncpg://data_user:data_pass123@data-db:5432/data_db
# Redis for API caching
- REDIS_URL=redis://:redis_pass123@redis:6379/5
# Message Queue
- RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
# Service Discovery
- AUTH_SERVICE_URL=http://auth-service:8000
- TENANT_SERVICE_URL=http://tenant-service:8000
# External API Keys
- AEMET_API_KEY=${AEMET_API_KEY:-eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1YWxmYXJvQGdtYWlsLmNvbSIsImp0aSI6ImRjZWVmNTEwLTdmYzEtNGMxNy1hODZkLWQ4NzdlZDc5ZDllNyIsImlzcyI6IkFFTUVUIiwiaWF0IjoxNzUyODMwMDg3LCJ1c2VySWQiOiJkY2VlZjUxMC03ZmMxLTRjMTctYTg2ZC1kODc3ZWQ3OWQ5ZTciLCJyb2xlIjoiIn0.C047gaiEhWhH4ItDgkHSwg8HzKTzw87TOPRTRf8j-2w}
- MADRID_OPENDATA_API_KEY=${MADRID_OPENDATA_API_KEY:-your-madrid-opendata-key}
# JWT Configuration
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
# Data Configuration
- WEATHER_CACHE_TTL_HOURS=1
- TRAFFIC_CACHE_TTL_HOURS=1
- DATA_RETENTION_DAYS=365
# Service Configuration
- SERVICE_NAME=data-service
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
restart: unless-stopped
env_file: .env
ports:
- "8004:8000"
- "${DATA_SERVICE_PORT}:8000"
depends_on:
data-db:
condition: service_healthy
@@ -503,8 +424,10 @@ services:
auth-service:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.104
volumes:
- log_storage:/app/logs
- ./services/data:/app
- ./shared:/app/shared
healthcheck:
@@ -513,49 +436,19 @@ services:
timeout: 10s
retries: 3
# Notification Service - Enhanced with WhatsApp and Email
notification-service:
build:
context: .
dockerfile: ./services/notification/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
- BUILD_DATE=${BUILD_DATE}
image: bakery/notification-service:${IMAGE_TAG}
container_name: bakery-notification-service
environment:
# Database
- DATABASE_URL=postgresql+asyncpg://notification_user:notification_pass123@notification-db:5432/notification_db
# Redis for queue management
- REDIS_URL=redis://:redis_pass123@redis:6379/6
# Message Queue
- RABBITMQ_URL=amqp://bakery:forecast123@rabbitmq:5672/
# Service Discovery
- AUTH_SERVICE_URL=http://auth-service:8000
- TENANT_SERVICE_URL=http://tenant-service:8000
# Email Configuration
- SMTP_HOST=${SMTP_HOST:-smtp.gmail.com}
- SMTP_PORT=${SMTP_PORT:-587}
- SMTP_USER=${SMTP_USER:-your-email@gmail.com}
- SMTP_PASSWORD=${SMTP_PASSWORD:-your-app-password}
- SMTP_FROM_NAME=Bakery Forecast
# WhatsApp Configuration (Twilio)
- WHATSAPP_ACCOUNT_SID=${WHATSAPP_ACCOUNT_SID:-your-twilio-sid}
- WHATSAPP_AUTH_TOKEN=${WHATSAPP_AUTH_TOKEN:-your-twilio-token}
- WHATSAPP_FROM_NUMBER=${WHATSAPP_FROM_NUMBER:-whatsapp:+14155238886}
# JWT Configuration
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-your-super-secret-jwt-key-change-in-production}
- JWT_ALGORITHM=HS256
# Service Configuration
- SERVICE_NAME=notification-service
- SERVICE_VERSION=1.0.0
- LOG_LEVEL=INFO
restart: unless-stopped
env_file: .env
ports:
- "8006:8000"
- "${NOTIFICATION_SERVICE_PORT}:8000"
depends_on:
notification-db:
condition: service_healthy
@@ -566,8 +459,10 @@ services:
auth-service:
condition: service_healthy
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.106
volumes:
- log_storage:/app/logs
- ./services/notification:/app
- ./shared:/app/shared
healthcheck:
@@ -577,98 +472,119 @@ services:
retries: 3
# ================================================================
# MONITORING SERVICES
# MONITORING - SIMPLE APPROACH
# ================================================================
# Prometheus - Metrics collection
prometheus:
image: prom/prometheus:latest
image: prom/prometheus:v2.45.0
container_name: bakery-prometheus
restart: unless-stopped
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--storage.tsdb.retention.time=${PROMETHEUS_RETENTION:-200h}'
- '--web.enable-lifecycle'
ports:
- "9090:9090"
- "${PROMETHEUS_PORT}:9090"
volumes:
- ./infrastructure/monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- ./infrastructure/monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
networks:
- bakery-network
depends_on:
- gateway
- auth-service
- tenant-service
- training-service
- forecasting-service
- data-service
- notification-service
bakery-network:
ipv4_address: 172.20.0.200
profiles:
- monitoring
# Grafana - Metrics visualization
grafana:
image: grafana/grafana:latest
image: grafana/grafana:10.0.0
container_name: bakery-grafana
restart: unless-stopped
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin123
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
- GF_USERS_ALLOW_SIGN_UP=false
- GF_DEFAULT_TIMEZONE=${TIMEZONE}
- GF_SERVER_ROOT_URL=${GRAFANA_ROOT_URL}
ports:
- "3002:3000"
- "${GRAFANA_PORT}:3000"
volumes:
- grafana_data:/var/lib/grafana
- ./infrastructure/monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards
- ./infrastructure/monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
- ./infrastructure/monitoring/grafana:/etc/grafana/provisioning:ro
networks:
- bakery-network
bakery-network:
ipv4_address: 172.20.0.201
depends_on:
- prometheus
profiles:
- monitoring
# ================================================================
# FRONTEND SERVICES (Optional for development)
# FRONTEND - CLEAN CONFIG
# ================================================================
# React Dashboard
dashboard:
build:
context: ./frontend
dockerfile: Dockerfile.dev
container_name: bakery-frontend
environment:
- REACT_APP_API_URL=http://localhost:8000/api/v1
- REACT_APP_WS_URL=ws://localhost:8000/ws
dockerfile: Dockerfile.${ENVIRONMENT}
args:
- REACT_APP_API_URL=${FRONTEND_API_URL}
- REACT_APP_WS_URL=${FRONTEND_WS_URL}
- REACT_APP_ENVIRONMENT=${ENVIRONMENT}
image: bakery/dashboard:${IMAGE_TAG}
container_name: bakery-dashboard
restart: unless-stopped
ports:
- "3000:3000"
- "${DASHBOARD_PORT}:3000"
depends_on:
- gateway
gateway:
condition: service_healthy
networks:
bakery-network:
ipv4_address: 172.20.0.110
profiles:
- frontend
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/"]
interval: 30s
timeout: 10s
retries: 3
# ================================================================
# DEVELOPMENT TOOLS - OPTIONAL
# ================================================================
pgadmin:
image: dpage/pgadmin4:7.4
container_name: bakery-pgadmin
restart: unless-stopped
environment:
- PGADMIN_DEFAULT_EMAIL=${PGADMIN_EMAIL}
- PGADMIN_DEFAULT_PASSWORD=${PGADMIN_PASSWORD}
- PGADMIN_CONFIG_SERVER_MODE=False
ports:
- "${PGADMIN_PORT}:80"
volumes:
- ./infrastructure/pgadmin/servers.json:/pgadmin4/servers.json:ro
networks:
- bakery-network
volumes:
- ./frontend:/app
- /app/node_modules
command: npm start
profiles:
- development
- admin
# ================================================================
# ENVIRONMENT VARIABLES TEMPLATE
# ================================================================
# Create a .env file with these variables:
#
# # JWT Secret (CHANGE IN PRODUCTION!)
# JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production-min-32-chars
#
# # External API Keys
# AEMET_API_KEY=your-aemet-api-key-here
# MADRID_OPENDATA_API_KEY=your-madrid-opendata-key-here
#
# # Email Configuration (Gmail example)
# SMTP_HOST=smtp.gmail.com
# SMTP_PORT=587
# SMTP_USER=your-email@gmail.com
# SMTP_PASSWORD=your-app-specific-password
#
# # WhatsApp/Twilio Configuration
# WHATSAPP_ACCOUNT_SID=your-twilio-account-sid
# WHATSAPP_AUTH_TOKEN=your-twilio-auth-token
# WHATSAPP_FROM_NUMBER=whatsapp:+14155238886
redis-commander:
image: rediscommander/redis-commander:latest
container_name: bakery-redis-commander
restart: unless-stopped
environment:
- REDIS_HOSTS=local:redis:6379:0:${REDIS_PASSWORD}
- HTTP_USER=${REDIS_COMMANDER_USER}
- HTTP_PASSWORD=${REDIS_COMMANDER_PASSWORD}
ports:
- "${REDIS_COMMANDER_PORT}:8081"
networks:
- bakery-network
profiles:
- development
- admin
depends_on:
- redis

View File

@@ -1,11 +1,15 @@
# infrastructure/monitoring/grafana/dashboards/dashboard.yml
# Grafana dashboard provisioning
apiVersion: 1
providers:
- name: 'Bakery Forecasting'
- name: 'bakery-dashboards'
orgId: 1
folder: ''
folder: 'Bakery Forecasting'
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: true
options:
path: /etc/grafana/provisioning/dashboards

View File

@@ -1,3 +1,6 @@
# infrastructure/monitoring/grafana/datasources/prometheus.yml
# Grafana Prometheus datasource configuration
apiVersion: 1
datasources:
@@ -6,4 +9,20 @@ datasources:
access: proxy
url: http://prometheus:9090
isDefault: true
version: 1
editable: true
jsonData:
timeInterval: "15s"
queryTimeout: "60s"
httpMethod: "POST"
exemplarTraceIdDestinations:
- name: trace_id
datasourceUid: jaeger
- name: Jaeger
type: jaeger
access: proxy
url: http://jaeger:16686
uid: jaeger
version: 1
editable: true

View File

@@ -1,17 +1,30 @@
---
# infrastructure/monitoring/prometheus/prometheus.yml
# Prometheus configuration
global:
scrape_interval: 15s
evaluation_interval: 15s
external_labels:
cluster: 'bakery-forecasting'
replica: 'prometheus-01'
rule_files:
- "alerts.yml"
- "/etc/prometheus/rules/*.yml"
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
scrape_configs:
# Service discovery for microservices
- job_name: 'gateway'
static_configs:
- targets: ['gateway:8000']
metrics_path: '/metrics'
scrape_interval: 30s
scrape_timeout: 10s
- job_name: 'auth-service'
static_configs:
@@ -49,11 +62,21 @@ scrape_configs:
metrics_path: '/metrics'
scrape_interval: 30s
# Infrastructure monitoring
- job_name: 'redis'
static_configs:
- targets: ['redis:6379']
metrics_path: '/metrics'
scrape_interval: 30s
- job_name: 'rabbitmq'
static_configs:
- targets: ['rabbitmq:15692']
metrics_path: '/metrics'
scrape_interval: 30s
# Database monitoring (requires postgres_exporter)
- job_name: 'postgres'
static_configs:
- targets: ['postgres-exporter:9187']
scrape_interval: 30s

View File

@@ -0,0 +1,86 @@
# infrastructure/monitoring/prometheus/rules/alerts.yml
# Prometheus alerting rules
groups:
- name: bakery_services
rules:
# Service availability alerts
- alert: ServiceDown
expr: up == 0
for: 2m
labels:
severity: critical
annotations:
summary: "Service {{ $labels.job }} is down"
description: "Service {{ $labels.job }} has been down for more than 2 minutes."
# High error rate alerts
- alert: HighErrorRate
expr: rate(http_requests_total{status=~"5.."}[5m]) > 0.1
for: 5m
labels:
severity: warning
annotations:
summary: "High error rate on {{ $labels.job }}"
description: "Error rate is {{ $value }} errors per second on {{ $labels.job }}."
# High response time alerts
- alert: HighResponseTime
expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m])) > 1
for: 5m
labels:
severity: warning
annotations:
summary: "High response time on {{ $labels.job }}"
description: "95th percentile response time is {{ $value }}s on {{ $labels.job }}."
# Memory usage alerts
- alert: HighMemoryUsage
expr: process_resident_memory_bytes / 1024 / 1024 > 500
for: 5m
labels:
severity: warning
annotations:
summary: "High memory usage on {{ $labels.job }}"
description: "Memory usage is {{ $value }}MB on {{ $labels.job }}."
# Database connection alerts
- alert: DatabaseConnectionHigh
expr: pg_stat_activity_count > 80
for: 5m
labels:
severity: warning
annotations:
summary: "High database connections"
description: "Database has {{ $value }} active connections."
- name: bakery_business
rules:
# Training job alerts
- alert: TrainingJobFailed
expr: increase(training_jobs_failed_total[1h]) > 0
labels:
severity: warning
annotations:
summary: "Training job failed"
description: "{{ $value }} training jobs have failed in the last hour."
# Prediction accuracy alerts
- alert: LowPredictionAccuracy
expr: prediction_accuracy < 0.7
for: 15m
labels:
severity: warning
annotations:
summary: "Low prediction accuracy"
description: "Prediction accuracy is {{ $value }} for tenant {{ $labels.tenant_id }}."
# API rate limit alerts
- alert: APIRateLimitHit
expr: increase(rate_limit_hits_total[5m]) > 10
for: 5m
labels:
severity: warning
annotations:
summary: "API rate limit hit frequently"
description: "Rate limit has been hit {{ $value }} times in 5 minutes."

View File

@@ -0,0 +1,6 @@
auth-db:5432:auth_db:auth_user:auth_pass123
training-db:5432:training_db:training_user:training_pass123
forecasting-db:5432:forecasting_db:forecasting_user:forecasting_pass123
data-db:5432:data_db:data_user:data_pass123
tenant-db:5432:tenant_db:tenant_user:tenant_pass123
notification-db:5432:notification_db:notification_user:notification_pass123

View File

@@ -0,0 +1,64 @@
{
"Servers": {
"1": {
"Name": "Auth Database",
"Group": "Bakery Services",
"Host": "auth-db",
"Port": 5432,
"MaintenanceDB": "auth_db",
"Username": "auth_user",
"PassFile": "/pgadmin4/pgpass",
"SSLMode": "prefer"
},
"2": {
"Name": "Training Database",
"Group": "Bakery Services",
"Host": "training-db",
"Port": 5432,
"MaintenanceDB": "training_db",
"Username": "training_user",
"PassFile": "/pgadmin4/pgpass",
"SSLMode": "prefer"
},
"3": {
"Name": "Forecasting Database",
"Group": "Bakery Services",
"Host": "forecasting-db",
"Port": 5432,
"MaintenanceDB": "forecasting_db",
"Username": "forecasting_user",
"PassFile": "/pgadmin4/pgpass",
"SSLMode": "prefer"
},
"4": {
"Name": "Data Database",
"Group": "Bakery Services",
"Host": "data-db",
"Port": 5432,
"MaintenanceDB": "data_db",
"Username": "data_user",
"PassFile": "/pgadmin4/pgpass",
"SSLMode": "prefer"
},
"5": {
"Name": "Tenant Database",
"Group": "Bakery Services",
"Host": "tenant-db",
"Port": 5432,
"MaintenanceDB": "tenant_db",
"Username": "tenant_user",
"PassFile": "/pgadmin4/pgpass",
"SSLMode": "prefer"
},
"6": {
"Name": "Notification Database",
"Group": "Bakery Services",
"Host": "notification-db",
"Port": 5432,
"MaintenanceDB": "notification_db",
"Username": "notification_user",
"PassFile": "/pgadmin4/pgpass",
"SSLMode": "prefer"
}
}
}

View File

@@ -0,0 +1,26 @@
-- Create extensions for all databases
-- uuid-ossp: UUID generation; pg_stat_statements: query statistics;
-- pg_trgm: trigram indexes for fuzzy text search.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_stat_statements";
CREATE EXTENSION IF NOT EXISTS "pg_trgm";
-- Create Spanish collation for proper text sorting
-- This will be used for bakery names, product names, etc.
-- CREATE COLLATION IF NOT EXISTS spanish (provider = icu, locale = 'es-ES');
-- Set timezone to Madrid
-- NOTE(review): SET only affects the current session; set `timezone` in
-- postgresql.conf (or via ALTER SYSTEM) if it must persist — confirm intent.
SET timezone = 'Europe/Madrid';
-- Performance tuning for small to medium databases
-- NOTE(review): shared_preload_libraries requires a full server restart;
-- the pg_reload_conf() call below will NOT apply it.
ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements';
ALTER SYSTEM SET max_connections = 100;
ALTER SYSTEM SET shared_buffers = '256MB';
ALTER SYSTEM SET effective_cache_size = '1GB';
ALTER SYSTEM SET maintenance_work_mem = '64MB';
ALTER SYSTEM SET checkpoint_completion_target = 0.9;
ALTER SYSTEM SET wal_buffers = '16MB';
ALTER SYSTEM SET default_statistics_target = 100;
ALTER SYSTEM SET random_page_cost = 1.1;
ALTER SYSTEM SET effective_io_concurrency = 200;
-- Reload configuration (applies reloadable settings only; see note above)
SELECT pg_reload_conf();

View File

@@ -0,0 +1,94 @@
{
"rabbit_version": "3.12.0",
"rabbitmq_version": "3.12.0",
"product_name": "RabbitMQ",
"product_version": "3.12.0",
"users": [
{
"name": "bakery",
"password_hash": "hash_of_forecast123",
"hashing_algorithm": "rabbit_password_hashing_sha256",
"tags": ["administrator"]
}
],
"vhosts": [
{
"name": "/"
}
],
"permissions": [
{
"user": "bakery",
"vhost": "/",
"configure": ".*",
"write": ".*",
"read": ".*"
}
],
"exchanges": [
{
"name": "bakery_events",
"vhost": "/",
"type": "topic",
"durable": true,
"auto_delete": false,
"internal": false,
"arguments": {}
}
],
"queues": [
{
"name": "training_events",
"vhost": "/",
"durable": true,
"auto_delete": false,
"arguments": {
"x-message-ttl": 86400000
}
},
{
"name": "forecasting_events",
"vhost": "/",
"durable": true,
"auto_delete": false,
"arguments": {
"x-message-ttl": 86400000
}
},
{
"name": "notification_events",
"vhost": "/",
"durable": true,
"auto_delete": false,
"arguments": {
"x-message-ttl": 86400000
}
}
],
"bindings": [
{
"source": "bakery_events",
"vhost": "/",
"destination": "training_events",
"destination_type": "queue",
"routing_key": "training.*",
"arguments": {}
},
{
"source": "bakery_events",
"vhost": "/",
"destination": "forecasting_events",
"destination_type": "queue",
"routing_key": "forecasting.*",
"arguments": {}
},
{
"source": "bakery_events",
"vhost": "/",
"destination": "notification_events",
"destination_type": "queue",
"routing_key": "notification.*",
"arguments": {}
}
]
}

View File

@@ -0,0 +1,26 @@
# infrastructure/rabbitmq/rabbitmq.conf
# RabbitMQ configuration file
# Network settings
listeners.tcp.default = 5672
management.tcp.port = 15672
# Memory and disk thresholds
vm_memory_high_watermark.relative = 0.6
disk_free_limit.relative = 2.0
# Default user (will be overridden by environment variables)
default_user = bakery
default_pass = forecast123
default_vhost = /
# Management plugin
management.load_definitions = /etc/rabbitmq/definitions.json
# Logging
log.console = true
log.console.level = info
log.file = false
# Queue settings
queue_master_locator = min-masters

View File

@@ -0,0 +1,51 @@
# infrastructure/redis/redis.conf
# Redis configuration file
# Network settings
bind 0.0.0.0
port 6379
timeout 300
tcp-keepalive 300
# General settings
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
# Persistence settings
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
# Append only file settings
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
# Memory management
maxmemory 512mb
maxmemory-policy allkeys-lru
maxmemory-samples 5
# Security
requirepass redis_pass123
# Slow log
slowlog-log-slower-than 10000
slowlog-max-len 128
# Client output buffer limits
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

14
scripts/docker-logs.sh Executable file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# scripts/docker-logs.sh
# View logs for a specific service, or for all services when no argument
# is given.
#
# Usage: ./scripts/docker-logs.sh [service-name]
#
# Fix: the shebang must be the very first line of the file — on a non-first
# line it is ignored and the script runs under the caller's shell.

SERVICE=${1:-"all"}

if [ "$SERVICE" = "all" ]; then
    echo "📋 Showing logs for all services..."
    docker-compose logs -f --tail=100
else
    echo "📋 Showing logs for $SERVICE..."
    # Quote the service name so unusual input cannot word-split
    docker-compose logs -f --tail=100 "$SERVICE"
fi

214
scripts/docker-setup.sh Executable file
View File

@@ -0,0 +1,214 @@
#!/bin/bash
# ================================================================
# scripts/docker-setup.sh
# Setup script for the Bakery Forecasting Platform with proper
# error handling.
#
# Usage: ./scripts/docker-setup.sh [environment] [profiles]
#   environment: development (default), production, testing, or custom
#   profiles:    comma-separated docker-compose profiles
#                (default: "development,frontend")
#
# Fix: the shebang must be the very first line of the file — on a
# non-first line it is ignored and the script runs under the caller's
# shell. Profile flags are now built as a bash array so they survive
# quoting correctly.
# ================================================================
set -e

ENVIRONMENT=${1:-development}
PROFILES=${2:-"development,frontend"}

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Logging functions
print_step() {
    echo -e "${GREEN}[STEP]${NC} $1"
}
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

print_step "Setting up Bakery Forecasting Platform"
echo "Environment: $ENVIRONMENT"
echo "Profiles: $PROFILES"

# Check if .env file exists
if [ ! -f ".env" ]; then
    print_error ".env file not found!"
    echo "Please create .env file with the content from the artifact."
    echo "Run: cp .env.example .env"
    exit 1
fi

# Validate critical environment variables
print_step "Validating environment variables..."

# Source the .env file to check variables
set -a  # automatically export all variables
source .env
set +a

# Check critical variables
critical_vars=(
    "IMAGE_TAG"
    "AUTH_DB_NAME"
    "AUTH_DB_USER"
    "AUTH_DB_PASSWORD"
    "REDIS_PASSWORD"
    "RABBITMQ_USER"
    "RABBITMQ_PASSWORD"
    "GATEWAY_PORT"
    "AUTH_SERVICE_PORT"
)
missing_vars=()
for var in "${critical_vars[@]}"; do
    # ${!var} = indirect expansion: the value of the variable named by $var
    if [ -z "${!var}" ]; then
        missing_vars+=("$var")
    fi
done
if [ ${#missing_vars[@]} -gt 0 ]; then
    print_error "Missing required environment variables:"
    printf '%s\n' "${missing_vars[@]}"
    exit 1
fi
print_step "Environment variables validated successfully"

# Create necessary directories
print_step "Creating necessary directories..."
mkdir -p infrastructure/{redis,rabbitmq,postgres/init-scripts,monitoring/{prometheus/rules,grafana/{dashboards,datasources}},pgadmin}
mkdir -p backups logs models templates/{email,whatsapp}
mkdir -p shared/{config,auth,database,messaging,monitoring,utils}

# Create basic monitoring configs if they don't exist
if [ ! -f "infrastructure/monitoring/prometheus/prometheus.yml" ]; then
    print_step "Creating basic Prometheus configuration..."
    cat > infrastructure/monitoring/prometheus/prometheus.yml << 'EOF'
global:
  scrape_interval: 15s
scrape_configs:
  - job_name: 'gateway'
    static_configs:
      - targets: ['gateway:8000']
  - job_name: 'auth-service'
    static_configs:
      - targets: ['auth-service:8000']
  - job_name: 'training-service'
    static_configs:
      - targets: ['training-service:8000']
  - job_name: 'forecasting-service'
    static_configs:
      - targets: ['forecasting-service:8000']
  - job_name: 'data-service'
    static_configs:
      - targets: ['data-service:8000']
  - job_name: 'tenant-service'
    static_configs:
      - targets: ['tenant-service:8000']
  - job_name: 'notification-service'
    static_configs:
      - targets: ['notification-service:8000']
EOF
fi

# Set proper permissions
chmod 644 infrastructure/monitoring/prometheus/prometheus.yml 2>/dev/null || true

# Stop any existing containers
print_step "Stopping existing containers..."
docker-compose down --remove-orphans 2>/dev/null || true

# Build "--profile X --profile Y ..." flags from the comma-separated list.
# An array keeps each flag/value a separate word even under quoting.
build_profile_args() {
    IFS=',' read -ra PROFILE_ARRAY <<< "$PROFILES"
    PROFILE_ARGS=()
    for profile in "${PROFILE_ARRAY[@]}"; do
        PROFILE_ARGS+=(--profile "$profile")
    done
}

# Build and start services based on environment
case $ENVIRONMENT in
    "development")
        print_step "Starting development environment..."
        build_profile_args
        # Build first to catch any build errors
        print_step "Building services..."
        docker-compose "${PROFILE_ARGS[@]}" build
        # Then start
        print_step "Starting services..."
        docker-compose "${PROFILE_ARGS[@]}" up -d
        ;;
    "production")
        print_step "Starting production environment..."
        docker-compose -f docker-compose.yml -f docker-compose.prod.yml --profile production --profile monitoring up -d --build
        ;;
    "testing")
        print_step "Starting testing environment..."
        docker-compose -f docker-compose.yml -f docker-compose.test.yml up -d --build
        ;;
    *)
        print_step "Starting with custom profiles: $PROFILES"
        build_profile_args
        docker-compose "${PROFILE_ARGS[@]}" build
        docker-compose "${PROFILE_ARGS[@]}" up -d
        ;;
esac

# Wait a moment for services to start
print_step "Waiting for services to start..."
sleep 10

# Check service status
print_step "Checking service status..."
if command -v curl &> /dev/null; then
    # Check if gateway is responding
    if curl -f -s "http://localhost:${GATEWAY_PORT}/health" > /dev/null 2>&1; then
        echo "✅ Gateway is responding"
    else
        echo "⚠️ Gateway is not yet responding (this is normal during first startup)"
    fi
else
    echo "⚠️ curl not found - skipping health check"
fi

print_step "Setup completed!"
echo ""
echo "================================================================"
echo -e "${GREEN}SERVICES AVAILABLE${NC}"
echo "================================================================"
echo "- Gateway: http://localhost:${GATEWAY_PORT}"
echo "- API Docs: http://localhost:${GATEWAY_PORT}/docs"
echo "- Dashboard: http://localhost:${DASHBOARD_PORT} (if frontend profile enabled)"
echo "- Grafana: http://localhost:${GRAFANA_PORT} (${GRAFANA_ADMIN_USER}/${GRAFANA_ADMIN_PASSWORD})"
echo "- pgAdmin: http://localhost:${PGADMIN_PORT} (${PGADMIN_EMAIL}/${PGADMIN_PASSWORD})"
echo "- RabbitMQ: http://localhost:${RABBITMQ_MANAGEMENT_PORT} (${RABBITMQ_USER}/${RABBITMQ_PASSWORD})"
echo "- Redis Commander: http://localhost:${REDIS_COMMANDER_PORT} (${REDIS_COMMANDER_USER}/${REDIS_COMMANDER_PASSWORD})"
echo ""
echo "================================================================"
echo -e "${GREEN}NEXT STEPS${NC}"
echo "================================================================"
echo "1. Check service health:"
echo "   ./scripts/docker-health-check.sh"
echo ""
echo "2. View logs:"
echo "   docker-compose logs -f"
echo ""
echo "3. Check specific service:"
echo "   docker-compose logs -f auth-service"
echo ""
echo "If you see any errors, check the logs for more details."

90
scripts/health-check.sh Executable file
View File

@@ -0,0 +1,90 @@
#!/bin/bash
# scripts/health-check.sh
# Comprehensive health check for all services: probes each application
# container's /health endpoint, pings Redis and the RabbitMQ management
# API, verifies every Postgres database accepts connections, and prints
# per-container resource usage.
#
# Fixes: the shebang must be the first line of the file; a stray copy of
# scripts/docker-logs.sh that was appended to this script (and would
# block it tailing logs forever) has been removed. Credentials fall back
# to the development defaults when the matching env vars are unset.

# "container-name:port" pairs for every service to probe
services=(
    "bakery-redis:6379"
    "bakery-rabbitmq:15672"
    "bakery-gateway:8000"
    "bakery-auth-service:8000"
    "bakery-tenant-service:8000"
    "bakery-training-service:8000"
    "bakery-forecasting-service:8000"
    "bakery-data-service:8000"
    "bakery-notification-service:8000"
)

echo "🏥 Checking service health..."
for service_port in "${services[@]}"; do
    service=${service_port%%:*}  # part before the colon
    port=${service_port##*:}     # part after the colon
    if docker ps --format "{{.Names}}" | grep -q "^$service$"; then
        if [ "$service" = "bakery-redis" ]; then
            # Redis health check: PING must succeed
            if docker exec "$service" redis-cli -a "${REDIS_PASSWORD:-redis_pass123}" ping > /dev/null 2>&1; then
                echo "$service is healthy"
            else
                echo "$service is unhealthy"
            fi
        elif [ "$service" = "bakery-rabbitmq" ]; then
            # RabbitMQ health check via the management API alarm endpoint
            if curl -s -u "${RABBITMQ_USER:-bakery}:${RABBITMQ_PASSWORD:-forecast123}" "http://localhost:$port/api/health/checks/alarms" > /dev/null; then
                echo "$service is healthy"
            else
                echo "$service is unhealthy"
            fi
        else
            # HTTP service health check against the container's network IP
            container_ip=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$service")
            if curl -f -s "http://$container_ip:8000/health" > /dev/null; then
                echo "$service is healthy"
            else
                echo "$service is unhealthy"
            fi
        fi
    else
        echo "⚠️ $service is not running"
    fi
done

echo ""
echo "🔍 Checking database connections..."
databases=("auth-db" "training-db" "forecasting-db" "data-db" "tenant-db" "notification-db")
for db in "${databases[@]}"; do
    if docker ps --format "{{.Names}}" | grep -q "^bakery-$db$"; then
        # Derive credentials from the naming convention:
        # auth-db -> database auth_db, user auth_user, etc.
        db_name=$(echo "$db" | sed 's/-/_/g')
        user=$(echo "$db" | sed 's/-db//' | sed 's/-/_/g')_user
        if docker exec "bakery-$db" pg_isready -U "$user" -d "$db_name" > /dev/null 2>&1; then
            echo "✅ bakery-$db is ready"
        else
            echo "❌ bakery-$db is not ready"
        fi
    else
        echo "⚠️ bakery-$db is not running"
    fi
done

echo ""
echo "📊 Service resource usage:"
docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" $(docker ps --format "{{.Names}}" | grep "^bakery-")

106
scripts/restart-service.sh Executable file
View File

@@ -0,0 +1,106 @@
#!/bin/bash
# ================================================================
# scripts/restart-service.sh
# Rebuild and restart a single service, then wait until its /health
# endpoint responds.
#
# Usage: ./scripts/restart-service.sh <service-name>
#
# Fix: the shebang must be the very first line of the file — on a
# non-first line it is ignored and the script runs under the caller's
# shell.
# ================================================================
set -e

GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'

print_step() {
    echo -e "${BLUE}[RESTART]${NC} $1"
}
print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Rebuild, recreate and health-check one service.
restart_service() {
    local service=$1
    print_step "Restarting $service..."
    # Build first so a broken image fails fast
    docker-compose build "$service"
    # Recreate only this container; leave its dependencies running
    docker-compose up -d --no-deps --force-recreate "$service"
    # Give the process a moment to boot before probing
    sleep 5
    # Map the service name to its published host port for the health probe
    local port
    case $service in
        "gateway") port=8000 ;;
        "auth-service") port=8001 ;;
        "training-service") port=8002 ;;
        "forecasting-service") port=8003 ;;
        "data-service") port=8004 ;;
        "tenant-service") port=8005 ;;
        "notification-service") port=8006 ;;
        *)
            print_error "Unknown service: $service"
            exit 1
            ;;
    esac
    # Health check with timeout: 12 attempts x 5s = 60 seconds total
    local attempts=0
    local max_attempts=12
    while [ $attempts -lt $max_attempts ]; do
        if curl -f -s "http://localhost:$port/health" > /dev/null 2>&1; then
            print_success "$service is healthy and ready"
            return 0
        fi
        attempts=$((attempts + 1))
        echo "Waiting for $service to be healthy... ($attempts/$max_attempts)"
        sleep 5
    done
    print_error "$service failed to become healthy within 60 seconds"
    return 1
}

# Entry point: validate arguments and dispatch.
main() {
    if [ $# -eq 0 ]; then
        echo "Usage: $0 <service-name>"
        echo ""
        echo "Available services:"
        echo "  gateway"
        echo "  auth-service"
        echo "  training-service"
        echo "  forecasting-service"
        echo "  data-service"
        echo "  tenant-service"
        echo "  notification-service"
        echo ""
        echo "Example: $0 auth-service"
        exit 1
    fi
    local service=$1
    echo "================================================================"
    echo "RESTARTING SERVICE: $service"
    echo "================================================================"
    restart_service "$service"
}

# Run main function
main "$@"

File diff suppressed because it is too large Load Diff

297
scripts/validate-config.sh Executable file
View File

@@ -0,0 +1,297 @@
#!/bin/bash
# ================================================================
# scripts/validate-config.sh
# Configuration validation: checks environment variables, per-service
# configuration modules, shared modules, Docker assets, directory
# layout and utility scripts before the stack is started.
#
# Fix: the shebang must be the very first line of the file — on a
# non-first line it is ignored and the script runs under the caller's
# shell.
# ================================================================
set -e

GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_header() {
    echo ""
    echo "================================================================"
    echo -e "${GREEN}$1${NC}"
    echo "================================================================"
}
print_success() {
    echo -e "${GREEN}[✓]${NC} $1"
}
print_error() {
    echo -e "${RED}[✗]${NC} $1"
}
print_warning() {
    echo -e "${YELLOW}[!]${NC} $1"
}

# Verify .env exists, that every critical variable is set and not an
# obvious placeholder, and apply production-specific checks.
validate_env_file() {
    print_header "VALIDATING ENVIRONMENT CONFIGURATION"
    if [ ! -f ".env" ]; then
        print_error ".env file not found"
        exit 1
    fi
    # Load environment variables
    source .env
    # Critical settings validation
    critical_vars=(
        "JWT_SECRET_KEY"
        "AUTH_DATABASE_URL"
        "TRAINING_DATABASE_URL"
        "FORECASTING_DATABASE_URL"
        "DATA_DATABASE_URL"
        "TENANT_DATABASE_URL"
        "NOTIFICATION_DATABASE_URL"
        "REDIS_URL"
        "RABBITMQ_URL"
    )
    all_good=true
    for var in "${critical_vars[@]}"; do
        # ${!var} = indirect expansion: the value of the variable named by $var
        if [ -z "${!var}" ]; then
            print_error "$var is not set"
            all_good=false
        elif [[ "${!var}" == *"change"* ]] || [[ "${!var}" == *"default"* ]]; then
            print_warning "$var appears to use default/placeholder value"
        else
            print_success "$var is configured"
        fi
    done
    # Check JWT secret strength
    if [ ${#JWT_SECRET_KEY} -lt 32 ]; then
        print_error "JWT_SECRET_KEY must be at least 32 characters long"
        all_good=false
    fi
    # Production additionally needs the external integrations configured
    if [ "$ENVIRONMENT" = "production" ]; then
        production_vars=("AEMET_API_KEY" "MADRID_OPENDATA_API_KEY" "SMTP_USER" "SMTP_PASSWORD")
        for var in "${production_vars[@]}"; do
            if [ -z "${!var}" ]; then
                print_warning "$var should be configured for production"
            fi
        done
    fi
    if $all_good; then
        print_success "Environment configuration is valid"
    else
        print_error "Environment configuration has issues"
        exit 1
    fi
}

# Check each service ships a config.py that follows the shared pattern.
validate_service_configs() {
    print_header "VALIDATING SERVICE CONFIGURATIONS"
    services=("auth" "training" "forecasting" "data" "tenant" "notification")
    for service in "${services[@]}"; do
        config_file="services/$service/app/core/config.py"
        if [ -f "$config_file" ]; then
            print_success "$service configuration exists"
            # Check if configuration follows the standard
            if grep -q "BaseServiceSettings" "$config_file"; then
                print_success "$service uses BaseServiceSettings"
            else
                print_warning "$service doesn't inherit from BaseServiceSettings"
            fi
            if grep -q "DATABASE_URL" "$config_file"; then
                print_success "$service has database configuration"
            else
                print_warning "$service missing database configuration"
            fi
        else
            print_error "$service configuration missing"
        fi
    done
    # Check gateway configuration
    if [ -f "gateway/app/core/config.py" ]; then
        print_success "Gateway configuration exists"
    else
        print_error "Gateway configuration missing"
    fi
}

# Check the shared/ package and its expected sub-modules.
validate_shared_config() {
    print_header "VALIDATING SHARED CONFIGURATION"
    if [ -f "shared/config/base.py" ]; then
        print_success "Base configuration exists"
        if grep -q "BaseServiceSettings" "shared/config/base.py"; then
            print_success "BaseServiceSettings class found"
        else
            print_error "BaseServiceSettings class missing"
        fi
    else
        print_error "Base configuration missing"
    fi
    shared_modules=("auth" "database" "messaging" "monitoring" "utils")
    for module in "${shared_modules[@]}"; do
        if [ -d "shared/$module" ]; then
            print_success "Shared $module module exists"
        else
            print_warning "Shared $module module missing"
        fi
    done
}

# Check docker-compose.yml mentions every service/db and each Dockerfile exists.
validate_docker_config() {
    print_header "VALIDATING DOCKER CONFIGURATION"
    if [ -f "docker-compose.yml" ]; then
        print_success "Docker Compose configuration exists"
        # Check if all services are defined
        services=("gateway" "auth-service" "training-service" "forecasting-service" "data-service" "tenant-service" "notification-service")
        for service in "${services[@]}"; do
            if grep -q "$service:" docker-compose.yml; then
                print_success "$service defined in docker-compose.yml"
            else
                print_error "$service missing from docker-compose.yml"
            fi
        done
        # Check if all databases are defined
        databases=("auth-db" "training-db" "forecasting-db" "data-db" "tenant-db" "notification-db")
        for db in "${databases[@]}"; do
            if grep -q "$db:" docker-compose.yml; then
                print_success "$db defined in docker-compose.yml"
            else
                print_error "$db missing from docker-compose.yml"
            fi
        done
        # Check infrastructure services
        infra=("redis" "rabbitmq" "prometheus" "grafana")
        for service in "${infra[@]}"; do
            if grep -q "$service:" docker-compose.yml; then
                print_success "$service defined in docker-compose.yml"
            else
                print_warning "$service missing from docker-compose.yml"
            fi
        done
    else
        print_error "Docker Compose configuration missing"
    fi
    # Check Dockerfiles (gateway lives outside services/)
    services=("gateway" "auth" "training" "forecasting" "data" "tenant" "notification")
    for service in "${services[@]}"; do
        if [ "$service" = "gateway" ]; then
            dockerfile="gateway/Dockerfile"
        else
            dockerfile="services/$service/Dockerfile"
        fi
        if [ -f "$dockerfile" ]; then
            print_success "$service Dockerfile exists"
        else
            print_warning "$service Dockerfile missing"
        fi
    done
}

# Check the expected directory layout, creating missing directories.
validate_directory_structure() {
    print_header "VALIDATING DIRECTORY STRUCTURE"
    required_dirs=(
        "shared/config"
        "shared/auth"
        "shared/database"
        "shared/messaging"
        "gateway/app/core"
        "services/auth/app/core"
        "services/training/app/core"
        "services/forecasting/app/core"
        "services/data/app/core"
        "services/tenant/app/core"
        "services/notification/app/core"
        "scripts"
        "logs"
        "models"
        "templates"
    )
    missing_dirs=()
    for dir in "${required_dirs[@]}"; do
        if [ -d "$dir" ]; then
            print_success "$dir exists"
        else
            print_warning "$dir missing"
            missing_dirs+=("$dir")
        fi
    done
    if [ ${#missing_dirs[@]} -gt 0 ]; then
        print_warning "Creating missing directories..."
        for dir in "${missing_dirs[@]}"; do
            mkdir -p "$dir"
            print_success "Created $dir"
        done
    fi
}

# Check the utility scripts exist and are executable (fixing mode bits).
validate_scripts() {
    print_header "VALIDATING UTILITY SCRIPTS"
    scripts=("setup.sh" "test.sh" "deploy.sh" "health-check.sh" "validate-config.sh")
    for script in "${scripts[@]}"; do
        script_path="scripts/$script"
        if [ -f "$script_path" ]; then
            print_success "$script exists"
            if [ -x "$script_path" ]; then
                print_success "$script is executable"
            else
                print_warning "$script is not executable - fixing..."
                chmod +x "$script_path"
            fi
        else
            print_warning "$script missing"
        fi
    done
}

# Main validation function
main() {
    print_header "CONFIGURATION VALIDATION"
    validate_directory_structure
    validate_shared_config
    validate_service_configs
    validate_env_file
    validate_docker_config
    validate_scripts
    print_header "VALIDATION COMPLETE"
    echo "If all validations passed, you're ready to start the services!"
    echo ""
    echo "Next steps:"
    echo "1. docker-compose up -d"
    echo "2. ./scripts/health-check.sh"
}

# Run validation
main "$@"

View File

@@ -0,0 +1,22 @@
"""
Authentication configuration for forecasting service
"""
from shared.auth.jwt_handler import JWTHandler
from shared.auth.decorators import require_auth, require_role
from app.core.config import settings
# Initialize JWT handler
jwt_handler = JWTHandler(
secret_key=settings.JWT_SECRET_KEY,
algorithm=settings.JWT_ALGORITHM,
access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES
)
# Export commonly used functions
verify_token = jwt_handler.verify_token
create_access_token = jwt_handler.create_access_token
get_current_user = jwt_handler.get_current_user
# Export decorators
__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role']

View File

@@ -0,0 +1,22 @@
"""
Authentication configuration for notification service
"""
from shared.auth.jwt_handler import JWTHandler
from shared.auth.decorators import require_auth, require_role
from app.core.config import settings
# Initialize JWT handler
jwt_handler = JWTHandler(
secret_key=settings.JWT_SECRET_KEY,
algorithm=settings.JWT_ALGORITHM,
access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES
)
# Export commonly used functions
verify_token = jwt_handler.verify_token
create_access_token = jwt_handler.create_access_token
get_current_user = jwt_handler.get_current_user
# Export decorators
__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role']

View File

@@ -0,0 +1,22 @@
"""
Authentication configuration for tenant service
"""
from shared.auth.jwt_handler import JWTHandler
from shared.auth.decorators import require_auth, require_role
from app.core.config import settings
# Initialize JWT handler
jwt_handler = JWTHandler(
secret_key=settings.JWT_SECRET_KEY,
algorithm=settings.JWT_ALGORITHM,
access_token_expire_minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES
)
# Export commonly used functions
verify_token = jwt_handler.verify_token
create_access_token = jwt_handler.create_access_token
get_current_user = jwt_handler.get_current_user
# Export decorators
__all__ = ['verify_token', 'create_access_token', 'get_current_user', 'require_auth', 'require_role']

View File

@@ -108,10 +108,6 @@ app.add_middleware(
allow_headers=["*"],
)
app.add_middleware(
TrustedHostMiddleware,
allowed_hosts=settings.ALLOWED_HOSTS
)
# Request middleware for logging and metrics
@app.middleware("http")

View File

View File

@@ -284,14 +284,6 @@ class BaseServiceSettings(BaseSettings):
raise ValueError("JWT_SECRET_KEY must be at least 32 characters long")
return v
@validator('DATABASE_URL')
def validate_database_url(cls, v):
    """Reject an empty or non-PostgreSQL DATABASE_URL; return it unchanged."""
    if not v:
        raise ValueError("DATABASE_URL is required")
    allowed_schemes = ('postgresql://', 'postgresql+asyncpg://')
    if not v.startswith(allowed_schemes):
        raise ValueError("DATABASE_URL must be a PostgreSQL URL")
    return v
@validator('LOG_LEVEL')
def validate_log_level(cls, v):
valid_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']