# =============================================================================
# Bakery IA - Tiltfile for Secure Local Development
# =============================================================================
# Features:
# - TLS encryption for PostgreSQL and Redis
# - Strong 32-character passwords with PersistentVolumeClaims
# - PostgreSQL pgcrypto extension and audit logging
# - Organized resource dependencies and live-reload capabilities
# - Local registry for faster image builds and deployments
#
# Build Optimization:
# - Services only rebuild when their specific code changes (not all services)
# - Shared folder changes trigger rebuild of ALL services (as they all depend on it)
# - Uses 'only' parameter to watch only relevant files per service
# - Frontend only rebuilds when frontend/ code changes
# - Gateway only rebuilds when gateway/ or shared/ code changes
# =============================================================================

# =============================================================================
# PREPULL BASE IMAGES STEP - CRITICAL FIRST STEP
# =============================================================================
# Run the prepull script first - if this fails, don't continue
local_resource(
    'prepull-base-images',
    cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "=========================================="
echo ""

# Run the prepull script
if ./scripts/prepull-base-images.sh; then
    echo ""
    echo "✓ Base images prepull completed successfully"
    echo "=========================================="
    echo "CONTINUING WITH TILT SETUP..."
    echo "=========================================="
    exit 0
else
    echo ""
    echo "❌ Base images prepull FAILED - stopping Tilt execution"
    echo "This usually happens due to Docker Hub rate limits"
    echo "Please try again later or configure Docker Hub credentials"
    echo "=========================================="
    # Exit with error code to prevent further execution
    exit 1
fi
''',
    labels=['00-prepull'],
    auto_init=True,
    allow_parallel=False
)

# =============================================================================
# TILT CONFIGURATION
# =============================================================================
# Update settings
update_settings(
    max_parallel_updates=2,      # Reduce parallel updates to avoid resource exhaustion
    k8s_upsert_timeout_secs=120  # Increase timeout for slower local builds
)

# Ensure we're running in the correct context
allow_k8s_contexts('kind-bakery-ia-local')

# =============================================================================
# DISK SPACE MANAGEMENT & CLEANUP CONFIGURATION
# =============================================================================
# Disk space management settings
disk_cleanup_enabled = True  # Default to True, can be disabled with TILT_DISABLE_CLEANUP=true
if 'TILT_DISABLE_CLEANUP' in os.environ:
    disk_cleanup_enabled = os.environ['TILT_DISABLE_CLEANUP'].lower() != 'true'

disk_space_threshold_gb = '10'
if 'TILT_DISK_THRESHOLD_GB' in os.environ:
    disk_space_threshold_gb = os.environ['TILT_DISK_THRESHOLD_GB']

disk_cleanup_frequency_minutes = '30'
if 'TILT_CLEANUP_FREQUENCY' in os.environ:
    disk_cleanup_frequency_minutes = os.environ['TILT_CLEANUP_FREQUENCY']

print("""
DISK SPACE MANAGEMENT CONFIGURATION
======================================
Cleanup Enabled: {}
Free Space Threshold: {}GB
Cleanup Frequency: Every {} minutes

To disable cleanup:  export TILT_DISABLE_CLEANUP=true
To change threshold: export TILT_DISK_THRESHOLD_GB=20
To change frequency: export TILT_CLEANUP_FREQUENCY=60
""".format(
    'YES' if disk_cleanup_enabled else 'NO (TILT_DISABLE_CLEANUP=true)',
    disk_space_threshold_gb,
    disk_cleanup_frequency_minutes
))

# Automatic cleanup scheduler (informational only - actual scheduling done externally)
if disk_cleanup_enabled:
    local_resource(
        'automatic-disk-cleanup-info',
        cmd='''
echo "Automatic disk cleanup is ENABLED"
echo "Settings:"
echo "  - Threshold: ''' + disk_space_threshold_gb + ''' GB free space"
echo "  - Frequency: Every ''' + disk_cleanup_frequency_minutes + ''' minutes"
echo ""
echo "Note: Actual cleanup runs via external scheduling (cron job or similar)"
echo "To run cleanup now: tilt trigger manual-disk-cleanup"
''',
        labels=['99-cleanup'],
        auto_init=True,
        allow_parallel=False
    )
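# The resource above only announces the schedule; the cleanup itself is expected to run
# outside Tilt. A minimal sketch of such an external cron entry, assuming the repo lives
# at ~/bakery-ia and python3 is on the PATH (adjust both to your machine):
#
#   */30 * * * *  cd ~/bakery-ia && python3 scripts/cleanup_disk_space.py --manual --verbose >> /tmp/bakery-ia-cleanup.log 2>&1
#
# The 30-minute interval mirrors the default TILT_CLEANUP_FREQUENCY, and --manual/--verbose
# are the same flags used by the 'manual-disk-cleanup' resource below.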
# Manual cleanup trigger (can be run on demand)
local_resource(
    'manual-disk-cleanup',
    cmd='''
echo "Starting manual disk cleanup..."
python3 scripts/cleanup_disk_space.py --manual --verbose
''',
    labels=['99-cleanup'],
    auto_init=False,
    allow_parallel=False
)

# Disk space monitoring resource
local_resource(
    'disk-space-monitor',
    cmd='''
echo "DISK SPACE MONITORING"
echo "======================================"

# Get disk usage
df -h / | grep -v Filesystem | awk '{{print "Total: " $2 " | Used: " $3 " | Free: " $4 " | Usage: " $5}}'

# Get Docker disk usage
echo ""
echo "DOCKER DISK USAGE:"
docker system df

# Get Kubernetes disk usage (if available)
echo ""
echo "KUBERNETES DISK USAGE:"
kubectl get pvc -n bakery-ia --no-headers 2>/dev/null | awk '{{print "PVC: " $1 " | Status: " $2 " | Capacity: " $3 " | Used: " $4}}' || echo "  Kubernetes PVCs not available"

echo ""
echo "Cleanup Status:"
if [ "{disk_cleanup_enabled}" = "True" ]; then
    echo "  Automatic cleanup: ENABLED (every {disk_cleanup_frequency_minutes} minutes)"
    echo "  Threshold: {disk_space_threshold_gb}GB free space"
else
    echo "  Automatic cleanup: DISABLED"
    echo "  To enable: unset TILT_DISABLE_CLEANUP or set TILT_DISABLE_CLEANUP=false"
fi

echo ""
echo "Manual cleanup commands:"
echo "  tilt trigger manual-disk-cleanup  # Run cleanup now"
echo "  docker system prune -a            # Manual Docker cleanup"
echo "  kubectl delete jobs --all         # Clean up completed jobs"
'''.format(
        disk_cleanup_enabled=disk_cleanup_enabled,
        disk_cleanup_frequency_minutes=disk_cleanup_frequency_minutes,
        disk_space_threshold_gb=disk_space_threshold_gb
    ),
    labels=['99-cleanup'],
    auto_init=False,
    allow_parallel=False
)

# =============================================================================
# DOCKER REGISTRY CONFIGURATION
# =============================================================================
# Docker registry configuration
# Set USE_DOCKERHUB=true environment variable to push images to Docker Hub
# Otherwise, uses local registry for faster builds and deployments
use_dockerhub = False  # Default to False
if 'USE_DOCKERHUB' in os.environ:
    use_dockerhub = os.environ['USE_DOCKERHUB'].lower() == 'true'

dockerhub_username = 'uals'  # Default username
if 'DOCKERHUB_USERNAME' in os.environ:
    dockerhub_username = os.environ['DOCKERHUB_USERNAME']

# Base image registry configuration for Dockerfile ARGs
# This controls where the base Python image is pulled from during builds
base_registry = 'localhost:5000'   # Default for local dev
python_image = 'python_3.11-slim'  # Local registry uses underscores
if 'BASE_REGISTRY' in os.environ:
    base_registry = os.environ['BASE_REGISTRY']
if 'PYTHON_IMAGE' in os.environ:
    python_image = os.environ['PYTHON_IMAGE']

# For Docker Hub mode, use canonical image names
if use_dockerhub:
    base_registry = 'docker.io'
    python_image = 'python:3.11-slim'
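# A quick sketch of switching registry modes from a shell before starting Tilt; the
# username is only a placeholder, use your own Docker Hub account:
#
#   # Docker Hub mode
#   export USE_DOCKERHUB=true
#   export DOCKERHUB_USERNAME=<your-dockerhub-user>
#   docker login
#   tilt up
#
#   # Back to the local registry (the default)
#   unset USE_DOCKERHUB
#   tilt up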
if use_dockerhub:
    print("""
DOCKER HUB MODE ENABLED
Images will be pushed to Docker Hub: docker.io/%s
Base images will be pulled from: %s/%s
Make sure you're logged in: docker login
To disable: unset USE_DOCKERHUB or set USE_DOCKERHUB=false
""" % (dockerhub_username, base_registry, python_image))
    default_registry('docker.io/%s' % dockerhub_username)
else:
    print("""
LOCAL REGISTRY MODE
Using local registry for faster builds: localhost:5001
Base images will be pulled from: %s/%s
This registry is created by kubernetes_restart.sh script
To use Docker Hub: export USE_DOCKERHUB=true
To change base registry: export BASE_REGISTRY=<registry>
To change Python image: export PYTHON_IMAGE=<image>
""" % (base_registry, python_image))
    default_registry('localhost:5001')

# =============================================================================
# SECURITY & INITIAL SETUP
# =============================================================================
print("""
======================================
Bakery IA Secure Development Mode
======================================
Security Features:
  TLS encryption for PostgreSQL and Redis
  Strong 32-character passwords
  PersistentVolumeClaims (no data loss)
  Column encryption: pgcrypto extension
  Audit logging: PostgreSQL query logging
  Object storage: MinIO with TLS for ML models
  Monitoring: Service metrics available at /metrics endpoints
  Telemetry ready (traces, metrics, logs)
  SigNoz deployment optional for local dev (see 'signoz-deploy' resource)

Applying security configurations...
""")
# Apply security configurations before loading main manifests
local_resource(
    'security-setup',
    cmd='''
echo "Applying security secrets and configurations..."

# First, ensure all required namespaces exist
echo "Creating namespaces..."
kubectl apply -f infrastructure/namespaces/bakery-ia.yaml
kubectl apply -f infrastructure/namespaces/tekton-pipelines.yaml

# Wait for namespaces to be ready
echo "Waiting for namespaces to be ready..."
for ns in bakery-ia tekton-pipelines; do
    until kubectl get namespace $ns 2>/dev/null; do
        echo "Waiting for namespace $ns to be created..."
        sleep 2
    done
    echo "Namespace $ns is available"
done

# Apply common secrets and configs
kubectl apply -f infrastructure/environments/common/configs/configmap.yaml
kubectl apply -f infrastructure/environments/common/configs/secrets.yaml

# Apply database secrets and configs
kubectl apply -f infrastructure/platform/storage/postgres/secrets/postgres-tls-secret.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-init-config.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-logging-config.yaml

# Apply Redis secrets
kubectl apply -f infrastructure/platform/storage/redis/secrets/redis-tls-secret.yaml

# Apply MinIO secrets and configs
kubectl apply -f infrastructure/platform/storage/minio/minio-secrets.yaml
kubectl apply -f infrastructure/platform/storage/minio/secrets/minio-tls-secret.yaml

# Apply Mail/SMTP secrets (already included in common/configs/secrets.yaml)

# Apply CI/CD secrets
kubectl apply -f infrastructure/cicd/tekton-helm/templates/secrets.yaml

echo "Security configurations applied"
''',
    resource_deps=['prepull-base-images'],  # Removed dockerhub-secret dependency
    labels=['00-security'],
    auto_init=True
)

# Verify TLS certificates are mounted correctly

# =============================================================================
# LOAD KUBERNETES MANIFESTS
# =============================================================================
# Load the main kustomize overlay for the dev environment
k8s_yaml(kustomize('infrastructure/environments/dev/k8s-manifests'))

# =============================================================================
# DOCKER BUILD HELPERS
# =============================================================================
# Helper function for Python services with live updates
# This function ensures services only rebuild when their specific code changes,
# but all services rebuild when shared/ folder changes
def build_python_service(service_name, service_path):
    docker_build(
        'bakery/' + service_name,
        context='.',
        dockerfile='./services/' + service_path + '/Dockerfile',
        # Build arguments for environment-configurable base images
        build_args={
            'BASE_REGISTRY': base_registry,
            'PYTHON_IMAGE': python_image,
        },
        # Only watch files relevant to this specific service + shared code
        only=[
            './services/' + service_path,
            './shared',
            './scripts',
        ],
        live_update=[
            # Fall back to full image build if Dockerfile or requirements change
            fall_back_on([
                './services/' + service_path + '/Dockerfile',
                './services/' + service_path + '/requirements.txt',
                './shared/requirements-tracing.txt',
            ]),
            # Sync service code
            sync('./services/' + service_path, '/app'),
            # Sync shared libraries
            sync('./shared', '/app/shared'),
            # Sync scripts
            sync('./scripts', '/app/scripts'),
            # Install new dependencies if requirements.txt changes
            run(
                'pip install --no-cache-dir -r requirements.txt',
                trigger=['./services/' + service_path + '/requirements.txt']
            ),
            # Restart uvicorn on Python file changes (HUP signal triggers graceful reload)
            run(
                'kill -HUP 1',
                trigger=[
                    './services/' + service_path + '/**/*.py',
                    './shared/**/*.py'
                ]
            ),
        ],
        # Ignore common patterns that don't require rebuilds
        ignore=[
            '.git',
            '**/__pycache__',
            '**/*.pyc',
            '**/.pytest_cache',
            '**/node_modules',
            '**/.DS_Store'
        ]
    )
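# The 'kill -HUP 1' live-update step above assumes PID 1 inside each service container is
# a server that reloads gracefully on SIGHUP. A purely illustrative sketch of a compatible
# entrypoint (the real Dockerfiles live under services/<name>/ and may differ):
#
#   CMD ["gunicorn", "main:app", "-k", "uvicorn.workers.UvicornWorker", "--bind", "0.0.0.0:8000"]
#
# gunicorn restarts its workers on SIGHUP; a bare uvicorn process would need its own
# reload mechanism (for example --reload watching the synced files).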
# =============================================================================
# INFRASTRUCTURE IMAGES
# =============================================================================
# Frontend (React + Vite)
frontend_debug_env = 'false'  # Default to false
if 'FRONTEND_DEBUG' in os.environ:
    frontend_debug_env = os.environ['FRONTEND_DEBUG']
frontend_debug = frontend_debug_env.lower() == 'true'

if frontend_debug:
    print("""
FRONTEND DEBUG MODE ENABLED
Building frontend with NO minification for easier debugging.
Full React error messages will be displayed.
To disable: unset FRONTEND_DEBUG or set FRONTEND_DEBUG=false
""")
else:
    print("""
FRONTEND PRODUCTION MODE
Building frontend with minification for optimized performance.
To enable debug mode: export FRONTEND_DEBUG=true
""")

docker_build(
    'bakery/dashboard',
    context='./frontend',
    dockerfile='./frontend/Dockerfile.kubernetes.debug' if frontend_debug else './frontend/Dockerfile.kubernetes',
    live_update=[
        sync('./frontend/src', '/app/src'),
        sync('./frontend/public', '/app/public'),
    ],
    build_args={
        'NODE_OPTIONS': '--max-old-space-size=8192'
    },
    ignore=[
        'playwright-report/**',
        'test-results/**',
        'node_modules/**',
        '.DS_Store'
    ]
)

# Gateway
docker_build(
    'bakery/gateway',
    context='.',
    dockerfile='./gateway/Dockerfile',
    # Build arguments for environment-configurable base images
    build_args={
        'BASE_REGISTRY': base_registry,
        'PYTHON_IMAGE': python_image,
    },
    # Only watch gateway-specific files and shared code
    only=[
        './gateway',
        './shared',
        './scripts',
    ],
    live_update=[
        fall_back_on([
            './gateway/Dockerfile',
            './gateway/requirements.txt',
            './shared/requirements-tracing.txt',
        ]),
        sync('./gateway', '/app'),
        sync('./shared', '/app/shared'),
        sync('./scripts', '/app/scripts'),
        run('kill -HUP 1', trigger=['./gateway/**/*.py', './shared/**/*.py']),
    ],
    ignore=[
        '.git',
        '**/__pycache__',
        '**/*.pyc',
        '**/.pytest_cache',
        '**/node_modules',
        '**/.DS_Store'
    ]
)

# =============================================================================
# MICROSERVICE IMAGES
# =============================================================================
# Core Services
build_python_service('auth-service', 'auth')
build_python_service('tenant-service', 'tenant')

# Data & Analytics Services
build_python_service('training-service', 'training')
build_python_service('forecasting-service', 'forecasting')
build_python_service('ai-insights-service', 'ai_insights')

# Operations Services
build_python_service('sales-service', 'sales')
build_python_service('inventory-service', 'inventory')
build_python_service('production-service', 'production')
build_python_service('procurement-service', 'procurement')
build_python_service('distribution-service', 'distribution')

# Supporting Services
build_python_service('recipes-service', 'recipes')
build_python_service('suppliers-service', 'suppliers')
build_python_service('pos-service', 'pos')
build_python_service('orders-service', 'orders')
build_python_service('external-service', 'external')

# Platform Services
build_python_service('notification-service', 'notification')
build_python_service('alert-processor', 'alert_processor')
build_python_service('orchestrator-service', 'orchestrator')

# Demo Services
build_python_service('demo-session-service', 'demo_session')

# Tell Tilt that demo-cleanup-worker uses the demo-session-service image
k8s_image_json_path(
    '{.spec.template.spec.containers[?(@.name=="worker")].image}',
    name='demo-cleanup-worker'
)
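# For the JSON path above to match, the demo-cleanup-worker manifest is expected to run a
# container literally named "worker" that references the demo-session-service image. A
# rough sketch of the relevant fragment (the real manifest lives in the kustomize overlay
# and may differ):
#
#   spec:
#     template:
#       spec:
#         containers:
#           - name: worker
#             image: bakery/demo-session-service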
# =============================================================================
# INFRASTRUCTURE RESOURCES
# =============================================================================
# Redis & RabbitMQ
k8s_resource('redis', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('rabbitmq', resource_deps=['security-setup'], labels=['01-infrastructure'])

# MinIO Storage
k8s_resource('minio', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('minio-bucket-init', resource_deps=['minio'], labels=['01-infrastructure'])

# Unbound DNSSEC Resolver - Infrastructure component for Mailu DNS validation
local_resource(
    'unbound-helm',
    cmd='''
echo "Deploying Unbound DNS resolver via Helm..."
echo ""

# Check if Unbound is already deployed
if helm list -n bakery-ia | grep -q unbound; then
    echo "Unbound already deployed, checking status..."
    helm status unbound -n bakery-ia
else
    echo "Installing Unbound..."

    # Determine environment (dev or prod) based on context
    ENVIRONMENT="dev"
    if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
        ENVIRONMENT="prod"
    fi
    echo "Environment detected: $ENVIRONMENT"

    # Install Unbound with appropriate values
    if [ "$ENVIRONMENT" = "dev" ]; then
        helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
            -f infrastructure/platform/networking/dns/unbound-helm/dev/values.yaml \
            --timeout 5m \
            --wait
    else
        helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
            -f infrastructure/platform/networking/dns/unbound-helm/prod/values.yaml \
            --timeout 5m \
            --wait
    fi

    echo ""
    echo "Unbound deployment completed"
fi

echo ""
echo "Unbound DNS Service Information:"
echo "  Service Name: unbound-dns.bakery-ia.svc.cluster.local"
echo "  Ports: UDP/TCP 53"
echo "  Used by: Mailu for DNS validation"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep unbound"
''',
    resource_deps=['security-setup'],
    labels=['01-infrastructure'],
    auto_init=True  # Auto-deploy with Tilt startup
)
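# A quick DNSSEC sanity check once Unbound is up, assuming it is acceptable to run a
# throwaway dnsutils pod in the cluster (the image below is just one common choice):
#
#   kubectl run dns-test --rm -it --restart=Never -n bakery-ia \
#     --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 -- \
#     dig +dnssec example.com @unbound-dns.bakery-ia.svc.cluster.local
#
# A validating resolver should return the answer with the "ad" (authenticated data) flag set.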
# Mail Infrastructure (Mailu) - Manual trigger for Helm deployment
local_resource(
    'mailu-helm',
    cmd='''
echo "Deploying Mailu via Helm..."
echo ""

# =====================================================
# Step 1: Ensure Unbound is deployed and get its IP
# =====================================================
echo "Checking Unbound DNS resolver..."
if ! kubectl get svc unbound-dns -n bakery-ia &>/dev/null; then
    echo "ERROR: Unbound DNS service not found!"
    echo "Please deploy Unbound first by triggering 'unbound-helm' resource"
    exit 1
fi

UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
echo "Unbound DNS service IP: $UNBOUND_IP"

# =====================================================
# Step 2: Configure CoreDNS to forward to Unbound
# =====================================================
echo ""
echo "Configuring CoreDNS to forward external queries to Unbound for DNSSEC validation..."

# Check current CoreDNS forward configuration
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'forward \. [0-9.]*' | awk '{print $3}')

if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
    echo "Updating CoreDNS to forward to Unbound ($UNBOUND_IP)..."

    # Patch CoreDNS ConfigMap
    kubectl patch configmap coredns -n kube-system --type merge -p "{ \"data\": { \"Corefile\": \".:53 {\\n    errors\\n    health {\\n       lameduck 5s\\n    }\\n    ready\\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\\n       pods insecure\\n       fallthrough in-addr.arpa ip6.arpa\\n       ttl 30\\n    }\\n    prometheus :9153\\n    forward . $UNBOUND_IP {\\n       max_concurrent 1000\\n    }\\n    cache 30 {\\n       disable success cluster.local\\n       disable denial cluster.local\\n    }\\n    loop\\n    reload\\n    loadbalance\\n}\\n\" } }"

    # Restart CoreDNS
    kubectl rollout restart deployment coredns -n kube-system
    echo "Waiting for CoreDNS to restart..."
    kubectl rollout status deployment coredns -n kube-system --timeout=60s
    echo "CoreDNS configured successfully"
else
    echo "CoreDNS already configured to forward to Unbound"
fi

# =====================================================
# Step 3: Create self-signed TLS certificate for Mailu Front
# =====================================================
echo ""
echo "Checking Mailu TLS certificates..."
if ! kubectl get secret mailu-certificates -n bakery-ia &>/dev/null; then
    echo "Creating self-signed TLS certificate for Mailu Front..."

    # Generate certificate in temp directory
    TEMP_DIR=$(mktemp -d)
    cd "$TEMP_DIR"

    openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
        -keyout tls.key -out tls.crt \
        -subj "/CN=mail.bakery-ia.local/O=bakery-ia" 2>/dev/null

    kubectl create secret tls mailu-certificates \
        --cert=tls.crt \
        --key=tls.key \
        -n bakery-ia

    rm -rf "$TEMP_DIR"
    echo "TLS certificate created"
else
    echo "Mailu TLS certificate already exists"
fi

# =====================================================
# Step 4: Deploy Mailu via Helm
# =====================================================
echo ""
# Check if Mailu is already deployed
if helm list -n bakery-ia | grep -q mailu; then
    echo "Mailu already deployed, checking status..."
    helm status mailu -n bakery-ia
else
    echo "Installing Mailu..."

    # Add Mailu Helm repository if not already added
    helm repo add mailu https://mailu.github.io/helm-charts 2>/dev/null || true
    helm repo update mailu

    # Determine environment (dev or prod) based on context
    ENVIRONMENT="dev"
    if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
        ENVIRONMENT="prod"
    fi
    echo "Environment detected: $ENVIRONMENT"

    # Install Mailu with appropriate values
    if [ "$ENVIRONMENT" = "dev" ]; then
        helm upgrade --install mailu mailu/mailu \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/mail/mailu-helm/values.yaml \
            -f infrastructure/platform/mail/mailu-helm/dev/values.yaml \
            --timeout 10m
    else
        helm upgrade --install mailu mailu/mailu \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/mail/mailu-helm/values.yaml \
            -f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
            --timeout 10m
    fi

    echo ""
    echo "Mailu deployment completed"
fi

# =====================================================
# Step 5: Wait for pods and show status
# =====================================================
echo ""
echo "Waiting for Mailu pods to be ready..."
sleep 10

echo ""
echo "Mailu Pod Status:"
kubectl get pods -n bakery-ia | grep mailu

echo ""
echo "Mailu Access Information:"
echo "  Admin Panel: https://mail.bakery-ia.local/admin"
echo "  Webmail: https://mail.bakery-ia.local/webmail"
echo "  SMTP: mail.bakery-ia.local:587 (STARTTLS)"
echo "  IMAP: mail.bakery-ia.local:993 (SSL/TLS)"
echo ""
echo "To create admin user:"
echo "  kubectl exec -it -n bakery-ia deployment/mailu-admin -- flask mailu admin admin bakery-ia.local 'YourPassword123!'"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep mailu"
''',
    resource_deps=['unbound-helm'],  # Ensure Unbound is deployed first
    labels=['01-infrastructure'],
    auto_init=False,  # Manual trigger only
)
# Nominatim Geocoding - Manual trigger for Helm deployment
local_resource(
    'nominatim-helm',
    cmd='''
echo "Deploying Nominatim geocoding service via Helm..."
echo ""

# Check if Nominatim is already deployed
if helm list -n bakery-ia | grep -q nominatim; then
    echo "Nominatim already deployed, checking status..."
    helm status nominatim -n bakery-ia
else
    echo "Installing Nominatim..."

    # Determine environment (dev or prod) based on context
    ENVIRONMENT="dev"
    if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
        ENVIRONMENT="prod"
    fi
    echo "Environment detected: $ENVIRONMENT"

    # Install Nominatim with appropriate values
    if [ "$ENVIRONMENT" = "dev" ]; then
        helm upgrade --install nominatim infrastructure/platform/nominatim/nominatim-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/nominatim/nominatim-helm/values.yaml \
            -f infrastructure/platform/nominatim/nominatim-helm/dev/values.yaml \
            --timeout 10m \
            --wait
    else
        helm upgrade --install nominatim infrastructure/platform/nominatim/nominatim-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/nominatim/nominatim-helm/values.yaml \
            -f infrastructure/platform/nominatim/nominatim-helm/prod/values.yaml \
            --timeout 10m \
            --wait
    fi

    echo ""
    echo "Nominatim deployment completed"
fi

echo ""
echo "Nominatim Service Information:"
echo "  Service Name: nominatim-service.bakery-ia.svc.cluster.local"
echo "  Port: 8080"
echo "  Health Check: http://nominatim-service:8080/status"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep nominatim"
echo "To check Helm release: helm status nominatim -n bakery-ia"
''',
    labels=['01-infrastructure'],
    auto_init=False,  # Manual trigger only
)
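# A minimal smoke test after triggering the resource above, assuming the service name and
# port printed by the script (nominatim-service on 8080):
#
#   kubectl port-forward -n bakery-ia svc/nominatim-service 8080:8080 &
#   curl http://localhost:8080/status                                  # "OK" when healthy
#   curl "http://localhost:8080/search?q=Madrid&format=json" | head -c 300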
# =============================================================================
# MONITORING RESOURCES - SigNoz (Unified Observability)
# =============================================================================
# Deploy SigNoz using Helm with automatic deployment and progress tracking
local_resource(
    'signoz-deploy',
    cmd='''
echo "Deploying SigNoz Monitoring Stack..."
echo ""

# Check if SigNoz is already deployed
if helm list -n bakery-ia | grep -q signoz; then
    echo "SigNoz already deployed, checking status..."
    helm status signoz -n bakery-ia
else
    echo "Installing SigNoz..."

    # Add SigNoz Helm repository if not already added
    helm repo add signoz https://charts.signoz.io 2>/dev/null || true
    helm repo update signoz

    # Install SigNoz with custom values in the bakery-ia namespace
    helm upgrade --install signoz signoz/signoz \
        -n bakery-ia \
        -f infrastructure/monitoring/signoz/signoz-values-dev.yaml \
        --timeout 10m \
        --wait

    echo ""
    echo "SigNoz deployment completed"
fi

echo ""
echo "SigNoz Access Information:"
echo "  URL: https://monitoring.bakery-ia.local"
echo "  Username: admin"
echo "  Password: admin"
echo ""
echo "OpenTelemetry Collector Endpoints:"
echo "  gRPC: localhost:4317"
echo "  HTTP: localhost:4318"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep signoz"
''',
    labels=['05-monitoring'],
    auto_init=False,
)
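# Services that emit telemetry are expected to point their OTLP exporter at the SigNoz
# collector. A sketch using the standard OpenTelemetry environment variables; the service
# name below is an assumption, check `kubectl get svc -n bakery-ia | grep otel` for the
# one the chart actually creates:
#
#   OTEL_EXPORTER_OTLP_ENDPOINT=http://signoz-otel-collector.bakery-ia.svc.cluster.local:4317
#   OTEL_EXPORTER_OTLP_PROTOCOL=grpc
#   OTEL_SERVICE_NAME=<service-name>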
# Deploy Flux CD using Helm with automatic deployment and progress tracking
local_resource(
    'flux-cd-deploy',
    cmd='''
echo "Deploying Flux CD GitOps Toolkit..."
echo ""

# Check if Flux is already deployed
if helm list -n flux-system | grep -q flux-cd; then
    echo "Flux CD already deployed, checking status..."
    helm status flux-cd -n flux-system
else
    echo "Installing Flux CD..."

    # Install Flux CRDs first if not already installed
    if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
        echo "Installing Flux CRDs..."
        curl -sL https://fluxcd.io/install.sh | sudo bash
        flux install --namespace=flux-system --network-policy=false
    fi

    # Create the namespace if it doesn't exist
    kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply -f -

    # Install Flux CD with custom values using the local chart
    helm upgrade --install flux-cd infrastructure/cicd/flux \
        -n flux-system \
        --create-namespace \
        --timeout 10m \
        --wait

    echo ""
    echo "Flux CD deployment completed"
fi

echo ""
echo "Flux CD Access Information:"
echo "To check status: flux check"
echo "To check GitRepository: kubectl get gitrepository -n flux-system"
echo "To check Kustomization: kubectl get kustomization -n flux-system"
echo ""
echo "To check pod status: kubectl get pods -n flux-system"
''',
    labels=['99-cicd'],
    auto_init=False,
)

# Optional exporters (in monitoring namespace) - DISABLED since using SigNoz
# k8s_resource('node-exporter', labels=['05-monitoring'])
# k8s_resource('postgres-exporter', resource_deps=['auth-db'], labels=['05-monitoring'])

# =============================================================================
# DATABASE RESOURCES
# =============================================================================
# Core Service Databases
k8s_resource('auth-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('tenant-db', resource_deps=['security-setup'], labels=['06-databases'])

# Data & Analytics Databases
k8s_resource('training-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('forecasting-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('ai-insights-db', resource_deps=['security-setup'], labels=['06-databases'])

# Operations Databases
k8s_resource('sales-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('inventory-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('production-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('procurement-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('distribution-db', resource_deps=['security-setup'], labels=['06-databases'])

# Supporting Service Databases
k8s_resource('recipes-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('suppliers-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('pos-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('orders-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('external-db', resource_deps=['security-setup'], labels=['06-databases'])

# Platform Service Databases
k8s_resource('notification-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('alert-processor-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('orchestrator-db', resource_deps=['security-setup'], labels=['06-databases'])

# Demo Service Databases
k8s_resource('demo-session-db', resource_deps=['security-setup'], labels=['06-databases'])

# =============================================================================
# MIGRATION JOBS
# =============================================================================
# Core Service Migrations
k8s_resource('auth-migration', resource_deps=['auth-db'], labels=['07-migrations'])
k8s_resource('tenant-migration', resource_deps=['tenant-db'], labels=['07-migrations'])

# Data & Analytics Migrations
k8s_resource('training-migration', resource_deps=['training-db'], labels=['07-migrations'])
k8s_resource('forecasting-migration', resource_deps=['forecasting-db'], labels=['07-migrations'])
k8s_resource('ai-insights-migration', resource_deps=['ai-insights-db'], labels=['07-migrations'])

# Operations Migrations
k8s_resource('sales-migration', resource_deps=['sales-db'], labels=['07-migrations'])
k8s_resource('inventory-migration', resource_deps=['inventory-db'], labels=['07-migrations'])
k8s_resource('production-migration', resource_deps=['production-db'], labels=['07-migrations'])
k8s_resource('procurement-migration', resource_deps=['procurement-db'], labels=['07-migrations'])
k8s_resource('distribution-migration', resource_deps=['distribution-db'], labels=['07-migrations'])

# Supporting Service Migrations
k8s_resource('recipes-migration', resource_deps=['recipes-db'], labels=['07-migrations'])
k8s_resource('suppliers-migration', resource_deps=['suppliers-db'], labels=['07-migrations'])
k8s_resource('pos-migration', resource_deps=['pos-db'], labels=['07-migrations'])
k8s_resource('orders-migration', resource_deps=['orders-db'], labels=['07-migrations'])
k8s_resource('external-migration', resource_deps=['external-db'], labels=['07-migrations'])

# Platform Service Migrations
k8s_resource('notification-migration', resource_deps=['notification-db'], labels=['07-migrations'])
k8s_resource('alert-processor-migration', resource_deps=['alert-processor-db'], labels=['07-migrations'])
k8s_resource('orchestrator-migration', resource_deps=['orchestrator-db'], labels=['07-migrations'])

# Demo Service Migrations
k8s_resource('demo-session-migration', resource_deps=['demo-session-db'], labels=['07-migrations'])

# =============================================================================
# DATA INITIALIZATION JOBS
# =============================================================================
k8s_resource('external-data-init', resource_deps=['external-migration', 'redis'], labels=['08-data-init'])
# =============================================================================
# APPLICATION SERVICES
# =============================================================================
# Core Services
k8s_resource('auth-service', resource_deps=['auth-migration', 'redis'], labels=['09-services-core'])
k8s_resource('tenant-service', resource_deps=['tenant-migration', 'redis'], labels=['09-services-core'])

# Data & Analytics Services
k8s_resource('training-service', resource_deps=['training-migration', 'redis'], labels=['10-services-analytics'])
k8s_resource('forecasting-service', resource_deps=['forecasting-migration', 'redis'], labels=['10-services-analytics'])
k8s_resource('ai-insights-service', resource_deps=['ai-insights-migration', 'redis', 'forecasting-service', 'production-service', 'procurement-service'], labels=['10-services-analytics'])

# Operations Services
k8s_resource('sales-service', resource_deps=['sales-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('inventory-service', resource_deps=['inventory-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('production-service', resource_deps=['production-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('procurement-service', resource_deps=['procurement-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('distribution-service', resource_deps=['distribution-migration', 'redis', 'rabbitmq'], labels=['11-services-operations'])

# Supporting Services
k8s_resource('recipes-service', resource_deps=['recipes-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('suppliers-service', resource_deps=['suppliers-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('pos-service', resource_deps=['pos-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('orders-service', resource_deps=['orders-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('external-service', resource_deps=['external-migration', 'external-data-init', 'redis'], labels=['12-services-supporting'])

# Platform Services
k8s_resource('notification-service', resource_deps=['notification-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
k8s_resource('alert-processor', resource_deps=['alert-processor-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
k8s_resource('orchestrator-service', resource_deps=['orchestrator-migration', 'redis'], labels=['13-services-platform'])

# Demo Services
k8s_resource('demo-session-service', resource_deps=['demo-session-migration', 'redis'], labels=['14-services-demo'])
k8s_resource('demo-cleanup-worker', resource_deps=['demo-session-service', 'redis'], labels=['14-services-demo'])

# =============================================================================
# FRONTEND & GATEWAY
# =============================================================================
k8s_resource('gateway', resource_deps=['auth-service'], labels=['15-frontend'])
k8s_resource('frontend', resource_deps=['gateway'], labels=['15-frontend'])

# =============================================================================
# CRONJOBS (Remaining K8s CronJobs)
# =============================================================================
k8s_resource('demo-session-cleanup', resource_deps=['demo-session-service'], labels=['16-cronjobs'])
k8s_resource('external-data-rotation', resource_deps=['external-service'], labels=['16-cronjobs'])
# =============================================================================
# WATCH SETTINGS
# =============================================================================
# Watch settings
watch_settings(
    ignore=[
        '.git/**',
        '**/__pycache__/**',
        '**/*.pyc',
        '**/.pytest_cache/**',
        '**/node_modules/**',
        '**/.DS_Store',
        '**/*.swp',
        '**/*.swo',
        '**/.venv/**',
        '**/venv/**',
        '**/.mypy_cache/**',
        '**/.ruff_cache/**',
        '**/.tox/**',
        '**/htmlcov/**',
        '**/.coverage',
        '**/dist/**',
        '**/build/**',
        '**/*.egg-info/**',
        '**/infrastructure/tls/**/*.pem',
        '**/infrastructure/tls/**/*.cnf',
        '**/infrastructure/tls/**/*.csr',
        '**/infrastructure/tls/**/*.srl',
        '**/*.tmp',
        '**/*.tmp.*',
        '**/migrations/versions/*.tmp.*',
        '**/playwright-report/**',
        '**/test-results/**',
    ]
)

# =============================================================================
# CI/CD INFRASTRUCTURE - MANUAL TRIGGERS
# =============================================================================
# Tekton Pipelines - Manual trigger for local development using Helm
local_resource(
    'tekton-pipelines',
    cmd='''
echo "Setting up Tekton Pipelines for CI/CD using Helm..."
echo ""

# Check if Tekton CRDs are already installed
if kubectl get crd pipelines.tekton.dev >/dev/null 2>&1; then
    echo "  Tekton CRDs already installed"
else
    echo "  Installing Tekton Pipelines (latest release)..."
    kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml

    echo "  Waiting for Tekton to be ready..."
    kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-controller -n tekton-pipelines
    kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-webhook -n tekton-pipelines
    echo "  Tekton installed and ready"
fi

echo ""
echo "Installing Tekton configurations via Helm..."

# Check if Tekton Helm release is already deployed
if helm list -n tekton-pipelines | grep -q tekton-cicd; then
    echo "  Updating existing Tekton CICD deployment..."
    helm upgrade --install tekton-cicd infrastructure/cicd/tekton-helm \
        -n tekton-pipelines \
        --create-namespace \
        --timeout 10m \
        --wait
else
    echo "  Installing new Tekton CICD deployment..."
    helm upgrade --install tekton-cicd infrastructure/cicd/tekton-helm \
        -n tekton-pipelines \
        --create-namespace \
        --timeout 10m \
        --wait
fi

echo ""
echo "Tekton setup complete!"
echo "To check status: kubectl get pods -n tekton-pipelines"
echo "To check Helm release: helm status tekton-cicd -n tekton-pipelines"
''',
    labels=['99-cicd'],
    auto_init=False,  # Manual trigger only
)

# Gitea - Manual trigger for local Git server
local_resource(
    'gitea',
    cmd='''
echo "Setting up Gitea for local Git server..."
echo ""

# Create namespace
kubectl create namespace gitea || true

# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh

# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml

echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: http://gitea.bakery-ia.local (for dev) or http://gitea.bakewise.ai (for prod)"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
''',
    labels=['99-cicd'],
    auto_init=False,  # Manual trigger only
)
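# These CI/CD resources are manual by design. A sketch of driving them from the CLI once
# `tilt up` is running, using the resource names defined in this file:
#
#   tilt trigger tekton-pipelines
#   tilt trigger gitea
#   tilt trigger flux-cd-deploy
#   tilt trigger mailu-helm          # mail stack, also manual
#
# Re-running a trigger is safe: each script either upgrades the existing Helm release or
# skips installation when one is already present.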
# =============================================================================
# STARTUP SUMMARY
# =============================================================================
print("""
Security setup complete!

Database Security Features Active:
  TLS encryption: PostgreSQL and Redis
  Strong passwords: 32-character cryptographic
  Persistent storage: PVCs for all databases
  Column encryption: pgcrypto extension
  Audit logging: PostgreSQL query logging

Internal Schedulers Active:
  Alert Priority Recalculation: Hourly @ :15 (alert-processor)
  Usage Tracking: Daily @ 2:00 AM UTC (tenant-service)
  Disk Cleanup: Every {disk_cleanup_frequency_minutes} minutes (threshold: {disk_space_threshold_gb}GB)

Access your application:
  Main Application: https://bakery-ia.local
  API Endpoints: https://bakery-ia.local/api/v1/...
  Local Access: https://localhost

Service Metrics:
  Gateway: http://localhost:8000/metrics
  Any Service: kubectl port-forward <pod-name> 8000:8000

SigNoz (Unified Observability):
  Deploy via Tilt: Trigger 'signoz-deploy' resource
  Manual deploy: ./infrastructure/monitoring/signoz/deploy-signoz.sh dev
  Access (if deployed): https://monitoring.bakery-ia.local
    Username: admin
    Password: admin

CI/CD Infrastructure (Manual Triggers):
  Tekton: Trigger 'tekton-pipelines' resource
  Flux: Trigger 'flux-cd-deploy' resource
  Gitea: Trigger 'gitea' resource

Verify security:
  kubectl get pvc -n bakery-ia
  kubectl get secrets -n bakery-ia | grep tls
  kubectl logs -n bakery-ia <postgres-pod> | grep SSL

Verify schedulers:
  kubectl exec -it -n bakery-ia deployment/alert-processor -- curl localhost:8000/scheduler/status
  kubectl logs -f -n bakery-ia -l app=tenant-service | grep "usage tracking"

Documentation:
  docs/SECURITY_IMPLEMENTATION_COMPLETE.md
  docs/DATABASE_SECURITY_ANALYSIS_REPORT.md

Build Optimization Active:
  Services only rebuild when their code changes
  Shared folder changes trigger ALL services (as expected)
  Reduces unnecessary rebuilds and disk usage
  Edit service code: only that service rebuilds
  Edit shared/ code: all services rebuild (required)

Useful Commands:
  # Work on specific services only
  tilt up

  # View logs by label
  tilt logs 09-services-core
  tilt logs 13-services-platform

DNS Configuration:
  # To access the application via domain names, add these entries to your hosts file:
  # sudo nano /etc/hosts
  # Add these lines:
  # 127.0.0.1 bakery-ia.local
  # 127.0.0.1 monitoring.bakery-ia.local
======================================
""".format(
    disk_cleanup_frequency_minutes=disk_cleanup_frequency_minutes,
    disk_space_threshold_gb=disk_space_threshold_gb
))