# bakery-ia/Tiltfile
# Last updated: 2026-01-19 15:15:04 +01:00
# (file listing metadata: 975 lines, 38 KiB — preserved from original page header)
# =============================================================================
# Bakery IA - Tiltfile for Secure Local Development
# =============================================================================
# Features:
# - TLS encryption for PostgreSQL and Redis
# - Strong 32-character passwords with PersistentVolumeClaims
# - PostgreSQL pgcrypto extension and audit logging
# - Organized resource dependencies and live-reload capabilities
# - Local registry for faster image builds and deployments
#
# Build Optimization:
# - Services only rebuild when their specific code changes (not all services)
# - Shared folder changes trigger rebuild of ALL services (as they all depend on it)
# - Uses 'only' parameter to watch only relevant files per service
# - Frontend only rebuilds when frontend/ code changes
# - Gateway only rebuilds when gateway/ or shared/ code changes
# =============================================================================
# =============================================================================
# PREPULL BASE IMAGES STEP - CRITICAL FIRST STEP
# =============================================================================
# Run the prepull script first - if this fails, don't continue
# Gate the entire Tilt session on the base images being available locally.
# Resources below depend (directly or transitively) on 'prepull-base-images',
# so a non-zero exit here blocks the rest of the graph instead of letting
# every docker_build fail later against Docker Hub rate limits.
local_resource(
    'prepull-base-images',
    # The cmd body is plain bash; Tilt only reacts to its exit code.
    cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "=========================================="
echo ""
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
echo "❌ Base images prepull FAILED - stopping Tilt execution"
echo "This usually happens due to Docker Hub rate limits"
echo "Please try again later or configure Docker Hub credentials"
echo "=========================================="
# Exit with error code to prevent further execution
exit 1
fi
''',
    labels=['00-prepull'],   # '00-' prefix sorts this first in the Tilt UI
    auto_init=True,          # run automatically on `tilt up`
    allow_parallel=False     # serialize with other local_resources
)
# =============================================================================
# TILT CONFIGURATION
# =============================================================================
# Global update behavior for this Tilt session.
update_settings(
    max_parallel_updates=2,      # Reduce parallel updates to avoid resource exhaustion
    k8s_upsert_timeout_secs=120  # Increase timeout for slower local builds
)
# Hard guard: refuse to run against any context other than the local kind
# cluster, so `tilt up` can never accidentally deploy to a shared cluster.
allow_k8s_contexts('kind-bakery-ia-local')
# =============================================================================
# DISK SPACE MANAGEMENT & CLEANUP CONFIGURATION
# =============================================================================
# Cleanup is on by default; exporting TILT_DISABLE_CLEANUP=true turns it off.
disk_cleanup_enabled = os.environ.get('TILT_DISABLE_CLEANUP', '').lower() != 'true'
# Free-space threshold (GB) and cleanup cadence (minutes); both are kept as
# strings because they are only interpolated into shell commands and banners.
disk_space_threshold_gb = os.environ.get('TILT_DISK_THRESHOLD_GB', '10')
disk_cleanup_frequency_minutes = os.environ.get('TILT_CLEANUP_FREQUENCY', '30')

print("""
DISK SPACE MANAGEMENT CONFIGURATION
======================================
Cleanup Enabled: {}
Free Space Threshold: {}GB
Cleanup Frequency: Every {} minutes
To disable cleanup: export TILT_DISABLE_CLEANUP=true
To change threshold: export TILT_DISK_THRESHOLD_GB=20
To change frequency: export TILT_CLEANUP_FREQUENCY=60
""".format(
    'YES' if disk_cleanup_enabled else 'NO (TILT_DISABLE_CLEANUP=true)',
    disk_space_threshold_gb,
    disk_cleanup_frequency_minutes
))
# Automatic cleanup scheduler (informational only - actual scheduling done externally)
if disk_cleanup_enabled:
    local_resource(
        'automatic-disk-cleanup-info',
        # Informational banner only; the real cleanup runs outside Tilt (cron).
        cmd='''
echo "Automatic disk cleanup is ENABLED"
echo "Settings:"
echo " - Threshold: ''' + disk_space_threshold_gb + ''' GB free space"
echo " - Frequency: Every ''' + disk_cleanup_frequency_minutes + ''' minutes"
echo ""
echo "Note: Actual cleanup runs via external scheduling (cron job or similar)"
echo "To run cleanup now: tilt trigger manual-disk-cleanup"
''',
        labels=['99-cleanup'],
        auto_init=True,
        allow_parallel=False
    )

# Manual cleanup trigger (can be run on demand).
# NOTE(review): placed at top level so `tilt trigger manual-disk-cleanup`
# works even when automatic cleanup is disabled — the original indentation
# was lost; confirm this matches the intended nesting.
local_resource(
    'manual-disk-cleanup',
    # Runs the repo's cleanup script once; auto_init=False → manual trigger only.
    cmd='''
echo "Starting manual disk cleanup..."
python3 scripts/cleanup_disk_space.py --manual --verbose
''',
    labels=['99-cleanup'],
    auto_init=False,
    allow_parallel=False
)
# Disk space monitoring resource.
# BUG FIX: the original cmd string contained `{placeholder}`-style fields
# (and `{{…}}` awk brace escapes) but never called .format(), so the shell
# received the literal placeholder text and the status check
# `[ "{disk_cleanup_enabled}" = "True" ]` could never succeed. The template
# is now actually rendered; the `{{…}}` sequences collapse to single braces
# for awk as originally intended.
local_resource(
    'disk-space-monitor',
    cmd='''
echo "DISK SPACE MONITORING"
echo "======================================"
# Get disk usage
df -h / | grep -v Filesystem | awk '{{print "Total: " $2 " | Used: " $3 " | Free: " $4 " | Usage: " $5}}'
# Get Docker disk usage
echo ""
echo "DOCKER DISK USAGE:"
docker system df
# Get Kubernetes disk usage (if available)
echo ""
echo "KUBERNETES DISK USAGE:"
kubectl get pvc -n bakery-ia --no-headers 2>/dev/null | awk '{{print "PVC: " $1 " | Status: " $2 " | Capacity: " $3 " | Used: " $4}}' || echo " Kubernetes PVCs not available"
echo ""
echo "Cleanup Status:"
if [ "{disk_cleanup_enabled}" = "True" ]; then
echo " Automatic cleanup: ENABLED (every {disk_cleanup_frequency_minutes} minutes)"
echo " Threshold: {disk_space_threshold_gb}GB free space"
else
echo " Automatic cleanup: DISABLED"
echo " To enable: unset TILT_DISABLE_CLEANUP or set TILT_DISABLE_CLEANUP=false"
fi
echo ""
echo "Manual cleanup commands:"
echo " tilt trigger manual-disk-cleanup # Run cleanup now"
echo " docker system prune -a # Manual Docker cleanup"
echo " kubectl delete jobs --all # Clean up completed jobs"
'''.format(
        # bool formats as "True"/"False", matching the shell comparison above
        disk_cleanup_enabled=disk_cleanup_enabled,
        disk_cleanup_frequency_minutes=disk_cleanup_frequency_minutes,
        disk_space_threshold_gb=disk_space_threshold_gb,
    ),
    labels=['99-cleanup'],
    auto_init=False,
    allow_parallel=False
)
# =============================================================================
# DOCKER REGISTRY CONFIGURATION
# =============================================================================
# Set USE_DOCKERHUB=true to push images to Docker Hub; otherwise a local
# registry (created by kubernetes_restart.sh) is used for faster iteration.
use_dockerhub = os.environ.get('USE_DOCKERHUB', '').lower() == 'true'
dockerhub_username = os.environ.get('DOCKERHUB_USERNAME', 'uals')

if use_dockerhub:
    print("""
DOCKER HUB MODE ENABLED
Images will be pushed to Docker Hub: docker.io/%s
Make sure you're logged in: docker login
To disable: unset USE_DOCKERHUB or set USE_DOCKERHUB=false
""" % dockerhub_username)
    default_registry('docker.io/%s' % dockerhub_username)
else:
    print("""
LOCAL REGISTRY MODE
Using local registry for faster builds: localhost:5001
This registry is created by kubernetes_restart.sh script
To use Docker Hub: export USE_DOCKERHUB=true
""")
    default_registry('localhost:5001')
# =============================================================================
# SECURITY & INITIAL SETUP
# =============================================================================
print("""
======================================
Bakery IA Secure Development Mode
======================================
Security Features:
TLS encryption for PostgreSQL and Redis
Strong 32-character passwords
PersistentVolumeClaims (no data loss)
Column encryption: pgcrypto extension
Audit logging: PostgreSQL query logging
Object storage: MinIO with TLS for ML models
Monitoring:
Service metrics available at /metrics endpoints
Telemetry ready (traces, metrics, logs)
SigNoz deployment optional for local dev (see signoz-info resource)
Applying security configurations...
""")
# Apply security configurations before loading main manifests.
# Must complete before any database pod starts: it creates the namespaces
# and the TLS/password secrets those pods mount at startup (enforced via
# resource_deps=['security-setup'] on the db resources below).
local_resource(
    'security-setup',
    cmd='''
echo "Applying security secrets and configurations..."
# First, ensure all required namespaces exist
echo "Creating namespaces..."
kubectl apply -f infrastructure/namespaces/bakery-ia.yaml
kubectl apply -f infrastructure/namespaces/tekton-pipelines.yaml
# Wait for namespaces to be ready
echo "Waiting for namespaces to be ready..."
for ns in bakery-ia tekton-pipelines; do
until kubectl get namespace $ns 2>/dev/null; do
echo "Waiting for namespace $ns to be created..."
sleep 2
done
echo "Namespace $ns is available"
done
# Apply common secrets and configs
kubectl apply -f infrastructure/environments/common/configs/configmap.yaml
kubectl apply -f infrastructure/environments/common/configs/secrets.yaml
# Apply database secrets and configs
kubectl apply -f infrastructure/platform/storage/postgres/secrets/postgres-tls-secret.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-init-config.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-logging-config.yaml
# Apply Redis secrets
kubectl apply -f infrastructure/platform/storage/redis/secrets/redis-tls-secret.yaml
# Apply MinIO secrets and configs
kubectl apply -f infrastructure/platform/storage/minio/minio-secrets.yaml
kubectl apply -f infrastructure/platform/storage/minio/secrets/minio-tls-secret.yaml
# Apply Mail/SMTP secrets (already included in common/configs/secrets.yaml)
# Apply CI/CD secrets
kubectl apply -f infrastructure/cicd/tekton-helm/templates/secrets.yaml
echo "Security configurations applied"
''',
    resource_deps=['prepull-base-images'],  # Removed dockerhub-secret dependency
    labels=['00-security'],
    auto_init=True
)
# TODO(review): dangling note — "Verify TLS certificates are mounted correctly"
# has no accompanying resource; either implement a verification step or drop it.
# =============================================================================
# LOAD KUBERNETES MANIFESTS
# =============================================================================
# Render the dev kustomize overlay and register every object it produces
# with Tilt; the objects are wired into named resources via k8s_resource()
# calls further down in this file.
k8s_yaml(kustomize('infrastructure/environments/dev/k8s-manifests'))
# =============================================================================
# DOCKER BUILD HELPERS
# =============================================================================
def build_python_service(service_name, service_path):
    """Register a docker_build for one Python microservice.

    service_name: image suffix; the image is published as 'bakery/<service_name>'.
    service_path: directory under ./services/ containing the service code.

    The build context is the repo root, but `only` restricts file-watching to
    this service's directory plus ./shared and ./scripts, so a service only
    rebuilds when its own code (or shared code) changes.
    """
    svc_dir = './services/' + service_path
    docker_build(
        'bakery/' + service_name,
        context='.',
        dockerfile=svc_dir + '/Dockerfile',
        # Only watch files relevant to this specific service + shared code.
        only=[
            svc_dir,
            './shared',
            './scripts',
        ],
        live_update=[
            # Dockerfile or dependency changes invalidate the image layout:
            # fall back to a full rebuild instead of an in-place sync.
            fall_back_on([
                svc_dir + '/Dockerfile',
                svc_dir + '/requirements.txt',
                './shared/requirements-tracing.txt',
            ]),
            # In-place code syncs into the running container.
            sync(svc_dir, '/app'),
            sync('./shared', '/app/shared'),
            sync('./scripts', '/app/scripts'),
            # Install new dependencies when the service's requirements change.
            run(
                'pip install --no-cache-dir -r requirements.txt',
                trigger=[svc_dir + '/requirements.txt']
            ),
            # SIGHUP to PID 1 (uvicorn) for a graceful reload on .py changes.
            run(
                'kill -HUP 1',
                trigger=[
                    svc_dir + '/**/*.py',
                    './shared/**/*.py'
                ]
            ),
        ],
        # Editor/VCS/cache noise that never warrants a rebuild.
        ignore=[
            '.git',
            '**/__pycache__',
            '**/*.pyc',
            '**/.pytest_cache',
            '**/node_modules',
            '**/.DS_Store'
        ]
    )
# =============================================================================
# INFRASTRUCTURE IMAGES
# =============================================================================
# Frontend (React + Vite): FRONTEND_DEBUG=true selects an unminified build.
frontend_debug_env = os.environ.get('FRONTEND_DEBUG', 'false')
frontend_debug = frontend_debug_env.lower() == 'true'

if frontend_debug:
    print("""
FRONTEND DEBUG MODE ENABLED
Building frontend with NO minification for easier debugging.
Full React error messages will be displayed.
To disable: unset FRONTEND_DEBUG or set FRONTEND_DEBUG=false
""")
else:
    print("""
FRONTEND PRODUCTION MODE
Building frontend with minification for optimized performance.
To enable debug mode: export FRONTEND_DEBUG=true
""")
# Frontend image: Dockerfile choice follows the FRONTEND_DEBUG toggle above.
docker_build(
    'bakery/dashboard',
    context='./frontend',
    dockerfile='./frontend/Dockerfile.kubernetes.debug' if frontend_debug else './frontend/Dockerfile.kubernetes',
    live_update=[
        # Vite dev server picks up synced sources without a rebuild.
        sync('./frontend/src', '/app/src'),
        sync('./frontend/public', '/app/public'),
    ],
    build_args={
        # Large heap for the Vite/webpack build step.
        'NODE_OPTIONS': '--max-old-space-size=8192'
    },
    ignore=[
        'playwright-report/**',
        'test-results/**',
        'node_modules/**',
        '.DS_Store'
    ]
)
# Gateway image: same live-update strategy as build_python_service, but built
# inline because the gateway lives at ./gateway rather than under ./services.
docker_build(
    'bakery/gateway',
    context='.',
    dockerfile='./gateway/Dockerfile',
    # Only watch gateway-specific files and shared code
    only=[
        './gateway',
        './shared',
        './scripts',
    ],
    live_update=[
        # Dockerfile/dependency changes force a full image rebuild.
        fall_back_on([
            './gateway/Dockerfile',
            './gateway/requirements.txt',
            './shared/requirements-tracing.txt',
        ]),
        sync('./gateway', '/app'),
        sync('./shared', '/app/shared'),
        sync('./scripts', '/app/scripts'),
        # SIGHUP to PID 1 triggers a graceful uvicorn reload on .py changes.
        run('kill -HUP 1', trigger=['./gateway/**/*.py', './shared/**/*.py']),
    ],
    ignore=[
        '.git',
        '**/__pycache__',
        '**/*.pyc',
        '**/.pytest_cache',
        '**/node_modules',
        '**/.DS_Store'
    ]
)
# =============================================================================
# MICROSERVICE IMAGES
# =============================================================================
# Each call registers a docker_build: first argument is the image name used by
# the k8s manifests ('bakery/<name>'), second is the directory under services/.
# Core Services
build_python_service('auth-service', 'auth')
build_python_service('tenant-service', 'tenant')
# Data & Analytics Services
build_python_service('training-service', 'training')
build_python_service('forecasting-service', 'forecasting')
build_python_service('ai-insights-service', 'ai_insights')
# Operations Services
build_python_service('sales-service', 'sales')
build_python_service('inventory-service', 'inventory')
build_python_service('production-service', 'production')
build_python_service('procurement-service', 'procurement')
build_python_service('distribution-service', 'distribution')
# Supporting Services
build_python_service('recipes-service', 'recipes')
build_python_service('suppliers-service', 'suppliers')
build_python_service('pos-service', 'pos')
build_python_service('orders-service', 'orders')
build_python_service('external-service', 'external')
# Platform Services
build_python_service('notification-service', 'notification')
build_python_service('alert-processor', 'alert_processor')
build_python_service('orchestrator-service', 'orchestrator')
# Demo Services
build_python_service('demo-session-service', 'demo_session')
# Tell Tilt that demo-cleanup-worker uses the demo-session-service image.
# NOTE(review): Tilt's k8s_image_json_path() documents the JSON path as the
# first positional argument, with the resource selected via name=/kind= —
# here the image name is passed first and the path second. Confirm this
# argument order against the Tilt API docs for the pinned Tilt version.
k8s_image_json_path(
    'bakery/demo-session-service',
    '{.spec.template.spec.containers[?(@.name=="worker")].image}',
    name='demo-cleanup-worker'
)
# =============================================================================
# INFRASTRUCTURE RESOURCES
# =============================================================================
# Redis & RabbitMQ (wait for secrets/namespaces from security-setup)
k8s_resource('redis', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('rabbitmq', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('nominatim', labels=['01-infrastructure'])
# MinIO Storage
k8s_resource('minio', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('minio-bucket-init', resource_deps=['minio'], labels=['01-infrastructure'])
# Unbound DNSSEC Resolver - Infrastructure component for Mailu DNS validation
k8s_resource('unbound-resolver', resource_deps=['security-setup'], labels=['01-infrastructure'])
# Mail Infrastructure (Mailu) - Manual trigger for Helm deployment.
# auto_init=False: run only via `tilt trigger mailu-helm`.
local_resource(
    'mailu-helm',
    cmd='''
echo "Deploying Mailu via Helm..."
echo ""
# Check if Mailu is already deployed
if helm list -n bakery-ia | grep -q mailu; then
echo "Mailu already deployed, checking status..."
helm status mailu -n bakery-ia
else
echo "Installing Mailu..."
# Add Mailu Helm repository if not already added
helm repo add mailu https://mailu.github.io/helm-charts 2>/dev/null || true
helm repo update mailu
# Determine environment (dev or prod) based on context
ENVIRONMENT="dev"
if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
ENVIRONMENT="prod"
fi
echo "Environment detected: $ENVIRONMENT"
# Install Mailu with appropriate values
if [ "$ENVIRONMENT" = "dev" ]; then
helm upgrade --install mailu mailu/mailu \
-n bakery-ia \
--create-namespace \
-f infrastructure/platform/mail/mailu-helm/values.yaml \
-f infrastructure/platform/mail/mailu-helm/dev/values.yaml \
--timeout 10m \
--wait
else
helm upgrade --install mailu mailu/mailu \
-n bakery-ia \
--create-namespace \
-f infrastructure/platform/mail/mailu-helm/values.yaml \
-f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
--timeout 10m \
--wait
fi
echo ""
echo "Mailu deployment completed"
fi
echo ""
echo "Mailu Access Information:"
echo " Admin Panel: https://mail.[domain]/admin"
echo " Webmail: https://mail.[domain]/webmail"
echo " SMTP: mail.[domain]:587 (STARTTLS)"
echo " IMAP: mail.[domain]:993 (SSL/TLS)"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep mailu"
''',
    labels=['01-infrastructure'],
    auto_init=False,  # Manual trigger only
)
# =============================================================================
# MONITORING RESOURCES - SigNoz (Unified Observability)
# =============================================================================
# Deploy SigNoz via Helm; manual trigger (`tilt trigger signoz-deploy`).
# FIX: the final status hint said `kubectl get pods -n signoz` while the
# chart is installed into the bakery-ia namespace — the message now matches
# the actual install namespace.
local_resource(
    'signoz-deploy',
    cmd='''
echo "Deploying SigNoz Monitoring Stack..."
echo ""
# Check if SigNoz is already deployed
if helm list -n bakery-ia | grep -q signoz; then
echo "SigNoz already deployed, checking status..."
helm status signoz -n bakery-ia
else
echo "Installing SigNoz..."
# Add SigNoz Helm repository if not already added
helm repo add signoz https://charts.signoz.io 2>/dev/null || true
helm repo update signoz
# Install SigNoz with custom values in the bakery-ia namespace
helm upgrade --install signoz signoz/signoz \
-n bakery-ia \
-f infrastructure/monitoring/signoz/signoz-values-dev.yaml \
--timeout 10m \
--wait
echo ""
echo "SigNoz deployment completed"
fi
echo ""
echo "SigNoz Access Information:"
echo " URL: https://monitoring.bakery-ia.local"
echo " Username: admin"
echo " Password: admin"
echo ""
echo "OpenTelemetry Collector Endpoints:"
echo " gRPC: localhost:4317"
echo " HTTP: localhost:4318"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia"
''',
    labels=['05-monitoring'],
    auto_init=False,
)
# Deploy Flux CD GitOps Toolkit via Helm; manual trigger only.
# SECURITY NOTE(review): the CRD bootstrap pipes https://fluxcd.io/install.sh
# into `sudo bash` — a remote script with root privileges. Consider pinning a
# released flux CLI binary (checksum-verified) instead.
local_resource(
    'flux-cd-deploy',
    cmd='''
echo "Deploying Flux CD GitOps Toolkit..."
echo ""
# Check if Flux is already deployed
if helm list -n flux-system | grep -q flux-cd; then
echo "Flux CD already deployed, checking status..."
helm status flux-cd -n flux-system
else
echo "Installing Flux CD..."
# Install Flux CRDs first if not already installed
if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
echo "Installing Flux CRDs..."
curl -sL https://fluxcd.io/install.sh | sudo bash
flux install --namespace=flux-system --network-policy=false
fi
# Create the namespace if it doesn't exist
kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply -f -
# Install Flux CD with custom values using the local chart
helm upgrade --install flux-cd infrastructure/cicd/flux \
-n flux-system \
--create-namespace \
--timeout 10m \
--wait
echo ""
echo "Flux CD deployment completed"
fi
echo ""
echo "Flux CD Access Information:"
echo "To check status: flux check"
echo "To check GitRepository: kubectl get gitrepository -n flux-system"
echo "To check Kustomization: kubectl get kustomization -n flux-system"
echo ""
echo "To check pod status: kubectl get pods -n flux-system"
''',
    labels=['99-cicd'],
    auto_init=False,
)
# Optional exporters (in monitoring namespace) - DISABLED since using SigNoz
# k8s_resource('node-exporter', labels=['05-monitoring'])
# k8s_resource('postgres-exporter', resource_deps=['auth-db'], labels=['05-monitoring'])
# =============================================================================
# DATABASE RESOURCES
# =============================================================================
# Every database waits on security-setup so TLS secrets and the namespace
# exist before the pod mounts them.
# Core Service Databases
k8s_resource('auth-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('tenant-db', resource_deps=['security-setup'], labels=['06-databases'])
# Data & Analytics Databases
k8s_resource('training-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('forecasting-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('ai-insights-db', resource_deps=['security-setup'], labels=['06-databases'])
# Operations Databases
k8s_resource('sales-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('inventory-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('production-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('procurement-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('distribution-db', resource_deps=['security-setup'], labels=['06-databases'])
# Supporting Service Databases
k8s_resource('recipes-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('suppliers-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('pos-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('orders-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('external-db', resource_deps=['security-setup'], labels=['06-databases'])
# Platform Service Databases
k8s_resource('notification-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('alert-processor-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('orchestrator-db', resource_deps=['security-setup'], labels=['06-databases'])
# Demo Service Databases
k8s_resource('demo-session-db', resource_deps=['security-setup'], labels=['06-databases'])
# =============================================================================
# MIGRATION JOBS
# =============================================================================
# Each migration job depends only on its own database being up.
# Core Service Migrations
k8s_resource('auth-migration', resource_deps=['auth-db'], labels=['07-migrations'])
k8s_resource('tenant-migration', resource_deps=['tenant-db'], labels=['07-migrations'])
# Data & Analytics Migrations
k8s_resource('training-migration', resource_deps=['training-db'], labels=['07-migrations'])
k8s_resource('forecasting-migration', resource_deps=['forecasting-db'], labels=['07-migrations'])
k8s_resource('ai-insights-migration', resource_deps=['ai-insights-db'], labels=['07-migrations'])
# Operations Migrations
k8s_resource('sales-migration', resource_deps=['sales-db'], labels=['07-migrations'])
k8s_resource('inventory-migration', resource_deps=['inventory-db'], labels=['07-migrations'])
k8s_resource('production-migration', resource_deps=['production-db'], labels=['07-migrations'])
k8s_resource('procurement-migration', resource_deps=['procurement-db'], labels=['07-migrations'])
k8s_resource('distribution-migration', resource_deps=['distribution-db'], labels=['07-migrations'])
# Supporting Service Migrations
k8s_resource('recipes-migration', resource_deps=['recipes-db'], labels=['07-migrations'])
k8s_resource('suppliers-migration', resource_deps=['suppliers-db'], labels=['07-migrations'])
k8s_resource('pos-migration', resource_deps=['pos-db'], labels=['07-migrations'])
k8s_resource('orders-migration', resource_deps=['orders-db'], labels=['07-migrations'])
k8s_resource('external-migration', resource_deps=['external-db'], labels=['07-migrations'])
# Platform Service Migrations
k8s_resource('notification-migration', resource_deps=['notification-db'], labels=['07-migrations'])
k8s_resource('alert-processor-migration', resource_deps=['alert-processor-db'], labels=['07-migrations'])
k8s_resource('orchestrator-migration', resource_deps=['orchestrator-db'], labels=['07-migrations'])
# Demo Service Migrations
k8s_resource('demo-session-migration', resource_deps=['demo-session-db'], labels=['07-migrations'])
# =============================================================================
# DATA INITIALIZATION JOBS
# =============================================================================
k8s_resource('external-data-init', resource_deps=['external-migration', 'redis'], labels=['08-data-init'])
k8s_resource('nominatim-init', labels=['08-data-init'])
# =============================================================================
# APPLICATION SERVICES
# =============================================================================
# Services start only after their migration job and shared infra are ready.
# Core Services
k8s_resource('auth-service', resource_deps=['auth-migration', 'redis'], labels=['09-services-core'])
k8s_resource('tenant-service', resource_deps=['tenant-migration', 'redis'], labels=['09-services-core'])
# Data & Analytics Services
k8s_resource('training-service', resource_deps=['training-migration', 'redis'], labels=['10-services-analytics'])
k8s_resource('forecasting-service', resource_deps=['forecasting-migration', 'redis'], labels=['10-services-analytics'])
# ai-insights also waits on the services it aggregates data from.
k8s_resource('ai-insights-service', resource_deps=['ai-insights-migration', 'redis', 'forecasting-service', 'production-service', 'procurement-service'], labels=['10-services-analytics'])
# Operations Services
k8s_resource('sales-service', resource_deps=['sales-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('inventory-service', resource_deps=['inventory-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('production-service', resource_deps=['production-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('procurement-service', resource_deps=['procurement-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('distribution-service', resource_deps=['distribution-migration', 'redis', 'rabbitmq'], labels=['11-services-operations'])
# Supporting Services
k8s_resource('recipes-service', resource_deps=['recipes-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('suppliers-service', resource_deps=['suppliers-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('pos-service', resource_deps=['pos-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('orders-service', resource_deps=['orders-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('external-service', resource_deps=['external-migration', 'external-data-init', 'redis'], labels=['12-services-supporting'])
# Platform Services
k8s_resource('notification-service', resource_deps=['notification-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
k8s_resource('alert-processor', resource_deps=['alert-processor-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
k8s_resource('orchestrator-service', resource_deps=['orchestrator-migration', 'redis'], labels=['13-services-platform'])
# Demo Services
k8s_resource('demo-session-service', resource_deps=['demo-session-migration', 'redis'], labels=['14-services-demo'])
k8s_resource('demo-cleanup-worker', resource_deps=['demo-session-service', 'redis'], labels=['14-services-demo'])
# =============================================================================
# FRONTEND & GATEWAY
# =============================================================================
k8s_resource('gateway', resource_deps=['auth-service'], labels=['15-frontend'])
k8s_resource('frontend', resource_deps=['gateway'], labels=['15-frontend'])
# =============================================================================
# CRONJOBS (Remaining K8s CronJobs)
# =============================================================================
k8s_resource('demo-session-cleanup', resource_deps=['demo-session-service'], labels=['16-cronjobs'])
k8s_resource('external-data-rotation', resource_deps=['external-service'], labels=['16-cronjobs'])
# =============================================================================
# WATCH SETTINGS
# =============================================================================
# Paths Tilt's file watcher ignores entirely (VCS metadata, caches, build
# artifacts, generated TLS material, test reports) so they never trigger
# rebuilds or live updates.
watch_settings(
    ignore=[
        '.git/**',
        '**/__pycache__/**',
        '**/*.pyc',
        '**/.pytest_cache/**',
        '**/node_modules/**',
        '**/.DS_Store',
        '**/*.swp',
        '**/*.swo',
        '**/.venv/**',
        '**/venv/**',
        '**/.mypy_cache/**',
        '**/.ruff_cache/**',
        '**/.tox/**',
        '**/htmlcov/**',
        '**/.coverage',
        '**/dist/**',
        '**/build/**',
        '**/*.egg-info/**',
        # Generated TLS material — changes here would otherwise retrigger everything.
        '**/infrastructure/tls/**/*.pem',
        '**/infrastructure/tls/**/*.cnf',
        '**/infrastructure/tls/**/*.csr',
        '**/infrastructure/tls/**/*.srl',
        '**/*.tmp',
        '**/*.tmp.*',
        '**/migrations/versions/*.tmp.*',
        '**/playwright-report/**',
        '**/test-results/**',
    ]
)
# =============================================================================
# CI/CD INFRASTRUCTURE - MANUAL TRIGGERS
# =============================================================================
# Tekton Pipelines - Manual trigger for local development using Helm.
# FIXES: (1) the install message claimed a pinned "v0.57.0" while the script
# actually applies the `latest` release manifest — the message now matches
# reality; (2) the helm list/grep if/else ran byte-identical
# `helm upgrade --install` commands in both branches, so the dead duplicate
# branch is collapsed (`upgrade --install` is already idempotent).
local_resource(
    'tekton-pipelines',
    cmd='''
echo "Setting up Tekton Pipelines for CI/CD using Helm..."
echo ""
# Check if Tekton CRDs are already installed
if kubectl get crd pipelines.tekton.dev >/dev/null 2>&1; then
echo " Tekton CRDs already installed"
else
echo " Installing Tekton (latest release)..."
kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
echo " Waiting for Tekton to be ready..."
kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-controller -n tekton-pipelines
kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-webhook -n tekton-pipelines
echo " Tekton installed and ready"
fi
echo ""
echo "Installing Tekton configurations via Helm..."
# helm upgrade --install is idempotent: installs on first run, upgrades after.
helm upgrade --install tekton-cicd infrastructure/cicd/tekton-helm \
-n tekton-pipelines \
--create-namespace \
--timeout 10m \
--wait
echo ""
echo "Tekton setup complete!"
echo "To check status: kubectl get pods -n tekton-pipelines"
echo "To check Helm release: helm status tekton-cicd -n tekton-pipelines"
''',
    labels=['99-cicd'],
    auto_init=False,  # Manual trigger only
)
# Gitea - Manual trigger for local Git server (Helm chart into its own namespace).
local_resource(
    'gitea',
    cmd='''
echo "Setting up Gitea for local Git server..."
echo ""
# Create namespace
kubectl create namespace gitea || true
# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml
echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: http://gitea.bakery-ia.local (for dev) or http://gitea.bakewise.ai (for prod)"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
''',
    labels=['99-cicd'],
    auto_init=False,  # Manual trigger only
)
# =============================================================================
# STARTUP SUMMARY
# =============================================================================
# FIXES: the summary's `{disk_cleanup_frequency_minutes}` and
# `{disk_space_threshold_gb}` placeholders were printed literally because
# .format() was never called; and the listed trigger names did not match the
# actual resources defined above ('signoz-deployment' -> 'signoz-deploy',
# 'flux-cd' -> 'flux-cd-deploy').
print("""
Security setup complete!
Database Security Features Active:
TLS encryption: PostgreSQL and Redis
Strong passwords: 32-character cryptographic
Persistent storage: PVCs for all databases
Column encryption: pgcrypto extension
Audit logging: PostgreSQL query logging
Internal Schedulers Active:
Alert Priority Recalculation: Hourly @ :15 (alert-processor)
Usage Tracking: Daily @ 2:00 AM UTC (tenant-service)
Disk Cleanup: Every {disk_cleanup_frequency_minutes} minutes (threshold: {disk_space_threshold_gb}GB)
Access your application:
Main Application: https://bakery-ia.local
API Endpoints: https://bakery-ia.local/api/v1/...
Local Access: https://localhost
Service Metrics:
Gateway: http://localhost:8000/metrics
Any Service: kubectl port-forward <service> 8000:8000
SigNoz (Unified Observability):
Deploy via Tilt: Trigger 'signoz-deploy' resource
Manual deploy: ./infrastructure/monitoring/signoz/deploy-signoz.sh dev
Access (if deployed): https://monitoring.bakery-ia.local
Username: admin
Password: admin
CI/CD Infrastructure (Manual Triggers):
Tekton: Trigger 'tekton-pipelines' resource
Flux: Trigger 'flux-cd-deploy' resource
Gitea: Trigger 'gitea' resource
Verify security:
kubectl get pvc -n bakery-ia
kubectl get secrets -n bakery-ia | grep tls
kubectl logs -n bakery-ia <db-pod> | grep SSL
Verify schedulers:
kubectl exec -it -n bakery-ia deployment/alert-processor -- curl localhost:8000/scheduler/status
kubectl logs -f -n bakery-ia -l app=tenant-service | grep "usage tracking"
Documentation:
docs/SECURITY_IMPLEMENTATION_COMPLETE.md
docs/DATABASE_SECURITY_ANALYSIS_REPORT.md
Build Optimization Active:
Services only rebuild when their code changes
Shared folder changes trigger ALL services (as expected)
Reduces unnecessary rebuilds and disk usage
Edit service code: only that service rebuilds
Edit shared/ code: all services rebuild (required)
Useful Commands:
# Work on specific services only
tilt up <service-name> <service-name>
# View logs by label
tilt logs 09-services-core
tilt logs 13-services-platform
DNS Configuration:
# To access the application via domain names, add these entries to your hosts file:
# sudo nano /etc/hosts
# Add these lines:
# 127.0.0.1 bakery-ia.local
# 127.0.0.1 monitoring.bakery-ia.local
======================================
""".format(
    disk_cleanup_frequency_minutes=disk_cleanup_frequency_minutes,
    disk_space_threshold_gb=disk_space_threshold_gb,
))