# =============================================================================
# Bakery IA - Tiltfile for Secure Local Development
# =============================================================================
# Features:
# - TLS encryption for PostgreSQL and Redis
# - Strong 32-character passwords with PersistentVolumeClaims
# - PostgreSQL pgcrypto extension and audit logging
# - Organized resource dependencies and live-reload capabilities
# - Local registry for faster image builds and deployments
#
# Build Optimization:
# - Services only rebuild when their specific code changes (not all services)
# - Shared folder changes trigger rebuild of ALL services (as they all depend on it)
# - Uses 'only' parameter to watch only relevant files per service
# - Frontend only rebuilds when frontend/ code changes
# - Gateway only rebuilds when gateway/ or shared/ code changes
# =============================================================================

# =============================================================================
# GLOBAL VARIABLES - DEFINED FIRST TO BE AVAILABLE FOR ALL RESOURCES
# =============================================================================

# Docker registry configuration
# Set USE_DOCKERHUB=true environment variable to push images to Docker Hub
# Set USE_GITEA_REGISTRY=true environment variable to push images to Gitea registry
# Otherwise, uses local registry for faster builds and deployments
use_dockerhub = False  # Default to False
use_gitea_registry = False  # Default to False - Gitea registry not working currently
if 'USE_DOCKERHUB' in os.environ:
    use_dockerhub = os.environ['USE_DOCKERHUB'].lower() == 'true'
if 'USE_GITEA_REGISTRY' in os.environ:
    use_gitea_registry = os.environ['USE_GITEA_REGISTRY'].lower() == 'true'

dockerhub_username = 'uals'  # Default username
if 'DOCKERHUB_USERNAME' in os.environ:
    dockerhub_username = os.environ['DOCKERHUB_USERNAME']

# Base image registry configuration for Dockerfile ARGs
# This controls where the base Python image is pulled from during builds
base_registry = 'localhost:5000'  # Default for local dev
python_image = 'python_3.11-slim'  # Local registry uses underscores

if 'BASE_REGISTRY' in os.environ:
    base_registry = os.environ['BASE_REGISTRY']
if 'PYTHON_IMAGE' in os.environ:
    python_image = os.environ['PYTHON_IMAGE']

# For Docker Hub mode, use canonical image names
if use_dockerhub:
    base_registry = 'docker.io'
    python_image = 'python:3.11-slim'

# For Gitea registry mode
# Gitea registry is accessed via the registry subdomain (TLS terminated at ingress)
if use_gitea_registry:
    base_registry = 'registry.bakery-ia.local'
    python_image = 'python:3.11-slim'
    # Add fallback to local registry if Gitea registry is not available
    fallback_registry = 'localhost:5001'
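
# The repeated 'if X in os.environ' lookups above could be collapsed with a
# small helper; a minimal sketch (env_or_default is illustrative, not an
# existing helper in this repo — recent Tilt versions also expose
# os.getenv(name, default), which serves the same purpose):
#
#   def env_or_default(name, default):
#       # Return the environment variable value if set, else the default.
#       if name in os.environ:
#           return os.environ[name]
#       return default
#
#   base_registry = env_or_default('BASE_REGISTRY', 'localhost:5000')
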
# =============================================================================
# PREPULL BASE IMAGES STEP - CRITICAL FIRST STEP
# =============================================================================

# Run the prepull script - if this fails, don't continue
# When using Gitea registry, make sure Gitea is available first
if use_gitea_registry:
    local_resource(
        'prepull-base-images',
        cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "Using Gitea Registry Mode"
echo "=========================================="
echo ""

# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=true
export USE_LOCAL_REGISTRY=false

# Wait for Gitea registry to be accessible
echo "Waiting for Gitea registry to be accessible..."
echo "Registry URL: registry.bakery-ia.local (via ingress)"
MAX_RETRIES=30
RETRY_COUNT=0
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
    # Try HTTPS via ingress (registry.bakery-ia.local routes to gitea-http:3000)
    if curl -sk https://registry.bakery-ia.local/v2/ >/dev/null 2>&1; then
        echo "✓ Gitea registry is accessible via HTTPS"
        break
    fi
    # Also try directly via Gitea HTTP service within cluster
    if curl -s http://gitea-http.gitea.svc.cluster.local:3000/v2/ >/dev/null 2>&1; then
        echo "✓ Gitea registry is accessible via internal service"
        break
    fi
    echo "  Waiting for Gitea registry... (attempt $((RETRY_COUNT+1))/$MAX_RETRIES)"
    sleep 10
    RETRY_COUNT=$((RETRY_COUNT+1))
done

if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
    echo "⚠ Warning: Gitea registry not accessible after $MAX_RETRIES attempts"
    echo "  Falling back to local registry"
    export USE_GITEA_REGISTRY=false
    export USE_LOCAL_REGISTRY=true
fi

# Run the prepull script
if ./scripts/prepull-base-images.sh; then
    echo ""
    echo "✓ Base images prepull completed successfully"
    echo "=========================================="
    echo "CONTINUING WITH TILT SETUP..."
    echo "=========================================="
    exit 0
else
    echo ""
    echo "❌ Base images prepull FAILED - stopping Tilt execution"
    echo "This usually happens due to Docker Hub rate limits"
    echo "Please try again later or configure Docker Hub credentials"
    echo "=========================================="
    # Exit with error code to prevent further execution
    exit 1
fi
''',
        resource_deps=['gitea'],  # Depend on Gitea when using Gitea registry
        labels=['00-prepull'],
        auto_init=True,
        allow_parallel=False
    )
else:
    local_resource(
        'prepull-base-images',
        cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "Using Local Registry Mode"
echo "=========================================="
echo ""

# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true

# Run the prepull script
if ./scripts/prepull-base-images.sh; then
    echo ""
    echo "✓ Base images prepull completed successfully"
    echo "=========================================="
    echo "CONTINUING WITH TILT SETUP..."
    echo "=========================================="
    exit 0
else
    echo ""
    echo "❌ Base images prepull FAILED - stopping Tilt execution"
    echo "This usually happens due to Docker Hub rate limits"
    echo "Please try again later or configure Docker Hub credentials"
    echo "=========================================="
    # Exit with error code to prevent further execution
    exit 1
fi
''',
        labels=['00-prepull'],
        auto_init=True,
        allow_parallel=False
    )

# =============================================================================
# TILT CONFIGURATION
# =============================================================================

# Update settings
update_settings(
    max_parallel_updates=2,  # Reduce parallel updates to avoid resource exhaustion
    k8s_upsert_timeout_secs=120  # Increase timeout for slower local builds
)

# Ensure we're running in the correct context
allow_k8s_contexts('kind-bakery-ia-local')
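
# For reference, 'kind-bakery-ia-local' is the kubeconfig context kind creates
# for a cluster named 'bakery-ia-local'. Cluster creation itself is handled by
# kubernetes_restart.sh; the command below is only illustrative:
#   kind create cluster --name bakery-ia-local   # yields context kind-bakery-ia-local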

# =============================================================================
# DISK SPACE MANAGEMENT & CLEANUP CONFIGURATION
# =============================================================================

# Disk space management settings
disk_cleanup_enabled = True  # Default to True, can be disabled with TILT_DISABLE_CLEANUP=true
if 'TILT_DISABLE_CLEANUP' in os.environ:
    disk_cleanup_enabled = os.environ['TILT_DISABLE_CLEANUP'].lower() != 'true'

disk_space_threshold_gb = '10'
if 'TILT_DISK_THRESHOLD_GB' in os.environ:
    disk_space_threshold_gb = os.environ['TILT_DISK_THRESHOLD_GB']

disk_cleanup_frequency_minutes = '30'
if 'TILT_CLEANUP_FREQUENCY' in os.environ:
    disk_cleanup_frequency_minutes = os.environ['TILT_CLEANUP_FREQUENCY']

print("""
DISK SPACE MANAGEMENT CONFIGURATION
======================================
Cleanup Enabled: {}
Free Space Threshold: {}GB
Cleanup Frequency: Every {} minutes

To disable cleanup: export TILT_DISABLE_CLEANUP=true
To change threshold: export TILT_DISK_THRESHOLD_GB=20
To change frequency: export TILT_CLEANUP_FREQUENCY=60
""".format(
    'YES' if disk_cleanup_enabled else 'NO (TILT_DISABLE_CLEANUP=true)',
    disk_space_threshold_gb,
    disk_cleanup_frequency_minutes
))

# Automatic cleanup scheduler (informational only - actual scheduling done externally)
if disk_cleanup_enabled:
    local_resource(
        'automatic-disk-cleanup-info',
        cmd='''
echo "Automatic disk cleanup is ENABLED"
echo "Settings:"
echo "  - Threshold: ''' + disk_space_threshold_gb + ''' GB free space"
echo "  - Frequency: Every ''' + disk_cleanup_frequency_minutes + ''' minutes"
echo ""
echo "Note: Actual cleanup runs via external scheduling (cron job or similar)"
echo "To run cleanup now: tilt trigger manual-disk-cleanup"
''',
        labels=['99-cleanup'],
        auto_init=True,
        allow_parallel=False
    )

# Manual cleanup trigger (can be run on demand)
local_resource(
    'manual-disk-cleanup',
    cmd='''
echo "Starting manual disk cleanup..."
python3 scripts/cleanup_disk_space.py --manual --verbose
''',
    labels=['99-cleanup'],
    auto_init=False,
    allow_parallel=False
)
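
# The "external scheduling" mentioned above could be a crontab entry like the
# following (illustrative; adjust the repository path and flags to taste):
#   */30 * * * * cd /path/to/bakery-ia && python3 scripts/cleanup_disk_space.py --verbose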

# Disk space monitoring resource
local_resource(
    'disk-space-monitor',
    cmd='''
echo "DISK SPACE MONITORING"
echo "======================================"

# Get disk usage
df -h / | grep -v Filesystem | awk '{{print "Total: " $2 " | Used: " $3 " | Free: " $4 " | Usage: " $5}}'

# Get Docker disk usage
echo ""
echo "DOCKER DISK USAGE:"
docker system df

# Get Kubernetes disk usage (if available)
echo ""
echo "KUBERNETES DISK USAGE:"
kubectl get pvc -n bakery-ia --no-headers 2>/dev/null | awk '{{print "PVC: " $1 " | Status: " $2 " | Capacity: " $3 " | Used: " $4}}' || echo "  Kubernetes PVCs not available"

echo ""
echo "Cleanup Status:"
if [ "{disk_cleanup_enabled}" = "True" ]; then
    echo "  Automatic cleanup: ENABLED (every {disk_cleanup_frequency_minutes} minutes)"
    echo "  Threshold: {disk_space_threshold_gb}GB free space"
else
    echo "  Automatic cleanup: DISABLED"
    echo "  To enable: unset TILT_DISABLE_CLEANUP or set TILT_DISABLE_CLEANUP=false"
fi

echo ""
echo "Manual cleanup commands:"
echo "  tilt trigger manual-disk-cleanup    # Run cleanup now"
echo "  docker system prune -a              # Manual Docker cleanup"
echo "  kubectl delete jobs --all           # Clean up completed jobs"
'''.format(
        # Without this .format() call the {placeholders} above would never be
        # substituted: the shell would compare the literal string
        # "{disk_cleanup_enabled}" against "True" and always take the else branch.
        # The doubled braces in the awk programs escape to single braces.
        disk_cleanup_enabled=disk_cleanup_enabled,
        disk_cleanup_frequency_minutes=disk_cleanup_frequency_minutes,
        disk_space_threshold_gb=disk_space_threshold_gb,
    ),
    labels=['99-cleanup'],
    auto_init=False,
    allow_parallel=False
)

# Use the registry configuration defined at the top of the file
if use_dockerhub:
    print("""
DOCKER HUB MODE ENABLED
Images will be pushed to Docker Hub: docker.io/%s
Base images will be pulled from: %s/%s
Make sure you're logged in: docker login
To disable: unset USE_DOCKERHUB or set USE_DOCKERHUB=false
""" % (dockerhub_username, base_registry, python_image))
    default_registry('docker.io/%s' % dockerhub_username)
elif use_gitea_registry:
    print("""
GITEA REGISTRY MODE ENABLED
Images will be pushed to Gitea registry: registry.bakery-ia.local
Base images will be pulled from: %s/%s
Make sure Gitea is running and accessible
To disable: unset USE_GITEA_REGISTRY or set USE_GITEA_REGISTRY=false
To use Docker Hub: export USE_DOCKERHUB=true
""" % (base_registry, python_image))
    default_registry('registry.bakery-ia.local')
else:
    print("""
LOCAL REGISTRY MODE
Using local registry for faster builds: localhost:5001
Base images will be pulled from: %s/%s
This registry is created by kubernetes_restart.sh script
To use Docker Hub: export USE_DOCKERHUB=true
To use Gitea registry: export USE_GITEA_REGISTRY=true
To change base registry: export BASE_REGISTRY=<registry-url>
To change Python image: export PYTHON_IMAGE=<image:tag>
""" % (base_registry, python_image))
    default_registry('localhost:5001')
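
# To seed the local registry by hand (scripts/prepull-base-images.sh normally
# does this for you; the commands below are illustrative and assume the
# registry address and underscore naming configured above):
#   docker pull python:3.11-slim
#   docker tag python:3.11-slim localhost:5000/python_3.11-slim
#   docker push localhost:5000/python_3.11-slim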

# =============================================================================
# INGRESS HEALTH CHECK
# =============================================================================

# Check ingress status and readiness
local_resource(
    'ingress-status-check',
    cmd='''
echo "=========================================="
echo "CHECKING INGRESS STATUS AND READINESS"
echo "=========================================="

# Wait for ingress controller to be ready
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s

# Check ingress controller status
echo ""
echo "INGRESS CONTROLLER STATUS:"
kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller

# Wait for the project's ingress resources to be created
echo ""
echo "Waiting for project ingress resources to be created..."

# Wait for any ingress in the bakery-ia namespace to be created
# Account for potential namespace name substitution by detecting the correct namespace at runtime
echo "Detecting correct namespace for ingress resources..."

# The namespace name might have been substituted during kustomize processing
# Look for ingress resources in any namespace that could be ours
COUNT=0
MAX_COUNT=24  # 2 minutes with 5-second intervals
while [ $COUNT -lt $MAX_COUNT ]; do
    # Look for ingress resources in any namespace
    FOUND_INGRESS_NS=$(kubectl get ingress --all-namespaces --no-headers 2>/dev/null | grep -v "ingress-nginx" | head -1 | awk '{print $1}')

    if [ -n "$FOUND_INGRESS_NS" ]; then
        NAMESPACE="$FOUND_INGRESS_NS"
        echo "Found ingress resources in namespace: $NAMESPACE"
        break
    fi

    echo "Waiting for ingress resources to be created in any namespace..."
    sleep 5
    COUNT=$((COUNT + 1))
done

if [ $COUNT -eq $MAX_COUNT ]; then
    echo "Warning: No ingress resources found after timeout."
    echo "Listing all namespaces to help diagnose:"
    kubectl get namespaces
    echo "Listing all ingress resources:"
    kubectl get ingress --all-namespaces
    # Fallback to bakery-ia namespace
    NAMESPACE="bakery-ia"
else
    echo "Using detected namespace: $NAMESPACE"
fi

# Now wait for ingress resources in the detected namespace
COUNT=0
MAX_COUNT=24  # 2 minutes with 5-second intervals
while [ $COUNT -lt $MAX_COUNT ]; do
    # Check if namespace exists before querying ingress
    if kubectl get namespace "$NAMESPACE" &>/dev/null; then
        INGRESS_COUNT=$(kubectl get ingress -n "$NAMESPACE" --no-headers 2>/dev/null | wc -l)
        if [ "$INGRESS_COUNT" -gt 0 ]; then
            echo "Ingress resources found in $NAMESPACE namespace."
            break
        fi
    fi
    echo "Waiting for ingress resources in $NAMESPACE namespace to be created..."
    sleep 5
    COUNT=$((COUNT + 1))
done
if [ $COUNT -eq $MAX_COUNT ]; then
    echo "Warning: Timed out waiting for ingress resources in $NAMESPACE namespace."
    echo "Listing all namespaces to help diagnose:"
    kubectl get namespaces
    echo "Listing all ingress resources:"
    kubectl get ingress --all-namespaces
fi

# Wait for ingress to have address assigned (be more flexible about the name)
echo "Waiting for ingress to have address assigned..."
# Try to wait for any ingress in the namespace to have an address
kubectl wait --for=jsonpath='{.status.loadBalancer.ingress[0].ip}' ingress --all -n "$NAMESPACE" --timeout=30s 2>/dev/null || echo "Ingress may not have external IP (this is OK in Kind)"

# Check ingress resources
echo ""
echo "INGRESS RESOURCES:"
kubectl get ingress -A

# Check specific ingress for our namespace
echo ""
echo "BAKERY-IA INGRESS DETAILS:"
kubectl get ingress -n "$NAMESPACE" -o wide

# Check ingress load balancer status
echo ""
echo "INGRESS LOAD BALANCER STATUS:"
kubectl get svc -n ingress-nginx ingress-nginx-controller -o wide

# Wait a bit for ingress to fully initialize
sleep 10

# Verify ingress endpoints
echo ""
echo "INGRESS ENDPOINTS:"
kubectl get endpoints -n ingress-nginx

# Test connectivity to the ingress endpoints
echo ""
echo "TESTING INGRESS CONNECTIVITY:"
# Test if we can reach the ingress controller
kubectl exec -n ingress-nginx deployment/ingress-nginx-controller --container controller -- \
    /nginx-ingress-controller --version > /dev/null 2>&1 && echo "✓ Ingress controller accessible"

echo ""
echo "Ingress status check completed successfully!"
echo "Project ingress resources are ready for Gitea and other services."
echo "=========================================="
''',
    resource_deps=['apply-k8s-manifests'],  # Step 2 depends on Step 1
    labels=['00-ingress-check'],
    auto_init=True,
    allow_parallel=False
)

# =============================================================================
# SECURITY & INITIAL SETUP
# =============================================================================

print("""
======================================
Bakery IA Secure Development Mode
======================================

Security Features:
  TLS encryption for PostgreSQL and Redis
  Strong 32-character passwords
  PersistentVolumeClaims (no data loss)
  Column encryption: pgcrypto extension
  Audit logging: PostgreSQL query logging
  Object storage: MinIO with TLS for ML models

Monitoring:
  Service metrics available at /metrics endpoints
  Telemetry ready (traces, metrics, logs)
  SigNoz deployment optional for local dev (see the signoz-deploy resource)

Applying security configurations...
""")

# Apply security configurations before loading main manifests
# Security setup always depends on prepull-base-images to ensure images are cached
# When using Gitea registry, the dependency chain is:
#   ingress-status-check -> gitea -> prepull-base-images -> security-setup
# When using local registry, the dependency chain is:
#   prepull-base-images -> security-setup
security_resource_deps = ['prepull-base-images']  # Always depend on prepull

local_resource(
    'security-setup',
    cmd='''
echo "Applying security secrets and configurations..."

# First, ensure all required namespaces exist
echo "Creating namespaces..."
kubectl apply -f infrastructure/namespaces/bakery-ia.yaml
kubectl apply -f infrastructure/namespaces/tekton-pipelines.yaml

# Wait for namespaces to be ready
echo "Waiting for namespaces to be ready..."
for ns in bakery-ia tekton-pipelines; do
    until kubectl get namespace $ns 2>/dev/null; do
        echo "Waiting for namespace $ns to be created..."
        sleep 2
    done
    echo "Namespace $ns is available"
done

# Apply common secrets and configs
kubectl apply -f infrastructure/environments/common/configs/configmap.yaml
kubectl apply -f infrastructure/environments/common/configs/secrets.yaml

# Apply database secrets and configs
kubectl apply -f infrastructure/platform/storage/postgres/secrets/postgres-tls-secret.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-init-config.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-logging-config.yaml

# Apply Redis secrets
kubectl apply -f infrastructure/platform/storage/redis/secrets/redis-tls-secret.yaml

# Apply MinIO secrets and configs
kubectl apply -f infrastructure/platform/storage/minio/minio-secrets.yaml
kubectl apply -f infrastructure/platform/storage/minio/secrets/minio-tls-secret.yaml

# Mail/SMTP secrets need no separate apply - they are already included in
# common/configs/secrets.yaml above

# Apply CI/CD secrets
kubectl apply -f infrastructure/cicd/tekton-helm/templates/secrets.yaml

echo "Security configurations applied"
''',
    resource_deps=security_resource_deps,  # Conditional dependency based on registry usage
    labels=['00-security'],
    auto_init=True
)

# Verify TLS certificates are mounted correctly
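# A quick manual spot-check (illustrative; the secret names come from the
# manifests applied in security-setup, while the pod name and mount path below
# are assumptions that may differ in your environment):
#   kubectl get secrets -n bakery-ia | grep tls
#   kubectl exec -n bakery-ia deploy/auth-db -- ls /etc/postgresql/tls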

# =============================================================================
# LOAD KUBERNETES MANIFESTS
# =============================================================================

# Load the main kustomize overlay for the dev environment
k8s_yaml(kustomize('infrastructure/environments/dev/k8s-manifests'))

# Create a visible resource for applying Kubernetes manifests
local_resource(
    'apply-k8s-manifests',
    cmd='''
echo "=========================================="
echo "APPLYING KUBERNETES MANIFESTS"
echo "=========================================="
echo "Loading all Kubernetes resources including ingress configuration..."
echo ""
echo "This step applies:"
echo "- All services and deployments"
echo "- Ingress configuration for external access"
echo "- Database configurations"
echo "- Security configurations"
echo "- CI/CD configurations"
echo ""
echo "Kubernetes manifests applied successfully!"
echo "=========================================="
''',
    labels=['00-k8s-manifests'],
    auto_init=True,
    allow_parallel=False
)

# =============================================================================
# DOCKER BUILD HELPERS
# =============================================================================

# Helper function for Python services with live updates
# This function ensures services only rebuild when their specific code changes,
# but all services rebuild when the shared/ folder changes
def build_python_service(service_name, service_path):
    docker_build(
        'bakery/' + service_name,
        context='.',
        dockerfile='./services/' + service_path + '/Dockerfile',
        # Build arguments for environment-configurable base images
        build_args={
            'BASE_REGISTRY': base_registry,
            'PYTHON_IMAGE': python_image,
        },
        # Only watch files relevant to this specific service + shared code
        only=[
            './services/' + service_path,
            './shared',
            './scripts',
        ],
        live_update=[
            # Fall back to a full image build if the Dockerfile or requirements change
            fall_back_on([
                './services/' + service_path + '/Dockerfile',
                './services/' + service_path + '/requirements.txt',
                './shared/requirements-tracing.txt',
            ]),

            # Sync service code
            sync('./services/' + service_path, '/app'),

            # Sync shared libraries
            sync('./shared', '/app/shared'),

            # Sync scripts
            sync('./scripts', '/app/scripts'),

            # Install new dependencies if requirements.txt changes
            run(
                'pip install --no-cache-dir -r requirements.txt',
                trigger=['./services/' + service_path + '/requirements.txt']
            ),

            # Restart uvicorn on Python file changes (HUP signal triggers graceful reload)
            run(
                'kill -HUP 1',
                trigger=[
                    './services/' + service_path + '/**/*.py',
                    './shared/**/*.py'
                ]
            ),
        ],
        # Ignore common patterns that don't require rebuilds
        ignore=[
            '.git',
            '**/__pycache__',
            '**/*.pyc',
            '**/.pytest_cache',
            '**/node_modules',
            '**/.DS_Store'
        ]
    )

# =============================================================================
# INFRASTRUCTURE IMAGES
# =============================================================================

# Frontend (React + Vite)
frontend_debug_env = 'false'  # Default to false
if 'FRONTEND_DEBUG' in os.environ:
    frontend_debug_env = os.environ['FRONTEND_DEBUG']
frontend_debug = frontend_debug_env.lower() == 'true'

if frontend_debug:
    print("""
FRONTEND DEBUG MODE ENABLED
Building frontend with NO minification for easier debugging.
Full React error messages will be displayed.
To disable: unset FRONTEND_DEBUG or set FRONTEND_DEBUG=false
""")
else:
    print("""
FRONTEND PRODUCTION MODE
Building frontend with minification for optimized performance.
To enable debug mode: export FRONTEND_DEBUG=true
""")

docker_build(
    'bakery/dashboard',
    context='./frontend',
    dockerfile='./frontend/Dockerfile.kubernetes.debug' if frontend_debug else './frontend/Dockerfile.kubernetes',
    live_update=[
        sync('./frontend/src', '/app/src'),
        sync('./frontend/public', '/app/public'),
    ],
    build_args={
        'NODE_OPTIONS': '--max-old-space-size=8192'
    },
    ignore=[
        'playwright-report/**',
        'test-results/**',
        'node_modules/**',
        '.DS_Store'
    ]
)

# Gateway
docker_build(
    'bakery/gateway',
    context='.',
    dockerfile='./gateway/Dockerfile',
    # Build arguments for environment-configurable base images
    build_args={
        'BASE_REGISTRY': base_registry,
        'PYTHON_IMAGE': python_image,
    },
    # Only watch gateway-specific files and shared code
    only=[
        './gateway',
        './shared',
        './scripts',
    ],
    live_update=[
        fall_back_on([
            './gateway/Dockerfile',
            './gateway/requirements.txt',
            './shared/requirements-tracing.txt',
        ]),
        sync('./gateway', '/app'),
        sync('./shared', '/app/shared'),
        sync('./scripts', '/app/scripts'),
        run('kill -HUP 1', trigger=['./gateway/**/*.py', './shared/**/*.py']),
    ],
    ignore=[
        '.git',
        '**/__pycache__',
        '**/*.pyc',
        '**/.pytest_cache',
        '**/node_modules',
        '**/.DS_Store'
    ]
)

# =============================================================================
# MICROSERVICE IMAGES
# =============================================================================

# Core Services
build_python_service('auth-service', 'auth')
build_python_service('tenant-service', 'tenant')

# Data & Analytics Services
build_python_service('training-service', 'training')
build_python_service('forecasting-service', 'forecasting')
build_python_service('ai-insights-service', 'ai_insights')

# Operations Services
build_python_service('sales-service', 'sales')
build_python_service('inventory-service', 'inventory')
build_python_service('production-service', 'production')
build_python_service('procurement-service', 'procurement')
build_python_service('distribution-service', 'distribution')

# Supporting Services
build_python_service('recipes-service', 'recipes')
build_python_service('suppliers-service', 'suppliers')
build_python_service('pos-service', 'pos')
build_python_service('orders-service', 'orders')
build_python_service('external-service', 'external')

# Platform Services
build_python_service('notification-service', 'notification')
build_python_service('alert-processor', 'alert_processor')
build_python_service('orchestrator-service', 'orchestrator')

# Demo Services
build_python_service('demo-session-service', 'demo_session')

# Tell Tilt that demo-cleanup-worker uses the demo-session-service image
k8s_image_json_path(
    'bakery/demo-session-service',
    '{.spec.template.spec.containers[?(@.name=="worker")].image}',
    name='demo-cleanup-worker'
)
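
# For reference, the JSONPath above matches a workload shaped like this
# (illustrative snippet, not a manifest from this repo):
#
#   spec:
#     template:
#       spec:
#         containers:
#           - name: worker
#             image: bakery/demo-session-service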

# =============================================================================
# INFRASTRUCTURE RESOURCES
# =============================================================================

# Redis & RabbitMQ
k8s_resource('redis', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('rabbitmq', resource_deps=['security-setup'], labels=['01-infrastructure'])

# MinIO Storage
k8s_resource('minio', resource_deps=['security-setup'], labels=['01-infrastructure'])
k8s_resource('minio-bucket-init', resource_deps=['minio'], labels=['01-infrastructure'])

# Unbound DNSSEC Resolver - Infrastructure component for Mailu DNS validation
local_resource(
    'unbound-helm',
    cmd='''
echo "Deploying Unbound DNS resolver via Helm..."
echo ""

# Check if Unbound is already deployed
if helm list -n bakery-ia | grep -q unbound; then
    echo "Unbound already deployed, checking status..."
    helm status unbound -n bakery-ia
else
    echo "Installing Unbound..."

    # Determine environment (dev or prod) based on context
    ENVIRONMENT="dev"
    if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
        ENVIRONMENT="prod"
    fi

    echo "Environment detected: $ENVIRONMENT"

    # Install Unbound with appropriate values
    # (both branches use the chart under infrastructure/platform/networking/dns,
    # matching the values file paths below)
    if [ "$ENVIRONMENT" = "dev" ]; then
        helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
            -f infrastructure/platform/networking/dns/unbound-helm/dev/values.yaml \
            --timeout 5m \
            --wait
    else
        helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
            -f infrastructure/platform/networking/dns/unbound-helm/prod/values.yaml \
            --timeout 5m \
            --wait
    fi

    echo ""
    echo "Unbound deployment completed"
fi

echo ""
echo "Unbound DNS Service Information:"
echo "  Service Name: unbound-dns.bakery-ia.svc.cluster.local"
echo "  Ports: UDP/TCP 53"
echo "  Used by: Mailu for DNS validation"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep unbound"
''',
    resource_deps=['security-setup'],
    labels=['01-infrastructure'],
    auto_init=True  # Auto-deploy with Tilt startup
)
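
# To spot-check DNSSEC resolution through Unbound once it is up (illustrative;
# the alpine/bind-tools image is an assumption - any image that ships dig works):
#   kubectl run dig-test --rm -it --restart=Never --image=alpine/bind-tools -n bakery-ia -- \
#     dig @unbound-dns.bakery-ia.svc.cluster.local cloudflare.com +dnssec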

# Mail Infrastructure (Mailu) - Manual trigger for Helm deployment
local_resource(
    'mailu-helm',
    cmd='''
echo "Deploying Mailu via Helm..."
echo ""

# =====================================================
# Step 1: Ensure Unbound is deployed and get its IP
# =====================================================
echo "Checking Unbound DNS resolver..."
if ! kubectl get svc unbound-dns -n bakery-ia &>/dev/null; then
    echo "ERROR: Unbound DNS service not found!"
    echo "Please deploy Unbound first by triggering the 'unbound-helm' resource"
    exit 1
fi

UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
echo "Unbound DNS service IP: $UNBOUND_IP"

# =====================================================
# Step 2: Configure CoreDNS to forward to Unbound
# =====================================================
echo ""
echo "Configuring CoreDNS to forward external queries to Unbound for DNSSEC validation..."

# Check current CoreDNS forward configuration
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'forward \\. [0-9.]*' | awk '{print $3}')

if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
    echo "Updating CoreDNS to forward to Unbound ($UNBOUND_IP)..."

    # Patch CoreDNS ConfigMap
    kubectl patch configmap coredns -n kube-system --type merge -p "{
      \\"data\\": {
        \\"Corefile\\": \\".:53 {\\\\n errors\\\\n health {\\\\n lameduck 5s\\\\n }\\\\n ready\\\\n kubernetes cluster.local in-addr.arpa ip6.arpa {\\\\n pods insecure\\\\n fallthrough in-addr.arpa ip6.arpa\\\\n ttl 30\\\\n }\\\\n prometheus :9153\\\\n forward . $UNBOUND_IP {\\\\n max_concurrent 1000\\\\n }\\\\n cache 30 {\\\\n disable success cluster.local\\\\n disable denial cluster.local\\\\n }\\\\n loop\\\\n reload\\\\n loadbalance\\\\n}\\\\n\\"
      }
    }"

    # Restart CoreDNS
    kubectl rollout restart deployment coredns -n kube-system
    echo "Waiting for CoreDNS to restart..."
    kubectl rollout status deployment coredns -n kube-system --timeout=60s
    echo "CoreDNS configured successfully"
else
    echo "CoreDNS already configured to forward to Unbound"
fi

# =====================================================
# Step 3: Create self-signed TLS certificate for Mailu Front
# =====================================================
echo ""
echo "Checking Mailu TLS certificates..."

if ! kubectl get secret mailu-certificates -n bakery-ia &>/dev/null; then
    echo "Creating self-signed TLS certificate for Mailu Front..."

    # Generate certificate in temp directory
    TEMP_DIR=$(mktemp -d)
    cd "$TEMP_DIR"

    openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
        -keyout tls.key -out tls.crt \
        -subj "/CN=mail.bakery-ia.dev/O=bakery-ia" 2>/dev/null

    kubectl create secret tls mailu-certificates \
        --cert=tls.crt \
        --key=tls.key \
        -n bakery-ia

    rm -rf "$TEMP_DIR"
    echo "TLS certificate created"
else
    echo "Mailu TLS certificate already exists"
fi

# =====================================================
# Step 4: Deploy Mailu via Helm
# =====================================================
echo ""

# Check if Mailu is already deployed
if helm list -n bakery-ia | grep -q mailu; then
    echo "Mailu already deployed, checking status..."
    helm status mailu -n bakery-ia
else
    echo "Installing Mailu..."

    # Add Mailu Helm repository if not already added
    helm repo add mailu https://mailu.github.io/helm-charts 2>/dev/null || true
    helm repo update mailu

    # Determine environment (dev or prod) based on context
    ENVIRONMENT="dev"
    if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
        ENVIRONMENT="prod"
    fi

    echo "Environment detected: $ENVIRONMENT"

    # Install Mailu with appropriate values
    if [ "$ENVIRONMENT" = "dev" ]; then
        helm upgrade --install mailu mailu/mailu \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/mail/mailu-helm/values.yaml \
            -f infrastructure/platform/mail/mailu-helm/dev/values.yaml \
            --timeout 10m
    else
        helm upgrade --install mailu mailu/mailu \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/mail/mailu-helm/values.yaml \
            -f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
            --timeout 10m
    fi

    echo ""
    echo "Mailu deployment completed"
fi

# =====================================================
# Step 5: Wait for pods and show status
# =====================================================
echo ""
echo "Waiting for Mailu pods to be ready..."
sleep 10

echo ""
echo "Mailu Pod Status:"
kubectl get pods -n bakery-ia | grep mailu

echo ""
echo "Mailu Access Information:"
echo "  Admin Panel: https://mail.bakery-ia.dev/admin"
echo "  Webmail:     https://mail.bakery-ia.dev/webmail"
echo "  SMTP:        mail.bakery-ia.dev:587 (STARTTLS)"
echo "  IMAP:        mail.bakery-ia.dev:993 (SSL/TLS)"
echo ""
echo "To create admin user:"
echo "  kubectl exec -it -n bakery-ia deployment/mailu-admin -- flask mailu admin admin bakery-ia.local 'YourPassword123!'"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep mailu"
''',
    resource_deps=['unbound-helm'],  # Ensure Unbound is deployed first
    labels=['01-infrastructure'],
    auto_init=False,  # Manual trigger only
)

# Nominatim Geocoding - Manual trigger for Helm deployment
local_resource(
    'nominatim-helm',
    cmd='''
echo "Deploying Nominatim geocoding service via Helm..."
echo ""

# Check if Nominatim is already deployed
if helm list -n bakery-ia | grep -q nominatim; then
    echo "Nominatim already deployed, checking status..."
    helm status nominatim -n bakery-ia
else
    echo "Installing Nominatim..."

    # Determine environment (dev or prod) based on context
    ENVIRONMENT="dev"
    if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
        ENVIRONMENT="prod"
    fi

    echo "Environment detected: $ENVIRONMENT"

    # Install Nominatim with appropriate values
    if [ "$ENVIRONMENT" = "dev" ]; then
        helm upgrade --install nominatim infrastructure/platform/nominatim/nominatim-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/nominatim/nominatim-helm/values.yaml \
            -f infrastructure/platform/nominatim/nominatim-helm/dev/values.yaml \
            --timeout 10m \
            --wait
    else
        helm upgrade --install nominatim infrastructure/platform/nominatim/nominatim-helm \
            -n bakery-ia \
            --create-namespace \
            -f infrastructure/platform/nominatim/nominatim-helm/values.yaml \
            -f infrastructure/platform/nominatim/nominatim-helm/prod/values.yaml \
            --timeout 10m \
            --wait
    fi

    echo ""
    echo "Nominatim deployment completed"
fi

echo ""
echo "Nominatim Service Information:"
echo "  Service Name: nominatim-service.bakery-ia.svc.cluster.local"
echo "  Port: 8080"
echo "  Health Check: http://nominatim-service:8080/status"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep nominatim"
echo "To check Helm release: helm status nominatim -n bakery-ia"
''',
    labels=['01-infrastructure'],
    auto_init=False,  # Manual trigger only
)

# =============================================================================
# MONITORING RESOURCES - SigNoz (Unified Observability)
# =============================================================================

# Deploy SigNoz using Helm with automatic deployment and progress tracking
local_resource(
    'signoz-deploy',
    cmd='''
echo "Deploying SigNoz Monitoring Stack..."
echo ""

# Check if SigNoz is already deployed
if helm list -n bakery-ia | grep -q signoz; then
    echo "SigNoz already deployed, checking status..."
    helm status signoz -n bakery-ia
else
    echo "Installing SigNoz..."

    # Add SigNoz Helm repository if not already added
    helm repo add signoz https://charts.signoz.io 2>/dev/null || true
    helm repo update signoz

    # Install SigNoz with custom values in the bakery-ia namespace
    helm upgrade --install signoz signoz/signoz \
        -n bakery-ia \
        -f infrastructure/monitoring/signoz/signoz-values-dev.yaml \
        --timeout 10m \
        --wait

    echo ""
    echo "SigNoz deployment completed"
fi

echo ""
echo "SigNoz Access Information:"
echo "  URL: https://monitoring.bakery-ia.local"
echo "  Username: admin"
echo "  Password: admin"
echo ""
echo "OpenTelemetry Collector Endpoints:"
echo "  gRPC: localhost:4317"
echo "  HTTP: localhost:4318"
echo ""
echo "To check pod status: kubectl get pods -n bakery-ia | grep signoz"
''',
    labels=['05-monitoring'],
    auto_init=False,
)

# Deploy Flux CD using Helm with automatic deployment and progress tracking
local_resource(
    'flux-cd-deploy',
    cmd='''
echo "Deploying Flux CD GitOps Toolkit..."
echo ""

# Check if Flux is already deployed
if helm list -n flux-system | grep -q flux-cd; then
    echo "Flux CD already deployed, checking status..."
    helm status flux-cd -n flux-system
else
    echo "Installing Flux CD..."

    # Install the Flux CLI and CRDs first if not already installed
    if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
        echo "Installing Flux CRDs..."
        curl -sL https://fluxcd.io/install.sh | sudo bash
        flux install --namespace=flux-system --network-policy=false
    fi

    # Create the namespace if it doesn't exist
    kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply -f -

    # Install Flux CD with custom values using the local chart
    helm upgrade --install flux-cd infrastructure/cicd/flux \
        -n flux-system \
        --create-namespace \
        --timeout 10m \
        --wait

    echo ""
    echo "Flux CD deployment completed"
fi

echo ""
echo "Flux CD Access Information:"
echo "To check status: flux check"
echo "To check GitRepository: kubectl get gitrepository -n flux-system"
echo "To check Kustomization: kubectl get kustomization -n flux-system"
echo ""
echo "To check pod status: kubectl get pods -n flux-system"
''',
    labels=['99-cicd'],
    auto_init=False,
)
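
# Once Flux is running, a minimal GitRepository/Kustomization pair would look
# like this (illustrative sketch only; the URL and path are placeholders, not
# resources shipped in this repo):
#
#   apiVersion: source.toolkit.fluxcd.io/v1
#   kind: GitRepository
#   metadata:
#     name: bakery-ia
#     namespace: flux-system
#   spec:
#     interval: 1m
#     url: https://gitea.bakery-ia.local/bakery-admin/bakery-ia.git
#     ref:
#       branch: main
#   ---
#   apiVersion: kustomize.toolkit.fluxcd.io/v1
#   kind: Kustomization
#   metadata:
#     name: bakery-ia
#     namespace: flux-system
#   spec:
#     interval: 5m
#     sourceRef:
#       kind: GitRepository
#       name: bakery-ia
#     path: ./infrastructure/environments/dev/k8s-manifests
#     prune: true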

# Optional exporters (in monitoring namespace) - DISABLED since using SigNoz
# k8s_resource('node-exporter', labels=['05-monitoring'])
# k8s_resource('postgres-exporter', resource_deps=['auth-db'], labels=['05-monitoring'])

# =============================================================================
# DATABASE RESOURCES
# =============================================================================

# Core Service Databases
k8s_resource('auth-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('tenant-db', resource_deps=['security-setup'], labels=['06-databases'])

# Data & Analytics Databases
k8s_resource('training-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('forecasting-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('ai-insights-db', resource_deps=['security-setup'], labels=['06-databases'])

# Operations Databases
k8s_resource('sales-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('inventory-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('production-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('procurement-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('distribution-db', resource_deps=['security-setup'], labels=['06-databases'])

# Supporting Service Databases
k8s_resource('recipes-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('suppliers-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('pos-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('orders-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('external-db', resource_deps=['security-setup'], labels=['06-databases'])

# Platform Service Databases
k8s_resource('notification-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('alert-processor-db', resource_deps=['security-setup'], labels=['06-databases'])
k8s_resource('orchestrator-db', resource_deps=['security-setup'], labels=['06-databases'])

# Demo Service Databases
k8s_resource('demo-session-db', resource_deps=['security-setup'], labels=['06-databases'])

# =============================================================================
# MIGRATION JOBS
# =============================================================================

# Core Service Migrations
k8s_resource('auth-migration', resource_deps=['auth-db'], labels=['07-migrations'])
k8s_resource('tenant-migration', resource_deps=['tenant-db'], labels=['07-migrations'])

# Data & Analytics Migrations
k8s_resource('training-migration', resource_deps=['training-db'], labels=['07-migrations'])
k8s_resource('forecasting-migration', resource_deps=['forecasting-db'], labels=['07-migrations'])
k8s_resource('ai-insights-migration', resource_deps=['ai-insights-db'], labels=['07-migrations'])

# Operations Migrations
k8s_resource('sales-migration', resource_deps=['sales-db'], labels=['07-migrations'])
k8s_resource('inventory-migration', resource_deps=['inventory-db'], labels=['07-migrations'])
k8s_resource('production-migration', resource_deps=['production-db'], labels=['07-migrations'])
k8s_resource('procurement-migration', resource_deps=['procurement-db'], labels=['07-migrations'])
k8s_resource('distribution-migration', resource_deps=['distribution-db'], labels=['07-migrations'])

# Supporting Service Migrations
k8s_resource('recipes-migration', resource_deps=['recipes-db'], labels=['07-migrations'])
k8s_resource('suppliers-migration', resource_deps=['suppliers-db'], labels=['07-migrations'])
k8s_resource('pos-migration', resource_deps=['pos-db'], labels=['07-migrations'])
k8s_resource('orders-migration', resource_deps=['orders-db'], labels=['07-migrations'])
k8s_resource('external-migration', resource_deps=['external-db'], labels=['07-migrations'])

# Platform Service Migrations
k8s_resource('notification-migration', resource_deps=['notification-db'], labels=['07-migrations'])
k8s_resource('alert-processor-migration', resource_deps=['alert-processor-db'], labels=['07-migrations'])
k8s_resource('orchestrator-migration', resource_deps=['orchestrator-db'], labels=['07-migrations'])

# Demo Service Migrations
k8s_resource('demo-session-migration', resource_deps=['demo-session-db'], labels=['07-migrations'])

# =============================================================================
# DATA INITIALIZATION JOBS
# =============================================================================

k8s_resource('external-data-init', resource_deps=['external-migration', 'redis'], labels=['08-data-init'])

# =============================================================================
# APPLICATION SERVICES
# =============================================================================

# Core Services
k8s_resource('auth-service', resource_deps=['auth-migration', 'redis'], labels=['09-services-core'])
k8s_resource('tenant-service', resource_deps=['tenant-migration', 'redis'], labels=['09-services-core'])

# Data & Analytics Services
k8s_resource('training-service', resource_deps=['training-migration', 'redis'], labels=['10-services-analytics'])
k8s_resource('forecasting-service', resource_deps=['forecasting-migration', 'redis'], labels=['10-services-analytics'])
k8s_resource('ai-insights-service', resource_deps=['ai-insights-migration', 'redis', 'forecasting-service', 'production-service', 'procurement-service'], labels=['10-services-analytics'])

# Operations Services
k8s_resource('sales-service', resource_deps=['sales-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('inventory-service', resource_deps=['inventory-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('production-service', resource_deps=['production-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('procurement-service', resource_deps=['procurement-migration', 'redis'], labels=['11-services-operations'])
k8s_resource('distribution-service', resource_deps=['distribution-migration', 'redis', 'rabbitmq'], labels=['11-services-operations'])

# Supporting Services
k8s_resource('recipes-service', resource_deps=['recipes-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('suppliers-service', resource_deps=['suppliers-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('pos-service', resource_deps=['pos-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('orders-service', resource_deps=['orders-migration', 'redis'], labels=['12-services-supporting'])
k8s_resource('external-service', resource_deps=['external-migration', 'external-data-init', 'redis'], labels=['12-services-supporting'])

# Platform Services
k8s_resource('notification-service', resource_deps=['notification-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
k8s_resource('alert-processor', resource_deps=['alert-processor-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
k8s_resource('orchestrator-service', resource_deps=['orchestrator-migration', 'redis'], labels=['13-services-platform'])

# Demo Services
k8s_resource('demo-session-service', resource_deps=['demo-session-migration', 'redis'], labels=['14-services-demo'])
k8s_resource('demo-cleanup-worker', resource_deps=['demo-session-service', 'redis'], labels=['14-services-demo'])

# =============================================================================
# FRONTEND & GATEWAY
# =============================================================================

k8s_resource('gateway', resource_deps=['auth-service'], labels=['15-frontend'])
k8s_resource('frontend', resource_deps=['gateway'], labels=['15-frontend'])

# =============================================================================
# CRONJOBS (Remaining K8s CronJobs)
# =============================================================================

k8s_resource('demo-session-cleanup', resource_deps=['demo-session-service'], labels=['16-cronjobs'])
k8s_resource('external-data-rotation', resource_deps=['external-service'], labels=['16-cronjobs'])

# =============================================================================
# WATCH SETTINGS
# =============================================================================

watch_settings(
    ignore=[
        '.git/**',
        '**/__pycache__/**',
        '**/*.pyc',
        '**/.pytest_cache/**',
        '**/node_modules/**',
        '**/.DS_Store',
        '**/*.swp',
        '**/*.swo',
        '**/.venv/**',
        '**/venv/**',
        '**/.mypy_cache/**',
        '**/.ruff_cache/**',
        '**/.tox/**',
        '**/htmlcov/**',
        '**/.coverage',
        '**/dist/**',
        '**/build/**',
        '**/*.egg-info/**',
        '**/infrastructure/tls/**/*.pem',
        '**/infrastructure/tls/**/*.cnf',
        '**/infrastructure/tls/**/*.csr',
        '**/infrastructure/tls/**/*.srl',
        '**/*.tmp',
        '**/*.tmp.*',
        '**/migrations/versions/*.tmp.*',
        '**/playwright-report/**',
        '**/test-results/**',
    ]
)

# =============================================================================
# CI/CD INFRASTRUCTURE - MANUAL TRIGGERS
# =============================================================================

# Tekton Pipelines - Manual trigger for local development using Helm
local_resource(
    'tekton-pipelines',
    cmd='''
echo "Setting up Tekton Pipelines for CI/CD using Helm..."
echo ""

# Check if Tekton CRDs are already installed
if kubectl get crd pipelines.tekton.dev >/dev/null 2>&1; then
    echo "  Tekton CRDs already installed"
else
    echo "  Installing Tekton (latest release)..."
    kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml

    echo "  Waiting for Tekton to be ready..."
    kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-controller -n tekton-pipelines
    kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-webhook -n tekton-pipelines

    echo "  Tekton installed and ready"
fi

echo ""
echo "Installing Tekton configurations via Helm..."

# helm upgrade --install is idempotent, so the same command handles both a
# fresh install and an update of the Tekton CICD release
if helm list -n tekton-pipelines | grep -q tekton-cicd; then
    echo "  Updating existing Tekton CICD deployment..."
else
    echo "  Installing new Tekton CICD deployment..."
fi
helm upgrade --install tekton-cicd infrastructure/cicd/tekton-helm \
    -n tekton-pipelines \
    --create-namespace \
    --timeout 10m \
    --wait

echo ""
echo "Tekton setup complete!"
echo "To check status: kubectl get pods -n tekton-pipelines"
echo "To check Helm release: helm status tekton-cicd -n tekton-pipelines"
''',
    labels=['99-cicd'],
    auto_init=False,  # Manual trigger only
)

# Gitea - Auto-install when Gitea registry is enabled
gitea_enabled = use_gitea_registry  # Enable Gitea when using Gitea registry
if 'ENABLE_GITEA' in os.environ:
    gitea_enabled = os.environ['ENABLE_GITEA'].lower() == 'true'

if gitea_enabled:
    local_resource(
        'gitea',
        cmd='''
echo "Setting up Gitea for local Git server and container registry..."
echo ""

# Wait for ingress controller to be ready before proceeding
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s

# Verify ingress resources are properly configured
echo "Verifying ingress configuration..."
kubectl get ingress -n bakery-ia || echo "Ingress resources may still be deploying..."

# Small delay to ensure ingress is fully operational
sleep 10

# Create namespace
kubectl create namespace gitea || true

# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh

# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml

# Wait for Gitea to be ready before proceeding
echo "Waiting for Gitea to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gitea -n gitea --timeout=300s

# Check if admin user already exists by attempting to get user list
# (grep -c already prints 0 when nothing matches, so only swallow its non-zero
# exit status instead of echoing a second "0" into the variable)
echo "Checking if admin user already exists..."
ADMIN_EXISTS=$(kubectl exec -n gitea -it deployment/gitea --container gitea -- \
    /usr/local/bin/gitea admin user list --admin | grep -c "bakery-admin" || true)

if [ "$ADMIN_EXISTS" -eq 0 ]; then
    echo "Creating Gitea admin user..."

    # Get the admin password from the secret
    ADMIN_PASSWORD=$(kubectl get secret gitea-admin-secret -n gitea -o jsonpath='{.data.password}' | base64 -d)

    # Create the admin user
    kubectl exec -n gitea -it deployment/gitea --container gitea -- \
        /usr/local/bin/gitea admin user create \
        --username bakery-admin \
        --password "$ADMIN_PASSWORD" \
        --email admin@bakery-ia.local \
        --admin \
        --must-change-password=false

    echo "Gitea admin user 'bakery-admin' created successfully!"
else
    echo "Gitea admin user 'bakery-admin' already exists."
fi

echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: https://gitea.bakery-ia.local (for dev) or https://gitea.bakewise.ai (for prod)"
echo "Registry URL: https://registry.bakery-ia.local or gitea.bakery-ia.local:5000"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
''',
        resource_deps=['ingress-status-check'],  # Depend on ingress check to ensure routing is ready
        labels=['99-cicd'],
        auto_init=True,  # Auto-install when enabled
        allow_parallel=False
    )
else:
    # Manual trigger option for when Gitea registry is not enabled but user wants Gitea
    local_resource(
        'gitea',
        cmd='''
echo "Setting up Gitea for local Git server and container registry..."
echo ""

# Wait for ingress controller to be ready before proceeding
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s

# Verify ingress resources are properly configured
echo "Verifying ingress configuration..."
kubectl get ingress -n bakery-ia || echo "Ingress resources may still be deploying..."

# Small delay to ensure ingress is fully operational
sleep 10

# Create namespace
kubectl create namespace gitea || true

# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh

# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml

# Wait for Gitea to be ready before proceeding
echo "Waiting for Gitea to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gitea -n gitea --timeout=300s

# Check if admin user already exists by attempting to get user list
# (see the note on grep -c in the branch above)
echo "Checking if admin user already exists..."
ADMIN_EXISTS=$(kubectl exec -n gitea -it deployment/gitea --container gitea -- \
    /usr/local/bin/gitea admin user list --admin | grep -c "bakery-admin" || true)

if [ "$ADMIN_EXISTS" -eq 0 ]; then
    echo "Creating Gitea admin user..."

    # Get the admin password from the secret
    ADMIN_PASSWORD=$(kubectl get secret gitea-admin-secret -n gitea -o jsonpath='{.data.password}' | base64 -d)

    # Create the admin user
    kubectl exec -n gitea -it deployment/gitea --container gitea -- \
        /usr/local/bin/gitea admin user create \
        --username bakery-admin \
        --password "$ADMIN_PASSWORD" \
        --email admin@bakery-ia.local \
        --admin \
        --must-change-password=false

    echo "Gitea admin user 'bakery-admin' created successfully!"
else
    echo "Gitea admin user 'bakery-admin' already exists."
fi

echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: http://gitea.bakery-ia.local (for dev) or http://gitea.bakewise.ai (for prod)"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
''',
        labels=['99-cicd'],
        auto_init=False,  # Manual trigger only
    )

# =============================================================================
# STARTUP SUMMARY
# =============================================================================

print("""
Security setup complete!

Database Security Features Active:
  TLS encryption: PostgreSQL and Redis
  Strong passwords: 32-character cryptographic
  Persistent storage: PVCs for all databases
  Column encryption: pgcrypto extension
  Audit logging: PostgreSQL query logging

Internal Schedulers Active:
  Alert Priority Recalculation: Hourly @ :15 (alert-processor)
  Usage Tracking: Daily @ 2:00 AM UTC (tenant-service)
  Disk Cleanup: Every {disk_cleanup_frequency_minutes} minutes (threshold: {disk_space_threshold_gb}GB)

Access your application:
  Main Application: https://bakery-ia.local
  API Endpoints: https://bakery-ia.local/api/v1/...
  Local Access: https://localhost

Service Metrics:
  Gateway: http://localhost:8000/metrics
  Any Service: kubectl port-forward <service> 8000:8000

SigNoz (Unified Observability):
  Deploy via Tilt: Trigger the 'signoz-deploy' resource
  Manual deploy: ./infrastructure/monitoring/signoz/deploy-signoz.sh dev
  Access (if deployed): https://monitoring.bakery-ia.local
  Username: admin
  Password: admin

CI/CD Infrastructure:
  Tekton: Trigger the 'tekton-pipelines' resource
  Flux: Trigger the 'flux-cd-deploy' resource
  Gitea: Auto-installed when USE_GITEA_REGISTRY=true, or trigger manually

Verify security:
  kubectl get pvc -n bakery-ia
  kubectl get secrets -n bakery-ia | grep tls
  kubectl logs -n bakery-ia <db-pod> | grep SSL

Verify schedulers:
  kubectl exec -it -n bakery-ia deployment/alert-processor -- curl localhost:8000/scheduler/status
  kubectl logs -f -n bakery-ia -l app=tenant-service | grep "usage tracking"

Documentation:
  docs/SECURITY_IMPLEMENTATION_COMPLETE.md
  docs/DATABASE_SECURITY_ANALYSIS_REPORT.md

Build Optimization Active:
  Services only rebuild when their code changes
  Shared folder changes trigger ALL services (as expected)
  Reduces unnecessary rebuilds and disk usage
  Edit service code: only that service rebuilds
  Edit shared/ code: all services rebuild (required)

Useful Commands:
  # Work on specific services only
  tilt up <service-name> <service-name>

  # View logs by label
  tilt logs 09-services-core
  tilt logs 13-services-platform

DNS Configuration:
  # To access the application via domain names, add these entries to your hosts file:
  # sudo nano /etc/hosts
  # Add these lines:
  # 127.0.0.1 bakery-ia.local
  # 127.0.0.1 monitoring.bakery-ia.local

======================================
""".format(
    # Without .format() the scheduler line would print the literal
    # {placeholders} instead of the configured values.
    disk_cleanup_frequency_minutes=disk_cleanup_frequency_minutes,
    disk_space_threshold_gb=disk_space_threshold_gb,
))