Add new infra architecture 11

This commit is contained in:
Urtzi Alfaro
2026-01-20 22:05:10 +01:00
parent 0217ad83be
commit 2512de4173
42 changed files with 1056 additions and 874 deletions

680
Tiltfile
View File

@@ -22,14 +22,10 @@
# Docker registry configuration
# Set USE_DOCKERHUB=true environment variable to push images to Docker Hub
# Set USE_GITEA_REGISTRY=true environment variable to push images to Gitea registry
# Otherwise, uses local registry for faster builds and deployments
use_dockerhub = False # Default to False
use_gitea_registry = True # Default to False - Gitea registry not working currently
# Otherwise, uses local kind registry for faster builds and deployments
use_dockerhub = False # Use local kind registry by default
if 'USE_DOCKERHUB' in os.environ:
use_dockerhub = os.environ['USE_DOCKERHUB'].lower() == 'true'
if 'USE_GITEA_REGISTRY' in os.environ:
use_gitea_registry = os.environ['USE_GITEA_REGISTRY'].lower() == 'true'
dockerhub_username = 'uals' # Default username
if 'DOCKERHUB_USERNAME' in os.environ:
@@ -37,8 +33,8 @@ if 'DOCKERHUB_USERNAME' in os.environ:
# Base image registry configuration for Dockerfile ARGs
# This controls where the base Python image is pulled from during builds
base_registry = 'localhost:5000' # Default for local dev
python_image = 'python_3.11-slim' # Local registry uses underscores
base_registry = 'localhost:5000' # Default for local dev (kind registry)
python_image = 'python_3_11_slim' # Local registry uses underscores (matches prepull naming)
if 'BASE_REGISTRY' in os.environ:
base_registry = os.environ['BASE_REGISTRY']
@@ -50,125 +46,48 @@ if use_dockerhub:
base_registry = 'docker.io'
python_image = 'python:3.11-slim'
# For Gitea registry mode
# Gitea registry is accessed via the registry subdomain (TLS terminated at ingress)
# However, for internal cluster builds (like Kaniko), we need to use the internal service name
if use_gitea_registry:
# For external access (ingress): registry.bakery-ia.local
# For internal cluster access: gitea-http.gitea.svc.cluster.local:3000
base_registry = 'gitea-http.gitea.svc.cluster.local:3000'
python_image = 'python:3.11-slim'
# Add fallback to local registry if Gitea registry is not available
fallback_registry = 'localhost:5001'
# =============================================================================
# PREPULL BASE IMAGES STEP - CRITICAL FIRST STEP
# PREPULL BASE IMAGES - RUNS AFTER SECURITY SETUP
# =============================================================================
# Dependency order: apply-k8s-manifests -> security-setup -> ingress-status-check
# -> kind-cluster-configuration -> prepull-base-images
# Run the prepull script - if this fails, don't continue
# When using Gitea registry, make sure Gitea is available first
if use_gitea_registry:
local_resource(
'prepull-base-images',
cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "Using Gitea Registry Mode"
echo "=========================================="
# Prepull runs AFTER security setup to ensure registry is available
local_resource(
'prepull-base-images',
cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "STARTING PRE PULL WITH PROPER DEPENDENCIES"
echo "=========================================="
echo ""
# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=true
export USE_LOCAL_REGISTRY=false
# Wait for Gitea registry to be accessible
echo "Waiting for Gitea registry to be accessible..."
echo "Registry URL: registry.bakery-ia.local (via ingress)"
MAX_RETRIES=30
RETRY_COUNT=0
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
# Try HTTPS via ingress (registry.bakery-ia.local routes to gitea-http:3000)
if curl -sk https://registry.bakery-ia.local/v2/ >/dev/null 2>&1; then
echo "✓ Gitea registry is accessible via HTTPS"
break
fi
# Also try directly via Gitea HTTP service within cluster
if curl -s http://gitea-http.gitea.svc.cluster.local:3000/v2/ >/dev/null 2>&1; then
echo "✓ Gitea registry is accessible via internal service"
break
fi
echo " Waiting for Gitea registry... (attempt $((RETRY_COUNT+1))/$MAX_RETRIES)"
sleep 10
RETRY_COUNT=$((RETRY_COUNT+1))
done
if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
echo "⚠ Warning: Gitea registry not accessible after $MAX_RETRIES attempts"
echo " Falling back to local registry"
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true
fi
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
echo "❌ Base images prepull FAILED - stopping Tilt execution"
echo "This usually happens due to Docker Hub rate limits"
echo "Please try again later or configure Docker Hub credentials"
echo "=========================================="
# Exit with error code to prevent further execution
exit 1
fi
''',
resource_deps=['gitea'], # Depend on Gitea when using Gitea registry
labels=['00-prepull'],
auto_init=True,
allow_parallel=False
)
else:
local_resource(
'prepull-base-images',
cmd='''#!/usr/bin/env bash
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "Using Local Registry Mode"
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
echo "❌ Base images prepull FAILED - stopping Tilt execution"
echo "This usually happens due to Docker Hub rate limits"
echo "Please try again later or configure Docker Hub credentials"
echo "=========================================="
# Exit with error code to prevent further execution
exit 1
fi
''',
labels=['00-prepull'],
auto_init=True,
allow_parallel=False
)
echo "⚠ Base images prepull had issues"
echo "This may affect image availability for services"
echo "=========================================="
# Continue execution - images are still available locally
exit 0
fi
''',
resource_deps=['kind-cluster-configuration'], # Runs AFTER kind cluster configuration
labels=['00-prepull'],
auto_init=True,
allow_parallel=False
)
# =============================================================================
@@ -298,35 +217,23 @@ if use_dockerhub:
To disable: unset USE_DOCKERHUB or set USE_DOCKERHUB=false
""" % (dockerhub_username, base_registry, python_image))
default_registry('docker.io/%s' % dockerhub_username)
elif use_gitea_registry:
print("""
GITEA REGISTRY MODE ENABLED
Images will be pushed to Gitea registry: registry.bakery-ia.local
Base images will be pulled from internal cluster registry: %s/%s
Make sure Gitea is running and accessible
To disable: unset USE_GITEA_REGISTRY or set USE_GITEA_REGISTRY=false
To use Docker Hub: export USE_DOCKERHUB=true
""" % (base_registry, python_image))
default_registry('registry.bakery-ia.local')
else:
print("""
LOCAL REGISTRY MODE
Using local registry for faster builds: localhost:5001
LOCAL REGISTRY MODE (KIND)
Using local kind registry for faster builds: localhost:5000
Base images will be pulled from: %s/%s
This registry is created by kubernetes_restart.sh script
To use Docker Hub: export USE_DOCKERHUB=true
To use Gitea registry: export USE_GITEA_REGISTRY=true
To change base registry: export BASE_REGISTRY=<registry-url>
To change Python image: export PYTHON_IMAGE=<image:tag>
Note: When using Gitea registry, base images use internal cluster registry: gitea-http.gitea.svc.cluster.local:3000
""" % (base_registry, python_image))
default_registry('localhost:5001')
default_registry('localhost:5000')
# =============================================================================
# INGRESS HEALTH CHECK
# =============================================================================
# Check ingress status and readiness
# Check ingress status and readiness with improved logic
local_resource(
'ingress-status-check',
cmd='''
@@ -343,96 +250,31 @@ local_resource(
echo "INGRESS CONTROLLER STATUS:"
kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller
# Wait for the project's ingress resources to be created
echo ""
echo "Waiting for project ingress resources to be created..."
# Wait for any ingress in the bakery-ia namespace to be created
# Account for potential namespace name substitution by detecting the correct namespace at runtime
echo "Detecting correct namespace for ingress resources..."
# The namespace name might have been substituted during kustomize processing
# Look for ingress resources in any namespace that could be ours
COUNT=0
MAX_COUNT=24 # 2 minutes with 5-second intervals
while [ $COUNT -lt $MAX_COUNT ]; do
# Look for ingress resources in any namespace
FOUND_INGRESS_NS=$(kubectl get ingress --all-namespaces --no-headers 2>/dev/null | grep -v "ingress-nginx" | head -1 | awk '{print $1}')
if [ -n "$FOUND_INGRESS_NS" ]; then
NAMESPACE="$FOUND_INGRESS_NS"
echo "Found ingress resources in namespace: $NAMESPACE"
break
fi
echo "Waiting for ingress resources to be created in any namespace..."
sleep 5
COUNT=$((COUNT + 1))
done
if [ $COUNT -eq $MAX_COUNT ]; then
echo "Warning: No ingress resources found after timeout."
echo "Listing all namespaces to help diagnose:"
kubectl get namespaces
echo "Listing all ingress resources:"
kubectl get ingress --all-namespaces
# Fallback to bakery-ia namespace
NAMESPACE="bakery-ia"
# Quick check: verify ingress controller is running
echo "Quick check: verifying ingress controller is running..."
if kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller | grep -q "Running"; then
echo "✓ Ingress controller is running"
else
echo "Using detected namespace: $NAMESPACE"
echo "⚠ Ingress controller may not be running properly"
fi
# Now wait for ingress resources in the detected namespace
COUNT=0
MAX_COUNT=24 # 2 minutes with 5-second intervals
while [ $COUNT -lt $MAX_COUNT ]; do
# Check if namespace exists before querying ingress
if kubectl get namespace "$NAMESPACE" &>/dev/null; then
INGRESS_COUNT=$(kubectl get ingress -n "$NAMESPACE" --no-headers 2>/dev/null | wc -l)
if [ "$INGRESS_COUNT" -gt 0 ]; then
echo "Ingress resources found in $NAMESPACE namespace."
break
fi
fi
echo "Waiting for ingress resources in $NAMESPACE namespace to be created..."
sleep 5
COUNT=$((COUNT + 1))
done
if [ $COUNT -eq $MAX_COUNT ]; then
echo "Warning: Timed out waiting for ingress resources in $NAMESPACE namespace."
echo "Listing all namespaces to help diagnose:"
kubectl get namespaces
echo "Listing all ingress resources:"
kubectl get ingress --all-namespaces
fi
# Brief pause to allow any pending ingress resources to be processed
sleep 2
# Wait for ingress to have address assigned (be more flexible about the name)
echo "Waiting for ingress to have address assigned..."
# Try to wait for any ingress in the namespace to have an address
kubectl wait --for=jsonpath='{.status.loadBalancer.ingress[0].ip}' ingress --all -n "$NAMESPACE" --timeout=30s 2>/dev/null || echo "Ingress may not have external IP (this is OK in Kind)"
# Check ingress resources
# Check ingress resources (just report status, don't wait)
echo ""
echo "INGRESS RESOURCES:"
kubectl get ingress -A
# Check specific ingress for our namespace
echo ""
echo "BAKERY-IA INGRESS DETAILS:"
kubectl get ingress -n "$NAMESPACE" -o wide
kubectl get ingress -A 2>/dev/null || echo "No ingress resources found yet"
# Check ingress load balancer status
echo ""
echo "INGRESS LOAD BALANCER STATUS:"
kubectl get svc -n ingress-nginx ingress-nginx-controller -o wide
# Wait a bit for ingress to fully initialize
sleep 10
kubectl get svc -n ingress-nginx ingress-nginx-controller -o wide 2>/dev/null || echo "Ingress controller service not found"
# Verify ingress endpoints
echo ""
echo "INGRESS ENDPOINTS:"
kubectl get endpoints -n ingress-nginx
kubectl get endpoints -n ingress-nginx 2>/dev/null || echo "Ingress endpoints not found"
# Test connectivity to the ingress endpoints
echo ""
@@ -441,12 +283,15 @@ local_resource(
kubectl exec -n ingress-nginx deployment/ingress-nginx-controller --container controller -- \
/nginx-ingress-controller --version > /dev/null 2>&1 && echo "✓ Ingress controller accessible"
# In Kind clusters, ingresses typically don't get external IPs, so we just verify they exist
echo "In Kind clusters, ingresses don't typically get external IPs - this is expected behavior"
echo ""
echo "Ingress status check completed successfully!"
echo "Project ingress resources are ready for Gitea and other services."
echo "=========================================="
''',
resource_deps=['apply-k8s-manifests'], # Step 2 depends on Step 1
resource_deps=['security-setup'], # According to requested order: security-setup -> ingress-status-check
labels=['00-ingress-check'],
auto_init=True,
allow_parallel=False
@@ -478,18 +323,17 @@ Applying security configurations...
""")
# Apply security configurations before loading main manifests
# Security setup always depends on prepull-base-images to ensure images are cached
# When using Gitea registry, the dependency chain is:
# ingress-status-check -> gitea -> prepull-base-images -> security-setup
# When using local registry, the dependency chain is:
# prepull-base-images -> security-setup
security_resource_deps = ['prepull-base-images'] # Always depend on prepull
# Apply security configurations after applying manifests
# According to requested order: apply-k8s-manifests -> security-setup
security_resource_deps = ['apply-k8s-manifests'] # Depend on manifests first
local_resource(
'security-setup',
cmd='''
echo "Applying security secrets and configurations..."
echo "=========================================="
echo "APPLYING SECRETS AND TLS CERTIFICATIONS"
echo "=========================================="
echo "Setting up security configurations..."
# First, ensure all required namespaces exist
echo "Creating namespaces..."
@@ -507,10 +351,12 @@ local_resource(
done
# Apply common secrets and configs
echo "Applying common configurations..."
kubectl apply -f infrastructure/environments/common/configs/configmap.yaml
kubectl apply -f infrastructure/environments/common/configs/secrets.yaml
# Apply database secrets and configs
echo "Applying database security configurations..."
kubectl apply -f infrastructure/platform/storage/postgres/secrets/postgres-tls-secret.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-init-config.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-logging-config.yaml
@@ -525,32 +371,145 @@ local_resource(
# Apply Mail/SMTP secrets (already included in common/configs/secrets.yaml)
# Apply CI/CD secrets
kubectl apply -f infrastructure/cicd/tekton-helm/templates/secrets.yaml
# Note: infrastructure/cicd/tekton-helm/templates/secrets.yaml is a Helm template file
# and should be applied via the Helm chart deployment, not directly with kubectl
echo "Skipping infrastructure/cicd/tekton-helm/templates/secrets.yaml (Helm template file)"
echo "This file will be applied when the Tekton Helm chart is deployed"
echo "Security configurations applied"
# Apply self-signed ClusterIssuer for cert-manager (required before certificates)
echo "Applying self-signed ClusterIssuer..."
kubectl apply -f infrastructure/platform/cert-manager/selfsigned-issuer.yaml
# Wait for ClusterIssuer to be ready
echo "Waiting for ClusterIssuer to be ready..."
kubectl wait --for=condition=Ready clusterissuer/selfsigned-issuer --timeout=60s || echo "ClusterIssuer may still be provisioning..."
# Apply TLS certificates for ingress
echo "Applying TLS certificates for ingress..."
kubectl apply -f infrastructure/environments/dev/k8s-manifests/dev-certificate.yaml
# Wait for cert-manager to create the certificate
echo "Waiting for TLS certificate to be ready..."
kubectl wait --for=condition=Ready certificate/bakery-dev-tls-cert -n bakery-ia --timeout=120s || echo "Certificate may still be provisioning..."
# Verify TLS certificates are created
echo "Verifying TLS certificates..."
if kubectl get secret bakery-dev-tls-cert -n bakery-ia &>/dev/null; then
echo "✓ TLS certificate 'bakery-dev-tls-cert' found in bakery-ia namespace"
else
echo "⚠ TLS certificate 'bakery-dev-tls-cert' not found, may still be provisioning"
fi
# Verify other secrets are created
echo "Verifying security secrets..."
for secret in gitea-admin-secret; do
if kubectl get secret $secret -n gitea &>/dev/null; then
echo "✓ Secret '$secret' found in gitea namespace"
else
echo " Secret '$secret' not found in gitea namespace (will be created when Gitea is deployed)"
fi
done
echo ""
echo "Security configurations applied successfully!"
echo "TLS certificates and secrets are ready for use."
echo "=========================================="
''',
resource_deps=security_resource_deps, # Conditional dependency based on registry usage
labels=['00-security'],
auto_init=True
)
# Kind cluster configuration for registry access
local_resource(
'kind-cluster-configuration',
cmd='''
echo "=========================================="
echo "CONFIGURING KIND CLUSTER FOR REGISTRY ACCESS"
echo "=========================================="
echo "Setting up localhost:5000 access in Kind cluster..."
echo ""
# Wait for the TLS certificate to be available
echo "Waiting for TLS certificate to be ready..."
MAX_RETRIES=30
RETRY_COUNT=0
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
if kubectl get secret bakery-dev-tls-cert -n bakery-ia &>/dev/null; then
echo "TLS certificate is ready"
break
fi
echo " Waiting for TLS certificate... (attempt $((RETRY_COUNT+1))/$MAX_RETRIES)"
sleep 5
RETRY_COUNT=$((RETRY_COUNT+1))
done
if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
echo "⚠ Warning: TLS certificate not ready after $MAX_RETRIES attempts"
echo " Proceeding with configuration anyway..."
fi
# Add localhost:5000 registry configuration to containerd
echo "Configuring containerd to access localhost:5000 registry..."
# Create the hosts.toml file for containerd to access localhost:5000 registry
if docker exec bakery-ia-local-control-plane sh -c 'cat > /etc/containerd/certs.d/localhost:5000/hosts.toml << EOF
server = "http://localhost:5000"
[host."http://kind-registry:5000"]
capabilities = ["pull", "resolve", "push"]
skip_verify = true
EOF'; then
echo "✓ Successfully created hosts.toml for localhost:5000 registry access"
else
echo "⚠ Failed to create hosts.toml for containerd"
echo " This may be because the Kind container is not running yet"
echo " The kubernetes_restart.sh script should handle this configuration"
fi
# Create the hosts.toml file for kind-registry:5000 (used by migration jobs)
if docker exec bakery-ia-local-control-plane sh -c 'cat > /etc/containerd/certs.d/kind-registry:5000/hosts.toml << EOF
server = "http://kind-registry:5000"
[host."http://kind-registry:5000"]
capabilities = ["pull", "resolve", "push"]
skip_verify = true
EOF'; then
echo "✓ Successfully created hosts.toml for kind-registry:5000 access"
else
echo "⚠ Failed to create hosts.toml for kind-registry:5000"
echo " This may be because the Kind container is not running yet"
echo " The kubernetes_restart.sh script should handle this configuration"
fi
echo ""
echo "Kind cluster configuration completed!"
echo "Registry access should now be properly configured."
echo "=========================================="
''',
resource_deps=['ingress-status-check'], # According to requested order: ingress-status-check -> kind-cluster-configuration
labels=['00-kind-config'],
auto_init=True,
allow_parallel=False
)
# Verify TLS certificates are mounted correctly
# =============================================================================
# LOAD KUBERNETES MANIFESTS
# EXECUTE OVERLAYS KUSTOMIZATIONS
# =============================================================================
# Load the main kustomize overlay for the dev environment
# Execute the main kustomize overlay for the dev environment with proper dependencies
k8s_yaml(kustomize('infrastructure/environments/dev/k8s-manifests'))
# Create a visible resource for applying Kubernetes manifests
# Create a visible resource for applying Kubernetes manifests with proper dependencies
local_resource(
'apply-k8s-manifests',
cmd='''
echo "=========================================="
echo "APPLYING KUBERNETES MANIFESTS"
echo "EXECUTING OVERLAYS KUSTOMIZATIONS"
echo "=========================================="
echo "Loading all Kubernetes resources including ingress configuration..."
echo ""
@@ -561,7 +520,7 @@ local_resource(
echo "- Security configurations"
echo "- CI/CD configurations"
echo ""
echo "Kubernetes manifests applied successfully!"
echo "Overlays kustomizations executed successfully!"
echo "=========================================="
''',
labels=['00-k8s-manifests'],
@@ -793,7 +752,7 @@ local_resource(
# Install Unbound with appropriate values
if [ "$ENVIRONMENT" = "dev" ]; then
helm upgrade --install unbound infrastructure/platform/infrastructure/unbound-helm \
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
-n bakery-ia \
--create-namespace \
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
@@ -949,7 +908,15 @@ local_resource(
fi
# =====================================================
# Step 5: Wait for pods and show status
# Step 5: Apply Mailu Ingress
# =====================================================
echo ""
echo "Applying Mailu ingress configuration..."
kubectl apply -f infrastructure/platform/mail/mailu-helm/mailu-ingress.yaml
echo "Mailu ingress applied for mail.bakery-ia.dev"
# =====================================================
# Step 6: Wait for pods and show status
# =====================================================
echo ""
echo "Waiting for Mailu pods to be ready..."
@@ -1092,19 +1059,62 @@ local_resource(
echo "Deploying Flux CD GitOps Toolkit..."
echo ""
# Check if Flux CLI is installed, install if missing
if ! command -v flux &> /dev/null; then
echo "Flux CLI not found, installing..."
# Determine OS and architecture
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m | tr '[:upper:]' '[:lower:]')
# Convert architecture format
if [[ "$ARCH" == "x86_64" ]]; then
ARCH="amd64"
elif [[ "$ARCH" == "aarch64" ]]; then
ARCH="arm64"
fi
# Download and install Flux CLI to user's local bin
echo "Detected OS: $OS, Architecture: $ARCH"
FLUX_VERSION="2.7.5"
DOWNLOAD_URL="https://github.com/fluxcd/flux2/releases/download/v${FLUX_VERSION}/flux_${FLUX_VERSION}_${OS}_${ARCH}.tar.gz"
echo "Downloading Flux CLI from: $DOWNLOAD_URL"
mkdir -p ~/.local/bin
cd /tmp
curl -sL "$DOWNLOAD_URL" -o flux.tar.gz
tar xzf flux.tar.gz
chmod +x flux
mv flux ~/.local/bin/
# Add to PATH if not already there
export PATH="$HOME/.local/bin:$PATH"
# Verify installation
if command -v flux &> /dev/null; then
echo "Flux CLI installed successfully"
else
echo "ERROR: Failed to install Flux CLI"
exit 1
fi
else
echo "Flux CLI is already installed"
fi
# Check if Flux CRDs are installed, install if missing
if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
echo "Installing Flux CRDs..."
flux install --namespace=flux-system --network-policy=false
else
echo "Flux CRDs are already installed"
fi
# Check if Flux is already deployed
if helm list -n flux-system | grep -q flux-cd; then
echo "Flux CD already deployed, checking status..."
helm status flux-cd -n flux-system
else
echo "Installing Flux CD..."
# Install Flux CRDs first if not already installed
if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
echo "Installing Flux CRDs..."
curl -sL https://fluxcd.io/install.sh | sudo bash
flux install --namespace=flux-system --network-policy=false
fi
echo "Installing Flux CD Helm release..."
# Create the namespace if it doesn't exist
kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply -f -
@@ -1311,18 +1321,33 @@ local_resource(
echo "Setting up Tekton Pipelines for CI/CD using Helm..."
echo ""
# Check if Tekton CRDs are already installed
# Check if Tekton Pipelines CRDs are already installed
if kubectl get crd pipelines.tekton.dev >/dev/null 2>&1; then
echo " Tekton CRDs already installed"
echo " Tekton Pipelines CRDs already installed"
else
echo " Installing Tekton v0.57.0..."
echo " Installing Tekton Pipelines..."
kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
echo " Waiting for Tekton to be ready..."
echo " Waiting for Tekton Pipelines to be ready..."
kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-controller -n tekton-pipelines
kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-webhook -n tekton-pipelines
echo " Tekton installed and ready"
echo " Tekton Pipelines installed and ready"
fi
# Check if Tekton Triggers CRDs are already installed
if kubectl get crd eventlisteners.triggers.tekton.dev >/dev/null 2>&1; then
echo " Tekton Triggers CRDs already installed"
else
echo " Installing Tekton Triggers..."
kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml
kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml
echo " Waiting for Tekton Triggers to be ready..."
kubectl wait --for=condition=available --timeout=180s deployment/tekton-triggers-controller -n tekton-pipelines
kubectl wait --for=condition=available --timeout=180s deployment/tekton-triggers-webhook -n tekton-pipelines
echo " Tekton Triggers installed and ready"
fi
echo ""
@@ -1356,161 +1381,28 @@ local_resource(
auto_init=False, # Manual trigger only
)
# Gitea - Auto-install when Gitea registry is enabled
gitea_enabled = use_gitea_registry # Enable Gitea when using Gitea registry
if 'ENABLE_GITEA' in os.environ:
gitea_enabled = os.environ['ENABLE_GITEA'].lower() == 'true'
# Gitea - Simple Helm installation for dev environment
local_resource(
'gitea',
cmd='''
echo "Installing Gitea via Helm..."
if gitea_enabled:
local_resource(
'gitea',
cmd='''
echo "Setting up Gitea for local Git server and container registry..."
echo ""
# Create namespace
kubectl create namespace gitea --dry-run=client -o yaml | kubectl apply -f -
# Wait for ingress controller to be ready before proceeding
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts 2>/dev/null || true
helm repo update gitea
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml --wait
# Verify ingress resources are properly configured
echo "Verifying ingress configuration..."
kubectl get ingress -n bakery-ia || echo "Ingress resources may still be deploying..."
# Small delay to ensure ingress is fully operational
sleep 10
# Create namespace
kubectl create namespace gitea || true
# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml
# Wait for Gitea to be ready before proceeding
echo "Waiting for Gitea to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gitea -n gitea --timeout=300s
# Check if admin user already exists by attempting to get user list
echo "Checking if admin user already exists..."
ADMIN_EXISTS=$(kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user list --admin | grep -c "bakery-admin" || echo "0")
if [ "$ADMIN_EXISTS" -eq 0 ]; then
echo "Creating Gitea admin user..."
# Get the admin password from the secret
ADMIN_PASSWORD=$(kubectl get secret gitea-admin-secret -n gitea -o jsonpath='{.data.password}' | base64 -d)
# Create the admin user
kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user create \
--username bakery-admin \
--password "$ADMIN_PASSWORD" \
--email admin@bakery-ia.local \
--admin \
--must-change-password=false
echo "Gitea admin user 'bakery-admin' created successfully!"
else
echo "Gitea admin user 'bakery-admin' already exists."
fi
echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: https://gitea.bakery-ia.local (for dev) or https://gitea.bakewise.ai (for prod)"
echo "Registry URL: https://registry.bakery-ia.local or gitea.bakery-ia.local:5000"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
''',
resource_deps=['ingress-status-check'], # Depend on ingress check to ensure routing is ready
labels=['99-cicd'],
auto_init=True, # Auto-install when enabled
allow_parallel=False
)
else:
# Manual trigger option for when Gitea registry is not enabled but user wants Gitea
local_resource(
'gitea',
cmd='''
echo "Setting up Gitea for local Git server and container registry..."
echo ""
# Wait for ingress controller to be ready before proceeding
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s
# Verify ingress resources are properly configured
echo "Verifying ingress configuration..."
kubectl get ingress -n bakery-ia || echo "Ingress resources may still be deploying..."
# Small delay to ensure ingress is fully operational
sleep 10
# Create namespace
kubectl create namespace gitea || true
# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml
# Wait for Gitea to be ready before proceeding
echo "Waiting for Gitea to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gitea -n gitea --timeout=300s
# Check if admin user already exists by attempting to get user list
echo "Checking if admin user already exists..."
ADMIN_EXISTS=$(kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user list --admin | grep -c "bakery-admin" || echo "0")
if [ "$ADMIN_EXISTS" -eq 0 ]; then
echo "Creating Gitea admin user..."
# Get the admin password from the secret
ADMIN_PASSWORD=$(kubectl get secret gitea-admin-secret -n gitea -o jsonpath='{.data.password}' | base64 -d)
# Create the admin user
kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user create \
--username bakery-admin \
--password "$ADMIN_PASSWORD" \
--email admin@bakery-ia.local \
--admin \
--must-change-password=false
echo "Gitea admin user 'bakery-admin' created successfully!"
else
echo "Gitea admin user 'bakery-admin' already exists."
fi
echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: https://gitea.bakery-ia.local (for dev) or https://gitea.bakewise.ai (for prod)"
echo "Registry URL: https://registry.bakery-ia.local"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
# Sync registry credentials to bakery-ia namespace for pod image pulls
echo ""
echo "Syncing registry credentials to bakery-ia namespace..."
chmod +x infrastructure/cicd/gitea/sync-registry-secret.sh
./infrastructure/cicd/gitea/sync-registry-secret.sh
echo ""
echo "Registry secret synced! Pods in bakery-ia namespace can now pull from registry.bakery-ia.local"
''',
labels=['99-cicd'],
auto_init=False, # Manual trigger only
)
echo ""
echo "Gitea installed!"
echo "Access: https://gitea.bakery-ia.local"
echo "Status: kubectl get pods -n gitea"
''',
labels=['99-cicd'],
auto_init=False,
)
# =============================================================================

View File

@@ -2,29 +2,35 @@
**Complete guide for deploying to production for a 10-tenant pilot program**
**Last Updated:** 2026-01-11
**Last Updated:** 2026-01-20
**Target Environment:** clouding.io VPS with MicroK8s
**Estimated Cost:** €41-81/month
**Time to Deploy:** 3-5 hours (first time, including fixes)
**Status:** ⚠️ REQUIRES PRE-DEPLOYMENT FIXES - See [Production VPS Deployment Fixes](../PRODUCTION_VPS_DEPLOYMENT_FIXES.md)
**Version:** 3.0
---
## Table of Contents
1. [Executive Summary](#executive-summary)
2. [⚠️ CRITICAL: Pre-Deployment Fixes](#critical-pre-deployment-fixes)
3. [Pre-Launch Checklist](#pre-launch-checklist)
4. [VPS Provisioning](#vps-provisioning)
5. [Infrastructure Setup](#infrastructure-setup)
6. [Domain & DNS Configuration](#domain--dns-configuration)
7. [TLS/SSL Certificates](#tlsssl-certificates)
8. [Email & Communication Setup](#email--communication-setup)
9. [Kubernetes Deployment](#kubernetes-deployment)
10. [Configuration & Secrets](#configuration--secrets)
11. [Database Migrations](#database-migrations)
12. [Verification & Testing](#verification--testing)
13. [Post-Deployment](#post-deployment)
2. [Infrastructure Architecture Overview](#infrastructure-architecture-overview)
3. [⚠️ CRITICAL: Pre-Deployment Fixes](#critical-pre-deployment-fixes)
4. [Pre-Launch Checklist](#pre-launch-checklist)
5. [VPS Provisioning](#vps-provisioning)
6. [Infrastructure Setup](#infrastructure-setup)
7. [Domain & DNS Configuration](#domain--dns-configuration)
8. [TLS/SSL Certificates](#tlsssl-certificates)
9. [Email & Communication Setup](#email--communication-setup)
10. [Kubernetes Deployment](#kubernetes-deployment)
11. [Configuration & Secrets](#configuration--secrets)
12. [Database Migrations](#database-migrations)
13. [CI/CD Infrastructure Deployment](#cicd-infrastructure-deployment)
14. [Mailu Email Server Deployment](#mailu-email-server-deployment)
15. [Nominatim Geocoding Service](#nominatim-geocoding-service)
16. [SigNoz Monitoring Deployment](#signoz-monitoring-deployment)
17. [Verification & Testing](#verification--testing)
18. [Post-Deployment](#post-deployment)
---
@@ -64,6 +70,107 @@ A complete multi-tenant SaaS platform with:
---
## Infrastructure Architecture Overview
### Component Layers
The Bakery-IA platform is organized into distinct infrastructure layers, each with specific deployment dependencies.
```
┌─────────────────────────────────────────────────────────────────────────────┐
│ LAYER 6: APPLICATION │
│ Frontend │ Gateway │ 18 Microservices │ CronJobs & Workers │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 5: MONITORING │
│ SigNoz (Unified Observability) │ AlertManager │ OTel Collector │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 4: PLATFORM SERVICES (Optional) │
│ Mailu (Email) │ Nominatim (Geocoding) │ CI/CD (Tekton, Flux, Gitea) │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 3: DATA & STORAGE │
│ PostgreSQL (18 DBs) │ Redis │ RabbitMQ │ MinIO │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 2: NETWORK & SECURITY │
│ Unbound DNS │ CoreDNS │ Ingress Controller │ Cert-Manager │ TLS │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 1: FOUNDATION │
│ Namespaces │ Storage Classes │ RBAC │ ConfigMaps │ Secrets │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 0: KUBERNETES CLUSTER │
│ MicroK8s (Production) │ Kind (Local Dev) │ EKS (AWS Alternative) │
└─────────────────────────────────────────────────────────────────────────────┘
```
### Deployment Order & Dependencies
Components must be deployed in a specific order due to dependencies:
```
1. Namespaces (bakery-ia, tekton-pipelines, flux-system)
2. Cert-Manager & ClusterIssuers
3. TLS Certificates (internal + ingress)
4. Unbound DNS Resolver (required for Mailu DNSSEC)
5. CoreDNS Configuration (forward to Unbound)
6. Ingress Controller & Resources
7. Data Layer: PostgreSQL, Redis, RabbitMQ, MinIO
8. Database Migrations
9. Application Services (18 microservices)
10. Gateway & Frontend
11. (Optional) CI/CD: Gitea → Tekton → Flux
12. (Optional) Mailu Email Server
13. (Optional) Nominatim Geocoding
14. (Optional) SigNoz Monitoring
```
### Infrastructure Components Summary
| Component | Purpose | Required | Namespace |
|-----------|---------|----------|-----------|
| **MicroK8s** | Kubernetes cluster | Yes | - |
| **Cert-Manager** | TLS certificate management | Yes | cert-manager |
| **Ingress-Nginx** | External traffic routing | Yes | ingress |
| **PostgreSQL** | 18 service databases | Yes | bakery-ia |
| **Redis** | Caching & sessions | Yes | bakery-ia |
| **RabbitMQ** | Message broker | Yes | bakery-ia |
| **MinIO** | Object storage (ML models) | Yes | bakery-ia |
| **Unbound DNS** | DNSSEC resolver | For Mailu | bakery-ia |
| **Mailu** | Self-hosted email server | Optional | bakery-ia |
| **Nominatim** | Geocoding service | Optional | bakery-ia |
| **Gitea** | Git server + container registry | Optional | gitea |
| **Tekton** | CI/CD pipelines | Optional | tekton-pipelines |
| **Flux CD** | GitOps deployment | Optional | flux-system |
| **SigNoz** | Unified observability | Recommended | bakery-ia |
### Quick Reference: What to Deploy
**Minimal Production Setup:**
- Kubernetes cluster + addons
- Core infrastructure (databases, cache, broker)
- Application services
- External email (Zoho/Gmail)
**Full Production Setup (Recommended):**
- Everything above, plus:
- Mailu (self-hosted email)
- SigNoz (monitoring)
- CI/CD (Gitea + Tekton + Flux)
- Nominatim (if geocoding needed)
---
## ⚠️ CRITICAL: Pre-Deployment Configuration
**READ THIS FIRST:** The Kubernetes configuration requires updates for secure production deployment.
@@ -400,7 +507,7 @@ microk8s status --wait-ready
# Enable core addons
microk8s enable dns # DNS resolution within cluster
microk8s enable hostpath-storage # Provides microk8s-hostpath storage class
microk8s enable ingress # Nginx ingress controller
microk8s enable ingress # Nginx ingress controller (uses class "public")
microk8s enable cert-manager # Let's Encrypt SSL certificates
microk8s enable metrics-server # For HPA autoscaling
microk8s enable rbac # Role-based access control
@@ -417,13 +524,26 @@ kubectl get storageclass
# Should show: microk8s-hostpath (default)
kubectl get pods -A
# Should show pods in: kube-system, ingress-nginx, cert-manager namespaces
# Should show pods in: kube-system, ingress, cert-manager namespaces
# Verify ingress controller is running
kubectl get pods -n ingress
# Should show: nginx-ingress-microk8s-controller-xxx Running
# Verify cert-manager is running
kubectl get pods -n cert-manager
# Should show: cert-manager-xxx, cert-manager-webhook-xxx, cert-manager-cainjector-xxx
# Verify metrics-server is working
kubectl top nodes
# Should return CPU/Memory metrics
```
**Important - MicroK8s Ingress Class:**
- MicroK8s ingress addon uses class name `public` (NOT `nginx`)
- The ClusterIssuers in this repo are already configured with `class: public`
- If you see cert-manager challenges failing, verify the ingress class matches
**Optional but Recommended:**
```bash
# Enable Prometheus for additional monitoring (optional)
@@ -472,23 +592,34 @@ kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/latest/
# flux install --namespace=flux-system --network-policy=false
```
### Step 3: Configure Firewall
### Step 4: Configure Firewall
**CRITICAL:** Ports 80 and 443 must be open for Let's Encrypt HTTP-01 challenges to work.
```bash
# Allow necessary ports
ufw allow 22/tcp # SSH
ufw allow 80/tcp # HTTP
ufw allow 443/tcp # HTTPS
ufw allow 16443/tcp # Kubernetes API (optional)
ufw allow 80/tcp # HTTP - REQUIRED for Let's Encrypt HTTP-01 challenge
ufw allow 443/tcp # HTTPS - For your application traffic
ufw allow 16443/tcp # Kubernetes API (optional, for remote kubectl access)
# Enable firewall
ufw enable
# Check status
ufw status verbose
# Expected output should include:
# 80/tcp ALLOW Anywhere
# 443/tcp ALLOW Anywhere
```
### Step 4: Create Namespace
**Also check clouding.io firewall:**
- Log in to clouding.io dashboard
- Go to your VPS → Firewall settings
- Ensure ports 80 and 443 are allowed from anywhere (0.0.0.0/0)
### Step 5: Create Namespace
```bash
# Create bakery-ia namespace
@@ -502,14 +633,39 @@ kubectl get namespaces
## Domain & DNS Configuration
### Step 1: Register Domain
### Step 1: Register Domain at Namecheap
1. Go to Namecheap or Cloudflare Registrar
2. Search for your desired domain
1. Go to [Namecheap](https://www.namecheap.com)
2. Search for your desired domain (e.g., `bakewise.ai`)
3. Complete purchase (~€10-15/year)
4. Save domain credentials
### Step 2: Configure Cloudflare DNS (Recommended)
### Step 2: Configure DNS at Namecheap
1. **Access DNS settings:**
```
1. Log in to Namecheap
2. Go to Domain List → Manage → Advanced DNS
```
2. **Add DNS records pointing to your VPS:**
```
Type Host Value TTL
A @ YOUR_VPS_IP Automatic
A * YOUR_VPS_IP Automatic
```
This points both `bakewise.ai` and all subdomains (`*.bakewise.ai`) to your VPS.
3. **Test DNS propagation:**
```bash
# Wait 5-10 minutes, then test
nslookup bakewise.ai
nslookup api.bakewise.ai
nslookup mail.bakewise.ai
```
### Step 3 (Optional): Configure Cloudflare DNS
1. **Add site to Cloudflare:**
```
@@ -605,30 +761,36 @@ kubectl get secrets -n bakery-ia | grep tls
### Step 3: Configure Let's Encrypt (External SSL)
cert-manager is already enabled. Configure the ClusterIssuer:
cert-manager is already enabled via `microk8s enable cert-manager`. The ClusterIssuer is pre-configured in the repository.
**Important:** MicroK8s ingress addon uses ingress class `public` (not `nginx`). This is already configured in:
- `infrastructure/platform/cert-manager/cluster-issuer-production.yaml`
- `infrastructure/platform/cert-manager/cluster-issuer-staging.yaml`
```bash
# On VPS, create ClusterIssuer
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: admin@yourdomain.com # CHANGE THIS
privateKeySecretRef:
name: letsencrypt-production
solvers:
- http01:
ingress:
class: public
EOF
# On VPS, apply the pre-configured ClusterIssuers
kubectl apply -k infrastructure/platform/cert-manager/
# Verify ClusterIssuer is ready
# Verify ClusterIssuers are ready
kubectl get clusterissuer
kubectl describe clusterissuer letsencrypt-production
# Expected output:
# NAME READY AGE
# letsencrypt-production True 1m
# letsencrypt-staging True 1m
```
**Configuration details (already set):**
- **Email:** `admin@bakewise.ai` (receives Let's Encrypt expiry notifications)
- **Ingress class:** `public` (MicroK8s default)
- **Challenge type:** HTTP-01 (requires port 80 open)
**If you need to customize the email**, edit before applying:
```bash
# Edit the production issuer
nano infrastructure/platform/cert-manager/cluster-issuer-production.yaml
# Change: email: admin@bakewise.ai → email: your-email@yourdomain.com
```
---
@@ -2230,12 +2392,19 @@ kubectl scale deployment monitoring -n bakery-ia --replicas=0
---
**Document Version:** 2.0
**Last Updated:** 2026-01-11
**Document Version:** 2.1
**Last Updated:** 2026-01-20
**Maintained By:** DevOps Team
**Changes in v2.1:**
- Updated DNS configuration for Namecheap (primary) with Cloudflare as optional
- Clarified MicroK8s ingress class is `public` (not `nginx`)
- Updated Let's Encrypt ClusterIssuer documentation to reference pre-configured files
- Added firewall requirements for clouding.io VPS
- Emphasized port 80/443 requirements for HTTP-01 challenges
**Changes in v2.0:**
- Added critical pre-deployment fixes section
- Updated infrastructure setup for k3s instead of MicroK8s
- Updated infrastructure setup for MicroK8s
- Added required component installation (nginx-ingress, metrics-server, etc.)
- Updated configuration steps with domain replacement
- Added Docker registry secret creation

View File

@@ -1,3 +1,4 @@
{{- if .Values.createNamespace | default false }}
apiVersion: v1
kind: Namespace
metadata:
@@ -5,3 +6,4 @@ metadata:
labels:
app.kubernetes.io/name: flux
kubernetes.io/metadata.name: {{ .Values.gitRepository.namespace }}
{{- end }}

View File

@@ -6,7 +6,7 @@ gitRepository:
name: bakery-ia
namespace: flux-system
interval: 1m
url: http://gitea.bakery-ia.local/bakery-admin/bakery-ia.git
url: http://gitea-http.gitea.svc.cluster.local:3000/bakery-admin/bakery-ia.git
ref:
branch: main
secretRef:

View File

@@ -19,18 +19,23 @@ GITEA_NAMESPACE="gitea"
BAKERY_NAMESPACE="bakery-ia"
REGISTRY_HOST="registry.bakery-ia.local"
ADMIN_USERNAME="bakery-admin"
# Static password for consistent dev environment setup
# This ensures the same credentials work across environment recreations
STATIC_ADMIN_PASSWORD="pvYUkGWJijqc0QfIZEXw"
# Check if running in microk8s
if command -v microk8s &> /dev/null; then
KUBECTL="microk8s kubectl"
fi
# Get or generate password
# Get password from argument, environment variable, or use static default
if [ -n "$1" ]; then
ADMIN_PASSWORD="$1"
elif [ -n "$GITEA_ADMIN_PASSWORD" ]; then
ADMIN_PASSWORD="$GITEA_ADMIN_PASSWORD"
else
ADMIN_PASSWORD=$(openssl rand -base64 24 | tr -d '/+=' | head -c 20)
echo "Generated admin password: $ADMIN_PASSWORD"
ADMIN_PASSWORD="$STATIC_ADMIN_PASSWORD"
echo "Using static admin password for dev environment consistency"
fi
# Create namespaces if they don't exist

View File

@@ -0,0 +1,65 @@
# Gitea Helm values for Production environment
# This file overrides values.yaml for production deployment
#
# Installation:
# helm upgrade --install gitea gitea/gitea -n gitea \
# -f infrastructure/cicd/gitea/values.yaml \
# -f infrastructure/cicd/gitea/values-prod.yaml
ingress:
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "500m"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
cert-manager.io/cluster-issuer: "letsencrypt-production"
hosts:
- host: gitea.bakewise.ai
paths:
- path: /
pathType: Prefix
tls:
- secretName: gitea-tls-cert
hosts:
- gitea.bakewise.ai
apiIngress:
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "500m"
cert-manager.io/cluster-issuer: "letsencrypt-production"
hosts:
- host: registry.bakewise.ai
paths:
- path: /
pathType: Prefix
tls:
- secretName: registry-tls-cert
hosts:
- registry.bakewise.ai
gitea:
admin:
email: admin@bakewise.ai
config:
server:
DOMAIN: gitea.bakewise.ai
SSH_DOMAIN: gitea.bakewise.ai
ROOT_URL: https://gitea.bakewise.ai
# Production resources - adjust based on expected load
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 200m
memory: 512Mi
# Larger storage for production
persistence:
size: 50Gi

View File

@@ -25,7 +25,40 @@ service:
# Registry authentication and API is handled by the main HTTP service
ingress:
enabled: false # Disable Gitea's built-in ingress - use common ingress instead
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "500m"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
hosts:
- host: gitea.bakery-ia.local
paths:
- path: /
pathType: Prefix
tls:
- secretName: bakery-dev-tls-cert
hosts:
- gitea.bakery-ia.local
- registry.bakery-ia.local
# Additional ingress for container registry (same backend, different hostname)
apiIngress:
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "500m"
hosts:
- host: registry.bakery-ia.local
paths:
- path: /
pathType: Prefix
tls:
- secretName: bakery-dev-tls-cert
hosts:
- registry.bakery-ia.local
persistence:
enabled: true

View File

@@ -17,6 +17,6 @@ After Tekton is installed, this chart will deploy:
- Tasks, Pipelines, and Triggers for CI/CD
To check the status of deployed resources:
kubectl get all -n {{ .Values.namespace }}
kubectl get all -n {{ .Release.Namespace }}
For more information about Tekton, visit: https://tekton.dev/

View File

@@ -31,6 +31,10 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
# Ability to list cluster-scoped trigger resources (needed for Tekton Triggers controller)
- apiGroups: ["triggers.tekton.dev"]
resources: ["clustertriggerbindings", "clusterinterceptors"]
verbs: ["get", "list", "watch"]
---
# ClusterRole for Pipeline execution (needed for git operations and deployments)
apiVersion: rbac.authorization.k8s.io/v1
@@ -63,7 +67,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tekton-triggers-eventlistener-role
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: pipeline-config
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: config

View File

@@ -5,7 +5,7 @@ apiVersion: triggers.tekton.dev/v1beta1
kind: EventListener
metadata:
name: bakery-ia-event-listener
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers

View File

@@ -7,7 +7,7 @@ apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: bakery-ia-ci
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: pipeline

View File

@@ -9,7 +9,7 @@ metadata:
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccounts.triggers.name }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: tekton-triggers-role
@@ -26,7 +26,7 @@ metadata:
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccounts.pipeline.name }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: tekton-pipeline-role
@@ -37,14 +37,14 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tekton-triggers-eventlistener-binding
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccounts.triggers.name }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: tekton-triggers-eventlistener-role

View File

@@ -4,7 +4,7 @@ apiVersion: v1
kind: Secret
metadata:
name: gitea-webhook-secret
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers
@@ -17,11 +17,16 @@ stringData:
# Secret for Gitea container registry credentials
# Used by Kaniko to push images to Gitea registry
# References the existing gitea-admin-secret for consistency
{{- $giteaSecret := (lookup "v1" "Secret" "gitea" "gitea-admin-secret") }}
{{- $giteaPassword := "" }}
{{- if and $giteaSecret $giteaSecret.data (index $giteaSecret.data "password") }}
{{- $giteaPassword = index $giteaSecret.data "password" | b64dec }}
{{- end }}
apiVersion: v1
kind: Secret
metadata:
name: gitea-registry-credentials
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: build
@@ -29,13 +34,14 @@ metadata:
note: "Registry credentials for pushing images - references gitea-admin-secret"
type: kubernetes.io/dockerconfigjson
stringData:
{{- $registryPassword := .Values.secrets.registry.password | default $giteaPassword | default "PLACEHOLDER_PASSWORD" }}
{{- if and .Values.secrets.registry.registryUrl .Values.secrets.registry.username }}
.dockerconfigjson: |
{
"auths": {
{{ .Values.secrets.registry.registryUrl | quote }}: {
"username": {{ .Values.secrets.registry.username | quote }},
"password": {{ .Values.secrets.registry.password | default (lookup "v1" "Secret" "gitea" "gitea-admin-secret").data.password | b64dec | quote }}
"password": {{ $registryPassword | quote }}
}
}
}
@@ -49,7 +55,7 @@ apiVersion: v1
kind: Secret
metadata:
name: gitea-git-credentials
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: gitops
@@ -57,8 +63,9 @@ metadata:
note: "Git credentials for GitOps updates - references gitea-admin-secret"
type: Opaque
stringData:
{{- $gitPassword := .Values.secrets.git.password | default $giteaPassword | default "PLACEHOLDER_PASSWORD" }}
username: {{ .Values.secrets.git.username | quote }}
password: {{ .Values.secrets.git.password | default (lookup "v1" "Secret" "gitea" "gitea-admin-secret").data.password | b64dec | quote }}
password: {{ $gitPassword | quote }}
---
# Secret for Flux GitRepository access
# Used by Flux to pull from Gitea repository
@@ -75,5 +82,6 @@ metadata:
note: "Credentials for Flux GitRepository access - references gitea-admin-secret"
type: Opaque
stringData:
{{- $fluxPassword := .Values.secrets.git.password | default $giteaPassword | default "PLACEHOLDER_PASSWORD" }}
username: {{ .Values.secrets.git.username | quote }}
password: {{ .Values.secrets.git.password | default (lookup "v1" "Secret" "gitea" "gitea-admin-secret").data.password | b64dec | quote }}
password: {{ $fluxPassword | quote }}

View File

@@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccounts.triggers.name }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers
@@ -13,7 +13,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccounts.pipeline.name }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: pipeline

View File

@@ -5,7 +5,7 @@ apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: detect-changed-services
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: detection

View File

@@ -5,7 +5,7 @@ apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: git-clone
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: source

View File

@@ -6,7 +6,7 @@ apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: kaniko-build
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: build
@@ -29,11 +29,11 @@ spec:
- name: base-registry
type: string
description: Base image registry URL (e.g., docker.io, ghcr.io/org)
default: "docker.io"
default: "gitea-http.gitea.svc.cluster.local:3000/bakery-admin"
- name: python-image
type: string
description: Python base image name and tag
default: "python:3.11-slim"
default: "python_3.11-slim"
results:
- name: build-status
description: Status of the build operation

View File

@@ -5,7 +5,7 @@ apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: pipeline-summary
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: summary

View File

@@ -5,7 +5,7 @@ apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: run-tests
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: test
@@ -23,7 +23,7 @@ spec:
default: "false"
steps:
- name: run-unit-tests
image: python:3.11-slim
image: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/python_3.11-slim:latest
workingDir: $(workspaces.source.path)
script: |
#!/bin/bash
@@ -57,7 +57,7 @@ spec:
cpu: 200m
memory: 512Mi
- name: run-integration-tests
image: python:3.11-slim
image: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/python_3.11-slim:latest
workingDir: $(workspaces.source.path)
script: |
#!/bin/bash

View File

@@ -5,7 +5,7 @@ apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: update-gitops
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: gitops

View File

@@ -5,7 +5,7 @@ apiVersion: triggers.tekton.dev/v1beta1
kind: TriggerBinding
metadata:
name: bakery-ia-trigger-binding
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers

View File

@@ -5,7 +5,7 @@ apiVersion: triggers.tekton.dev/v1beta1
kind: TriggerTemplate
metadata:
name: bakery-ia-trigger-template
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.labels.app.name }}
app.kubernetes.io/component: triggers

View File

@@ -5,7 +5,7 @@
global:
# Registry configuration
registry:
url: "gitea.bakery-ia.local:5000"
url: "gitea-http.gitea.svc.cluster.local:3000/bakery-admin"
# Git configuration
git:
@@ -21,9 +21,9 @@ pipeline:
verbosity: "info"
# Base image registry configuration
# For dev: localhost:5000 with python_3.11-slim
# For prod: docker.io with python:3.11-slim
baseRegistry: "docker.io"
pythonImage: "python:3.11-slim"
# For prod: gitea registry with python_3.11-slim
baseRegistry: "gitea-http.gitea.svc.cluster.local:3000/bakery-admin"
pythonImage: "python_3.11-slim"
# Test configuration
test:
@@ -63,7 +63,8 @@ webhook:
memory: 64Mi
# Namespace for Tekton resources
namespace: "tekton-pipelines"
# Set to empty/false to skip namespace creation (namespace is created by Tekton installation)
namespace: ""
# Secrets configuration
secrets:
@@ -76,7 +77,7 @@ secrets:
registry:
username: "bakery-admin"
password: "" # Will be populated from gitea-admin-secret
registryUrl: "gitea.bakery-ia.local:5000"
registryUrl: "gitea-http.gitea.svc.cluster.local:3000"
# Git credentials for GitOps updates
# Uses the same credentials as Gitea admin for consistency

View File

@@ -25,6 +25,8 @@ spec:
- bakery-ia.local
- api.bakery-ia.local
- monitoring.bakery-ia.local
- gitea.bakery-ia.local
- registry.bakery-ia.local
- "*.bakery-ia.local"
- "mail.bakery-ia.dev"
- "*.bakery-ia.dev"

View File

@@ -40,41 +40,6 @@ patches:
value: "true"
# NOTE: nominatim patches removed - nominatim is now deployed via Helm (tilt trigger nominatim-helm)
# Add imagePullSecrets to all Deployments for Gitea registry authentication
- target:
kind: Deployment
patch: |-
- op: add
path: /spec/template/spec/imagePullSecrets
value:
- name: gitea-registry-secret
# Add imagePullSecrets to all StatefulSets for Gitea registry authentication
- target:
kind: StatefulSet
patch: |-
- op: add
path: /spec/template/spec/imagePullSecrets
value:
- name: gitea-registry-secret
# Add imagePullSecrets to all Jobs for Gitea registry authentication
- target:
kind: Job
patch: |-
- op: add
path: /spec/template/spec/imagePullSecrets
value:
- name: gitea-registry-secret
# Add imagePullSecrets to all CronJobs for Gitea registry authentication
- target:
kind: CronJob
patch: |-
- op: add
path: /spec/jobTemplate/spec/template/spec/imagePullSecrets
value:
- name: gitea-registry-secret
labels:
- includeSelectors: true
@@ -82,59 +47,58 @@ labels:
environment: development
tier: local
# Dev image overrides - use Gitea registry to avoid Docker Hub rate limits
# Dev image overrides - use Kind registry to avoid Docker Hub rate limits
# IMPORTANT: All image names must be lowercase (Docker requirement)
# The prepull-base-images.sh script pushes images to registry.bakery-ia.local/bakery-admin/
# For internal cluster access, use the Gitea service directly
# Format: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/<package-name>:<original-tag>
# The prepull-base-images.sh script pushes images to localhost:5000/ with format: <repo>_<tag>
# Format: localhost:5000/<package-name>_<tag>:latest
images:
# Database images
- name: postgres
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/postgres
newTag: "17-alpine"
newName: localhost:5000/postgres_17_alpine
newTag: latest
- name: redis
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/redis
newTag: "7.4-alpine"
newName: localhost:5000/redis_7_4_alpine
newTag: latest
- name: rabbitmq
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/rabbitmq
newTag: "4.1-management-alpine"
newName: localhost:5000/rabbitmq_4_1_management_alpine
newTag: latest
# Utility images
- name: busybox
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/busybox
newTag: "1.36"
newName: localhost:5000/busybox_1_36
newTag: latest
- name: curlimages/curl
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/curlimages-curl
newName: localhost:5000/curlimages_curl_latest
newTag: latest
- name: bitnami/kubectl
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/bitnami-kubectl
newName: localhost:5000/bitnami_kubectl_latest
newTag: latest
# Alpine variants
- name: alpine
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/alpine
newTag: "3.19"
newName: localhost:5000/alpine_3_19
newTag: latest
- name: alpine/git
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/alpine-git
newTag: "2.43.0"
# CI/CD images (cached in Gitea registry for consistency)
newName: localhost:5000/alpine_git_2_43_0
newTag: latest
# CI/CD images (cached in Kind registry for consistency)
- name: gcr.io/kaniko-project/executor
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/gcr.io-kaniko-project-executor
newTag: v1.23.0
newName: localhost:5000/gcr_io_kaniko_project_executor_v1_23_0
newTag: latest
- name: gcr.io/go-containerregistry/crane
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/gcr.io-go-containerregistry-crane
newName: localhost:5000/gcr_io_go_containerregistry_crane_latest
newTag: latest
- name: registry.k8s.io/kustomize/kustomize
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/registry.k8s.io-kustomize-kustomize
newTag: v5.3.0
newName: localhost:5000/registry_k8s_io_kustomize_kustomize_v5_3_0
newTag: latest
# Storage images
- name: minio/minio
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/minio-minio
newTag: RELEASE.2024-11-07T00-52-20Z
newName: localhost:5000/minio_minio_release_2024_11_07t00_52_20z
newTag: latest
- name: minio/mc
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/minio-mc
newTag: RELEASE.2024-11-17T19-35-25Z
newName: localhost:5000/minio_mc_release_2024_11_17t19_35_25z
newTag: latest
# NOTE: nominatim image override removed - nominatim is now deployed via Helm
# Python base image
- name: python
newName: gitea-http.gitea.svc.cluster.local:3000/bakery-admin/python
newTag: "3.11-slim"
newName: localhost:5000/python_3_11_slim
newTag: latest

View File

@@ -26,6 +26,7 @@ spec:
- mail.bakewise.ai
- monitoring.bakewise.ai
- gitea.bakewise.ai
- registry.bakewise.ai
- api.bakewise.ai
# Use Let's Encrypt production issuer

View File

@@ -16,7 +16,7 @@ spec:
solvers:
- http01:
ingress:
class: nginx
class: public
podTemplate:
spec:
nodeSelector:

View File

@@ -17,7 +17,7 @@ spec:
solvers:
- http01:
ingress:
class: nginx
class: public
podTemplate:
spec:
nodeSelector:

View File

@@ -0,0 +1,8 @@
# Self-signed ClusterIssuer for local development certificates
# This issuer can generate self-signed certificates without needing external CA
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned-issuer
spec:
selfSigned: {}

View File

@@ -0,0 +1,77 @@
# Mailgun SMTP Credentials Secret for Mailu
#
# This secret stores Mailgun credentials for outbound email relay.
# Mailu uses Mailgun as an external SMTP relay to send all outbound emails.
#
# HOW TO CONFIGURE:
# 1. Go to https://www.mailgun.com and create an account
# 2. Add and verify your domain (e.g., bakery-ia.dev or bakewise.ai)
# 3. Go to Domain Settings > SMTP credentials
# 4. Note your SMTP credentials:
# - SMTP hostname: smtp.mailgun.org
# - Port: 587 (TLS)
# - Username: usually postmaster@yourdomain.com
# - Password: your Mailgun SMTP password (NOT API key)
# 5. Base64 encode your password:
# echo -n 'your-mailgun-smtp-password' | base64
# 6. Replace MAILGUN_SMTP_PASSWORD_BASE64 below with the encoded value
# 7. Apply this secret:
# kubectl apply -f mailgun-credentials-secret.yaml -n bakery-ia
#
# IMPORTANT NOTES:
# - Use the SMTP password from Mailgun, not the API key
# - The username is typically postmaster@yourdomain.com
# - For sandbox domains, Mailgun requires authorized recipients
# - Production domains need DNS verification (SPF, DKIM, MX records)
#
# DNS RECORDS REQUIRED FOR MAILGUN:
# You will need to add these DNS records for your domain:
# - SPF: TXT record for email authentication
# - DKIM: TXT records for email signing (Mailgun provides these)
# - MX: If you want to receive emails via Mailgun (optional for relay-only)
#
---
apiVersion: v1
kind: Secret
metadata:
name: mailu-mailgun-credentials
namespace: bakery-ia
labels:
app: mailu
component: external-relay
type: Opaque
data:
# Base64 encoded Mailgun SMTP password
# To encode: echo -n 'your-password' | base64
# To decode: echo 'encoded-value' | base64 -d
RELAY_PASSWORD: MAILGUN_SMTP_PASSWORD_BASE64
---
# Development environment secret (separate for different Mailgun domain)
apiVersion: v1
kind: Secret
metadata:
name: mailu-mailgun-credentials-dev
namespace: bakery-ia
labels:
app: mailu
component: external-relay
environment: dev
type: Opaque
data:
# Mailgun credentials for bakery-ia.dev domain
RELAY_PASSWORD: MAILGUN_DEV_SMTP_PASSWORD_BASE64
---
# Production environment secret
apiVersion: v1
kind: Secret
metadata:
name: mailu-mailgun-credentials-prod
namespace: bakery-ia
labels:
app: mailu
component: external-relay
environment: prod
type: Opaque
data:
# Mailgun credentials for bakewise.ai domain
RELAY_PASSWORD: MAILGUN_PROD_SMTP_PASSWORD_BASE64

View File

@@ -36,11 +36,17 @@ domain: "bakery-ia.dev"
hostnames:
- "mail.bakery-ia.dev"
# External relay configuration for dev
# External relay configuration for dev (Mailgun)
# All outbound emails will be relayed through Mailgun SMTP
# To configure:
# 1. Register at mailgun.com and verify your domain (bakery-ia.dev)
# 2. Get your SMTP credentials from Mailgun dashboard
# 3. Update the secret in configs/mailgun-credentials-secret.yaml
# 4. Apply the secret: kubectl apply -f configs/mailgun-credentials-secret.yaml
externalRelay:
host: "[smtp.mailgun.org]:587"
username: "postmaster@bakery-ia.dev"
password: "mailgun-api-key-replace-in-production"
username: "postmaster@bakery-ia.dev" # Your Mailgun SMTP username (usually postmaster@yourdomain)
password: "" # Will be loaded from secret - see configs/mailgun-credentials-secret.yaml
# Environment-specific configurations
persistence:

View File

@@ -2,27 +2,30 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mailu-ingress
namespace: bakery-ia # Same as Mailu's namespace
namespace: bakery-ia
labels:
app.kubernetes.io/name: mailu
app.kubernetes.io/component: ingress
annotations:
kubernetes.io/ingress.class: nginx # Or your Ingress class
nginx.ingress.kubernetes.io/proxy-body-size: "100m" # Allow larger email attachments
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # For long connections
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true" # Redirect HTTP to HTTPS
# If using Cert-Manager: cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- mail.bakery-ia.dev # or mail.bakewise.ai for prod
secretName: mail-tls-secret # Your TLS Secret
- mail.bakery-ia.dev
secretName: bakery-dev-tls-cert
rules:
- host: mail.bakery-ia.dev # or mail.bakewise.ai for prod
- host: mail.bakery-ia.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mailu-front-http # Mailu's front service (check with kubectl get svc -n bakery-ia)
name: mailu-front # Helm release name 'mailu' + component 'front'
port:
number: 80

View File

@@ -21,11 +21,17 @@ domain: "bakewise.ai"
hostnames:
- "mail.bakewise.ai"
# External relay configuration for production
# External relay configuration for production (Mailgun)
# All outbound emails will be relayed through Mailgun SMTP
# To configure:
# 1. Register at mailgun.com and verify your domain (bakewise.ai)
# 2. Get your SMTP credentials from Mailgun dashboard
# 3. Update the secret in configs/mailgun-credentials-secret.yaml
# 4. Apply the secret: kubectl apply -f configs/mailgun-credentials-secret.yaml
externalRelay:
host: "[smtp.mailgun.org]:587"
username: "postmaster@bakewise.ai"
password: "PRODUCTION_MAILGUN_API_KEY" # This should be set via secret
username: "postmaster@bakewise.ai" # Your Mailgun SMTP username
password: "" # Will be loaded from secret - see configs/mailgun-credentials-secret.yaml
# Environment-specific configurations
persistence:

View File

@@ -39,10 +39,12 @@ limits:
value: "200/day"
# External relay configuration (Mailgun)
# Mailu will relay all outbound emails through Mailgun SMTP
# Credentials should be provided via Kubernetes secret or environment-specific values
externalRelay:
host: "[smtp.mailgun.org]:587"
username: "postmaster@DOMAIN_PLACEHOLDER"
password: "mailgun-api-key-replace-in-production"
username: "" # Set in environment-specific values or via secret
password: "" # Set in environment-specific values or via secret
# Webmail configuration
webmail:

View File

@@ -1,18 +0,0 @@
---
# Service to route traffic from bakery-ia namespace to Gitea in gitea namespace
# Using ExternalName pointing to the headless service FQDN
# The ingress controller can resolve headless services via DNS (returns pod IPs)
# NOTE: Gitea's container registry is served on port 3000 (same as HTTP) at /v2/ path
apiVersion: v1
kind: Service
metadata:
name: gitea-http
namespace: bakery-ia
spec:
type: ExternalName
# Use the headless service DNS name - nginx ingress resolves this to pod IPs
externalName: gitea-http.gitea.svc.cluster.local
ports:
- name: http
port: 3000
targetPort: 3000

View File

@@ -3,7 +3,6 @@ kind: Kustomization
resources:
- ../../base
- gitea-service.yaml
namePrefix: dev-
@@ -15,30 +14,14 @@ patches:
- op: replace
path: /spec/tls/0/hosts/0
value: bakery-ia.local
- op: replace
path: /spec/tls/0/hosts/1
value: gitea.bakery-ia.local
- op: replace
path: /spec/tls/0/hosts/2
value: registry.bakery-ia.local
- op: replace
path: /spec/tls/0/hosts/3
value: mail.bakery-ia.dev
- op: replace
path: /spec/tls/0/secretName
value: bakery-dev-tls-cert
- op: replace
path: /spec/rules/0/host
value: bakery-ia.local
- op: replace
path: /spec/rules/1/host
value: gitea.bakery-ia.local
- op: replace
path: /spec/rules/2/host
value: registry.bakery-ia.local
- op: replace
path: /spec/rules/3/host
value: mail.bakery-ia.dev
- op: replace
path: /metadata/annotations/nginx.ingress.kubernetes.io~1cors-allow-origin
value: "https://localhost,https://localhost:3000,https://localhost:3001,https://127.0.0.1,https://127.0.0.1:3000,https://127.0.0.1:3001,https://bakery-ia.local,https://registry.bakery-ia.local,https://gitea.bakery-ia.local,http://localhost,http://localhost:3000,http://localhost:3001,http://127.0.0.1,http://127.0.0.1:3000"
# NOTE: Gitea and Registry ingresses are managed by Gitea Helm chart (infrastructure/cicd/gitea/values.yaml)
# NOTE: Mail ingress (mail.bakery-ia.dev) is deployed separately via mailu-helm Tilt resource

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: gitea-http
namespace: bakery-ia
spec:
type: ExternalName
externalName: gitea-http.gitea.svc.cluster.local
ports:
- port: 3000
targetPort: 3000

View File

@@ -97,6 +97,9 @@ is_registry_running() {
cleanup() {
print_status "Starting cleanup process..."
# Ensure correct Docker context is used
ensure_docker_context
# Delete Kubernetes namespace with timeout
print_status "Deleting namespace $NAMESPACE..."
if kubectl get namespace "$NAMESPACE" &>/dev/null; then
@@ -390,10 +393,81 @@ EOF
fi
}
# Configure the Docker daemon inside the Colima VM.
# Writes /etc/docker/daemon.json (cgroupfs driver + BuildKit features) and
# restarts the Docker service so the settings take effect.
# NOTE(review): despite older call-site messages mentioning "insecure
# registries", this configuration intentionally sets none -- the local kind
# registry at localhost:5000 does not require it.
# Returns 0 on success, 1 on failure.
configure_docker_daemon() {
    print_status "Configuring Docker daemon with cgroup driver..."

    # Declare and assign separately so a mktemp failure is not masked by
    # 'local' (ShellCheck SC2155).
    local temp_config
    temp_config=$(mktemp)

    cat > "$temp_config" << 'DOCKERCONFIG'
{
  "exec-opts": ["native.cgroupdriver=cgroupfs"],
  "features": {
    "buildkit": true,
    "containerd-snapshotter": true
  }
}
DOCKERCONFIG

    # Stream the config into the Colima VM; tee writes it with root
    # privileges (redirect instead of a useless 'cat | ...' pipe).
    if colima --profile "$COLIMA_PROFILE" ssh -- sudo tee /etc/docker/daemon.json > /dev/null < "$temp_config"; then
        print_success "Docker daemon configuration written"
        rm -f "$temp_config"

        # Restart Docker so the new daemon.json is picked up.
        print_status "Restarting Docker service to apply configuration..."
        if colima --profile "$COLIMA_PROFILE" ssh -- sudo systemctl restart docker; then
            print_success "Docker service restarted successfully"
            # Wait for Docker to be ready
            sleep 3
            return 0
        else
            print_error "Failed to restart Docker service"
            return 1
        fi
    else
        print_error "Failed to write Docker daemon configuration"
        rm -f "$temp_config"
        return 1
    fi
}
# Ensure the Docker CLI talks to a working daemon.
# When the current context is broken and Colima is running, switch to the
# first Colima docker context found; abort the whole script when no usable
# Colima context exists. When Colima is not running, return quietly (setup
# will start it later).
ensure_docker_context() {
    # Nothing to do when the daemon already answers.
    docker version >/dev/null 2>&1 && return 0

    print_warning "Docker daemon is not accessible, attempting to set correct context..."
    is_colima_running || return 0

    # Pick the first context named "colima" or "colima-<profile>".
    # NOTE: COLIMA_CONTEXT stays global, as in the rest of the script.
    COLIMA_CONTEXT=$(docker context ls --format='{{.Name}}' | grep -E "^colima($|-[[:alnum:]_-]+)" | head -1)
    if [ -z "$COLIMA_CONTEXT" ]; then
        print_error "No Colima Docker context found. Please ensure Colima is properly configured."
        exit 1
    fi

    print_status "Switching Docker context to $COLIMA_CONTEXT"
    if docker context use "$COLIMA_CONTEXT" >/dev/null 2>&1; then
        print_success "Docker context switched to $COLIMA_CONTEXT"
        sleep 2 # Give Docker a moment to establish connection
    else
        print_error "Failed to switch Docker context to $COLIMA_CONTEXT"
        exit 1
    fi
}
# Function to handle setup with better error handling
setup() {
print_status "Starting setup process..."
# Ensure correct Docker context is used
ensure_docker_context
# Check for required config files
check_config_files
@@ -406,38 +480,8 @@ setup() {
if [ $? -eq 0 ]; then
print_success "Colima started successfully"
# Configure Docker daemon with cgroup driver and insecure registries
print_status "Configuring Docker daemon with cgroup driver and insecure registries..."
cat << 'EOFCMD' | colima --profile k8s-local ssh
sudo tee /etc/docker/daemon.json << 'EOF'
{
"exec-opts": [
"native.cgroupdriver=cgroupfs"
],
"features": {
"buildkit": true,
"containerd-snapshotter": true
},
"insecure-registries": ["registry.bakery-ia.local"]
}
EOF
EOFCMD
if [ $? -eq 0 ]; then
print_success "Docker daemon configured successfully"
# Restart Docker service to apply the configuration
print_status "Restarting Docker service to apply configuration..."
colima --profile k8s-local ssh sudo systemctl restart docker
if [ $? -eq 0 ]; then
print_success "Docker service restarted successfully"
else
print_error "Failed to restart Docker service"
fi
else
print_error "Failed to configure Docker daemon"
fi
# Configure Docker daemon with insecure registries
configure_docker_daemon
else
print_error "Failed to start Colima"
print_status "Checking Colima status..."
@@ -447,38 +491,8 @@ EOFCMD
else
print_success "Colima is already running"
# Configure Docker daemon with cgroup driver and insecure registries even if Colima was already running
print_status "Configuring Docker daemon with cgroup driver and insecure registries..."
cat << 'EOFCMD' | colima --profile k8s-local ssh
sudo tee /etc/docker/daemon.json << 'EOF'
{
"exec-opts": [
"native.cgroupdriver=cgroupfs"
],
"features": {
"buildkit": true,
"containerd-snapshotter": true
},
"insecure-registries": ["registry.bakery-ia.local"]
}
EOF
EOFCMD
if [ $? -eq 0 ]; then
print_success "Docker daemon configured successfully"
# Restart Docker service to apply the configuration
print_status "Restarting Docker service to apply configuration..."
colima --profile k8s-local ssh sudo systemctl restart docker
if [ $? -eq 0 ]; then
print_success "Docker service restarted successfully"
else
print_error "Failed to restart Docker service"
fi
else
print_error "Failed to configure Docker daemon"
fi
# Configure Docker daemon with insecure registries even if Colima was already running
configure_docker_daemon
fi
# 2. Create local registry before Kind cluster
@@ -687,6 +701,45 @@ check_prerequisites() {
exit 1
fi
# Check if Docker daemon is accessible
if ! docker version >/dev/null 2>&1; then
print_warning "Docker daemon is not accessible with current context"
# Check if Colima is running and try to set Docker context accordingly
if is_colima_running; then
print_status "Colima is running, checking for correct Docker context..."
# Look for the correct Colima Docker context
COLIMA_CONTEXT=$(docker context ls --format='{{.Name}}' | grep -E "^colima($|-[[:alnum:]_-]+)" | head -1)
if [ -n "$COLIMA_CONTEXT" ]; then
print_status "Switching Docker context to $COLIMA_CONTEXT"
docker context use "$COLIMA_CONTEXT" >/dev/null 2>&1
if [ $? -eq 0 ]; then
print_success "Docker context switched to $COLIMA_CONTEXT"
else
print_error "Failed to switch Docker context to $COLIMA_CONTEXT"
exit 1
fi
else
print_error "No Colima Docker context found. Please ensure Colima is properly configured."
exit 1
fi
else
print_warning "Docker daemon is not running and Colima is not running. Will start Colima during setup."
# For setup operations, we can continue without Docker being accessible yet
# since Colima will be started as part of the setup process
if [[ "${1:-full}" == "setup" ]]; then
print_status "Continuing with setup since this is a setup operation..."
elif [[ "${1:-full}" == "full" ]]; then
print_status "Continuing with full operation (cleanup + setup)..."
else
print_error "Docker daemon is not running and Colima is not running. Please start Docker or Colima first."
exit 1
fi
fi
fi
print_success "All prerequisites are met"
}

17
load-images-to-kind.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Load locally built images (tagged for the localhost:5000 registry) into
# the Kind cluster so pods can use them without a registry round-trip.
#
# Usage: ./load-images-to-kind.sh
#   KIND_CLUSTER  Kind cluster name (default: bakery-ia-local)

KIND_CLUSTER="${KIND_CLUSTER:-bakery-ia-local}"

echo "Loading images into Kind cluster..."

# List repo:tag pairs targeting the local registry.
# BUG FIX: the previous version used --format "table ..." (which emits a
# header row) and stripped it with 'tail -n +2' -- but the 'grep
# localhost:5000' had already removed the header, so tail silently dropped
# the FIRST matching image instead. Dropping the "table" prefix removes the
# header at the source, so no tail is needed.
IMAGES=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep '^localhost:5000' | grep -v '<none>')

FAILED=0
for image in $IMAGES; do
    echo "Loading image: $image"
    if ! kind load docker-image "$image" --name "$KIND_CLUSTER"; then
        echo "Failed to load image: $image"
        FAILED=$((FAILED + 1))
    fi
done

# Surface partial failures to callers (e.g. CI) via the exit code.
if [ "$FAILED" -gt 0 ]; then
    echo "Done loading images to Kind cluster ($FAILED failed)"
    exit 1
fi
echo "Done loading images to Kind cluster"

View File

@@ -6,6 +6,36 @@
set -e
# Verify that every external tool this script depends on (docker, curl, jq)
# is on PATH. Prints per-platform installation hints and exits 1 when
# anything is missing; returns 0 otherwise.
check_required_tools() {
    local absent=()
    local cmd
    for cmd in docker curl jq; do
        command -v "$cmd" &> /dev/null || absent+=("$cmd")
    done

    # All present -> nothing to report.
    [ ${#absent[@]} -eq 0 ] && return 0

    echo "Error: Missing required tools: ${absent[*]}"
    echo "Please install them before running this script."
    echo ""
    echo "On macOS (with Homebrew):"
    echo "  brew install docker curl jq"
    echo ""
    echo "On Ubuntu/Debian:"
    echo "  sudo apt-get install docker.io curl jq"
    echo ""
    echo "On CentOS/RHEL:"
    echo "  sudo yum install docker curl jq"
    exit 1
}
# Check for required tools
check_required_tools
echo "=========================================="
echo "Bakery-IA Base Image Pre-Pull Script"
echo "=========================================="
@@ -17,7 +47,7 @@ DOCKER_PASSWORD="dckr_pat_zzEY5Q58x1S0puraIoKEtbpue3A"
# Authenticate with Docker Hub
echo "Authenticating with Docker Hub..."
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD"
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
echo "✓ Authentication successful"
echo ""
@@ -58,91 +88,28 @@ BASE_IMAGES=(
# Registry configuration
# Read from environment variables (set by Tiltfile or manually)
# USE_LOCAL_REGISTRY=true to push images to local registry after pulling
# USE_GITEA_REGISTRY=true to push images to Gitea registry after pulling
USE_LOCAL_REGISTRY="${USE_LOCAL_REGISTRY:-false}"
USE_GITEA_REGISTRY="${USE_GITEA_REGISTRY:-true}"
USE_LOCAL_REGISTRY="${USE_LOCAL_REGISTRY:-true}"
echo "Registry configuration:"
echo " USE_LOCAL_REGISTRY=$USE_LOCAL_REGISTRY"
echo " USE_GITEA_REGISTRY=$USE_GITEA_REGISTRY"
echo ""
# Check if Gitea registry should be used instead
if [ "$USE_GITEA_REGISTRY" = "true" ]; then
# Gitea registry is accessed via HTTPS on the registry subdomain (TLS terminated at ingress)
# Docker push/pull should use: registry.bakery-ia.local
# The registry serves on port 443 (HTTPS via ingress) but Docker defaults to 443 for HTTPS
REGISTRY="registry.bakery-ia.local"
echo "Testing Gitea registry accessibility at $REGISTRY..."
# Use local registry (kind registry)
REGISTRY="localhost:5000"
# Test if Gitea registry is accessible (try HTTPS first, then HTTP)
# Note: Gitea registry might return 401 Unauthorized when not authenticated, which is expected
# We're just checking if the service is reachable
if curl -sk -o /dev/null -w "%{http_code}" https://$REGISTRY/v2/ | grep -q "^[234]"; then
echo "✓ Gitea registry accessible via HTTPS"
# Authenticate with Gitea registry if accessible
echo "Authenticating with Gitea registry..."
echo "Note: For self-signed certificates, you may need to configure Docker to trust the registry:"
echo " 1. Add to /etc/docker/daemon.json:"
echo " {\"insecure-registries\": [\"$REGISTRY\"]}"
echo " 2. Restart Docker: sudo systemctl restart docker"
echo " 3. Or use: docker --insecure-registry $REGISTRY login $REGISTRY"
# Try to authenticate (this may fail due to certificate issues)
if ! docker login $REGISTRY; then
echo "Warning: Failed to authenticate with Gitea registry"
echo "This could be due to:"
echo " - Self-signed certificate issues (see above)"
echo " - Incorrect credentials"
echo " - Registry not properly configured"
echo "You may need to run: docker login $REGISTRY"
echo "Falling back to local registry"
REGISTRY="localhost:5000"
USE_GITEA_REGISTRY="false"
USE_LOCAL_REGISTRY="true"
else
echo "✓ Gitea registry authentication successful"
fi
elif curl -s -o /dev/null -w "%{http_code}" http://$REGISTRY/v2/ | grep -q "^[234]"; then
echo "✓ Gitea registry accessible via HTTP"
# Authenticate with Gitea registry if accessible
echo "Authenticating with Gitea registry..."
echo "Note: For self-signed certificates, you may need to configure Docker to trust the registry:"
echo " 1. Add to /etc/docker/daemon.json:"
echo " {\"insecure-registries\": [\"$REGISTRY\"]}"
echo " 2. Restart Docker: sudo systemctl restart docker"
echo " 3. Or use: docker --insecure-registry $REGISTRY login $REGISTRY"
# Try to authenticate (this may fail due to certificate issues)
if ! docker login $REGISTRY; then
echo "Warning: Failed to authenticate with Gitea registry"
echo "This could be due to:"
echo " - Self-signed certificate issues (see above)"
echo " - Incorrect credentials"
echo " - Registry not properly configured"
echo "You may need to run: docker login $REGISTRY"
echo "Falling back to local registry"
REGISTRY="localhost:5000"
USE_GITEA_REGISTRY="false"
USE_LOCAL_REGISTRY="true"
else
echo "✓ Gitea registry authentication successful"
fi
# If using local registry, verify it's running
if [ "$USE_LOCAL_REGISTRY" = "true" ]; then
echo "Checking local registry at $REGISTRY..."
if curl -s http://$REGISTRY/v2/ >/dev/null 2>&1; then
echo "✓ Local registry is accessible"
else
echo "Warning: Gitea registry at $REGISTRY is not accessible, falling back to local registry"
echo "This could be because:"
echo " 1. Gitea is not running or not properly configured"
echo " 2. The ingress is not properly routing to Gitea"
echo " 3. The registry service is not exposed"
REGISTRY="localhost:5000"
USE_GITEA_REGISTRY="false"
echo "⚠ Local registry is not accessible at $REGISTRY"
echo "Will only pull images locally (no registry push)"
USE_LOCAL_REGISTRY="false"
fi
else
REGISTRY="localhost:5000"
fi
echo ""
echo "Base images to pre-pull:"
echo "----------------------------------------"
for image in "${BASE_IMAGES[@]}"; do
@@ -153,48 +120,40 @@ echo ""
echo "Starting pre-pull process..."
echo "----------------------------------------"
# Track success/failure
FAILED_IMAGES=()
SUCCESS_COUNT=0
# Pull each base image
for image in "${BASE_IMAGES[@]}"; do
echo "Pulling: $image"
# Pull the image
docker pull "$image"
if ! docker pull "$image"; then
echo " ⚠ Failed to pull $image"
FAILED_IMAGES+=("$image")
continue
fi
# Tag for registry if enabled
if [ "$USE_LOCAL_REGISTRY" = "true" ] || [ "$USE_GITEA_REGISTRY" = "true" ]; then
if [ "$USE_GITEA_REGISTRY" = "true" ]; then
# Gitea registry requires format: registry/owner/package:tag
# Convert image name to package name:
# - Replace / with - (e.g., gcr.io/kaniko-project/executor -> gcr.io-kaniko-project-executor)
# - Keep the tag if present, otherwise use original tag
# Example: gcr.io/kaniko-project/executor:v1.23.0 -> bakery-admin/gcr.io-kaniko-project-executor:v1.23.0
image_name="${image%%:*}" # Remove tag
image_tag="${image#*:}" # Get tag
if [ "$image_name" = "$image_tag" ]; then
image_tag="latest" # No tag in original, use latest
fi
# Convert image name: replace / with - and lowercase
package_name="$(echo $image_name | sed 's|/|-|g' | tr '[:upper:]' '[:lower:]')"
registry_image="$REGISTRY/bakery-admin/${package_name}:${image_tag}"
else
# Local registry format: replace / and : with _
local_repo="$(echo $image | sed 's|/|_|g' | sed 's|:|_|g' | tr '[:upper:]' '[:lower:]')"
registry_image="$REGISTRY/${local_repo}:latest"
fi
if [ "$USE_LOCAL_REGISTRY" = "true" ]; then
# Local registry format: replace /, :, -, and . with _
local_repo="$(echo $image | sed 's|/|_|g' | sed 's|:|_|g' | sed 's|-|_|g' | sed 's|\.|_|g' | tr '[:upper:]' '[:lower:]')"
registry_image="$REGISTRY/${local_repo}:latest"
docker tag "$image" "$registry_image"
echo " Tagged as: $registry_image"
# Push to registry
docker push "$registry_image"
if [ "$USE_GITEA_REGISTRY" = "true" ]; then
echo " Pushed to Gitea registry"
if docker push "$registry_image"; then
echo " ✓ Pushed to local registry"
else
echo " Pushed to local registry"
echo " ⚠ Failed to push to registry (image still available locally)"
fi
fi
echo " ✓ Successfully pulled $image"
SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
echo ""
done
@@ -203,73 +162,24 @@ echo "Base Image Pre-Pull Complete!"
echo "=========================================="
echo ""
echo "Summary:"
echo " - Total images pulled: ${#BASE_IMAGES[@]}"
if [ "$USE_GITEA_REGISTRY" = "true" ]; then
echo " - Gitea registry enabled: $USE_GITEA_REGISTRY"
echo " - Registry URL: $REGISTRY"
echo " - Total images: ${#BASE_IMAGES[@]}"
echo " - Successfully pulled: $SUCCESS_COUNT"
if [ ${#FAILED_IMAGES[@]} -gt 0 ]; then
echo " - Failed: ${#FAILED_IMAGES[@]}"
echo " - Failed images: ${FAILED_IMAGES[*]}"
fi
if [ "$USE_LOCAL_REGISTRY" = "true" ]; then
echo " - Registry: Local ($REGISTRY)"
else
echo " - Local registry enabled: $USE_LOCAL_REGISTRY"
echo " - Registry URL: $REGISTRY"
echo " - Registry: None (local Docker only)"
fi
echo ""
if [ "$USE_LOCAL_REGISTRY" = "true" ] || [ "$USE_GITEA_REGISTRY" = "true" ]; then
if [ "$USE_GITEA_REGISTRY" = "true" ]; then
echo "Gitea registry contents:"
# Note: Gitea registry API might be different, using the standard registry API for now
# If Gitea registry is not accessible, this might fail
curl -s http://$REGISTRY/v2/_catalog | jq . 2>/dev/null || echo "Could not access registry contents (Gitea registry may not support this endpoint)"
else
echo "Local registry contents:"
curl -s http://$REGISTRY/v2/_catalog | jq . 2>/dev/null || echo "Could not access registry contents"
fi
echo ""
# Exit with error if any images failed
if [ ${#FAILED_IMAGES[@]} -gt 0 ]; then
echo "⚠ Some images failed to pull. This may be due to Docker Hub rate limits."
echo "Please try again later or configure Docker Hub credentials."
exit 1
fi
echo "Next steps:"
echo " 1. Run your service builds - they will use cached images"
echo " 2. For Kubernetes: Consider setting up a pull-through cache"
echo " 3. For CI/CD: Run this script before your build pipeline"
echo ""
echo "To use registry in your builds:"
if [ "$USE_GITEA_REGISTRY" = true ]; then
echo " - Update Dockerfiles to use: $REGISTRY/..."
echo " - Gitea registry URL: $REGISTRY"
else
echo " - Update Dockerfiles to use: $REGISTRY/..."
echo " - Local registry URL: $REGISTRY"
fi
echo " - Or configure Docker daemon to use registry as mirror"
echo ""
# Optional: Configure Docker daemon to use registry as mirror
if [ "$USE_LOCAL_REGISTRY" = "true" ] || [ "$USE_GITEA_REGISTRY" = "true" ]; then
if [ "$USE_GITEA_REGISTRY" = "true" ]; then
echo "To configure Docker daemon to use Gitea registry as mirror:"
echo ""
cat << EOF
{
"registry-mirrors": ["https://registry.bakery-ia.local"],
"insecure-registries": ["registry.bakery-ia.local"]
}
EOF
echo ""
echo "IMPORTANT: For Gitea registry to work properly:"
echo " 1. Gitea must be running and accessible at gitea.bakery-ia.local"
echo " 2. The registry subdomain must be properly configured in your ingress"
echo " 3. You may need to authenticate with Docker:"
echo " docker login registry.bakery-ia.local"
echo " 4. Check that the Gitea registry service is exposed on port 3000"
else
echo "To configure Docker daemon to use local registry as mirror:"
echo ""
cat << 'EOF'
{
"registry-mirrors": ["http://localhost:5000"]
}
EOF
fi
echo ""
echo "Add this to /etc/docker/daemon.json and restart Docker"
fi
echo "✓ All images pulled successfully!"

View File

@@ -10,8 +10,8 @@ metadata:
# - BASE_REGISTRY: Registry URL for base images
# - PYTHON_IMAGE: Python image name and tag
#
# Dev (default): BASE_REGISTRY=localhost:5000, PYTHON_IMAGE=python_3.11-slim
# Prod: BASE_REGISTRY=docker.io, PYTHON_IMAGE=python:3.11-slim
# Dev (default): BASE_REGISTRY=localhost:5000, PYTHON_IMAGE=python_3_11_slim
# Prod: BASE_REGISTRY=gitea-http.gitea.svc.cluster.local:3000/bakery-admin, PYTHON_IMAGE=python_3_11_slim
#
# Usage:
# skaffold dev # Uses dev settings (local registry)
@@ -35,7 +35,7 @@ build:
dockerfile: gateway/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
# Frontend (no Python base image needed)
- image: bakery/dashboard
@@ -50,7 +50,7 @@ build:
dockerfile: services/auth/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/tenant-service
context: .
@@ -58,7 +58,7 @@ build:
dockerfile: services/tenant/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/training-service
context: .
@@ -66,7 +66,7 @@ build:
dockerfile: services/training/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/forecasting-service
context: .
@@ -74,7 +74,7 @@ build:
dockerfile: services/forecasting/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/sales-service
context: .
@@ -82,7 +82,7 @@ build:
dockerfile: services/sales/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/external-service
context: .
@@ -90,7 +90,7 @@ build:
dockerfile: services/external/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/notification-service
context: .
@@ -98,7 +98,7 @@ build:
dockerfile: services/notification/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/inventory-service
context: .
@@ -106,7 +106,7 @@ build:
dockerfile: services/inventory/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/recipes-service
context: .
@@ -114,7 +114,7 @@ build:
dockerfile: services/recipes/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/suppliers-service
context: .
@@ -122,7 +122,7 @@ build:
dockerfile: services/suppliers/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/pos-service
context: .
@@ -130,7 +130,7 @@ build:
dockerfile: services/pos/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/orders-service
context: .
@@ -138,7 +138,7 @@ build:
dockerfile: services/orders/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/production-service
context: .
@@ -146,7 +146,7 @@ build:
dockerfile: services/production/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/alert-processor
context: .
@@ -154,7 +154,7 @@ build:
dockerfile: services/alert_processor/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
- image: bakery/demo-session-service
context: .
@@ -162,7 +162,7 @@ build:
dockerfile: services/demo_session/Dockerfile
buildArgs:
BASE_REGISTRY: localhost:5000
PYTHON_IMAGE: python_3.11-slim
PYTHON_IMAGE: python_3_11_slim
deploy:
kustomize:
@@ -320,8 +320,8 @@ profiles:
docker:
dockerfile: gateway/Dockerfile
buildArgs:
BASE_REGISTRY: docker.io
PYTHON_IMAGE: "python:3.11-slim"
BASE_REGISTRY: gitea-http.gitea.svc.cluster.local:3000/bakery-admin
PYTHON_IMAGE: "python_3_11_slim"
# Frontend
- image: bakery/dashboard
@@ -329,22 +329,22 @@ profiles:
docker:
dockerfile: Dockerfile.kubernetes
# Microservices - Production base images (docker.io/python:3.11-slim)
# Microservices - Production base images (gitea-http.gitea.svc.cluster.local:3000/bakery-admin/python_3_11_slim)
- image: bakery/auth-service
context: .
docker:
dockerfile: services/auth/Dockerfile
buildArgs:
BASE_REGISTRY: docker.io
PYTHON_IMAGE: "python:3.11-slim"
BASE_REGISTRY: gitea-http.gitea.svc.cluster.local:3000/bakery-admin
PYTHON_IMAGE: "python_3_11_slim"
- image: bakery/tenant-service
context: .
docker:
dockerfile: services/tenant/Dockerfile
buildArgs:
BASE_REGISTRY: docker.io
PYTHON_IMAGE: "python:3.11-slim"
BASE_REGISTRY: gitea-http.gitea.svc.cluster.local:3000/bakery-admin
PYTHON_IMAGE: "python_3_11_slim"
- image: bakery/training-service
context: .