Add new infra architecture 11

This commit is contained in:
Urtzi Alfaro
2026-01-20 22:05:10 +01:00
parent 0217ad83be
commit 2512de4173
42 changed files with 1056 additions and 874 deletions

680
Tiltfile
View File

@@ -22,14 +22,10 @@
# Docker registry configuration
# Set USE_DOCKERHUB=true environment variable to push images to Docker Hub
# Set USE_GITEA_REGISTRY=true environment variable to push images to Gitea registry
# Otherwise, uses local registry for faster builds and deployments
use_dockerhub = False # Default to False
use_gitea_registry = True # Default to False - Gitea registry not working currently
# Otherwise, uses local kind registry for faster builds and deployments
use_dockerhub = False # Use local kind registry by default
if 'USE_DOCKERHUB' in os.environ:
use_dockerhub = os.environ['USE_DOCKERHUB'].lower() == 'true'
if 'USE_GITEA_REGISTRY' in os.environ:
use_gitea_registry = os.environ['USE_GITEA_REGISTRY'].lower() == 'true'
dockerhub_username = 'uals' # Default username
if 'DOCKERHUB_USERNAME' in os.environ:
@@ -37,8 +33,8 @@ if 'DOCKERHUB_USERNAME' in os.environ:
# Base image registry configuration for Dockerfile ARGs
# This controls where the base Python image is pulled from during builds
base_registry = 'localhost:5000' # Default for local dev
python_image = 'python_3.11-slim' # Local registry uses underscores
base_registry = 'localhost:5000' # Default for local dev (kind registry)
python_image = 'python_3_11_slim' # Local registry uses underscores (matches prepull naming)
if 'BASE_REGISTRY' in os.environ:
base_registry = os.environ['BASE_REGISTRY']
@@ -50,125 +46,48 @@ if use_dockerhub:
base_registry = 'docker.io'
python_image = 'python:3.11-slim'
# For Gitea registry mode
# Gitea registry is accessed via the registry subdomain (TLS terminated at ingress)
# However, for internal cluster builds (like Kaniko), we need to use the internal service name
if use_gitea_registry:
# For external access (ingress): registry.bakery-ia.local
# For internal cluster access: gitea-http.gitea.svc.cluster.local:3000
base_registry = 'gitea-http.gitea.svc.cluster.local:3000'
python_image = 'python:3.11-slim'
# Add fallback to local registry if Gitea registry is not available
fallback_registry = 'localhost:5001'
# =============================================================================
# PREPULL BASE IMAGES STEP - CRITICAL FIRST STEP
# PREPULL BASE IMAGES - RUNS AFTER SECURITY SETUP
# =============================================================================
# Dependency order: apply-k8s-manifests -> security-setup -> ingress-status-check
# -> kind-cluster-configuration -> prepull-base-images
# Run the prepull script - if this fails, don't continue
# When using Gitea registry, make sure Gitea is available first
if use_gitea_registry:
local_resource(
'prepull-base-images',
cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "Using Gitea Registry Mode"
echo "=========================================="
# Prepull runs AFTER security setup to ensure registry is available
local_resource(
'prepull-base-images',
cmd='''#!/usr/bin/env bash
echo "=========================================="
echo "STARTING PRE PULL WITH PROPER DEPENDENCIES"
echo "=========================================="
echo ""
# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=true
export USE_LOCAL_REGISTRY=false
# Wait for Gitea registry to be accessible
echo "Waiting for Gitea registry to be accessible..."
echo "Registry URL: registry.bakery-ia.local (via ingress)"
MAX_RETRIES=30
RETRY_COUNT=0
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
# Try HTTPS via ingress (registry.bakery-ia.local routes to gitea-http:3000)
if curl -sk https://registry.bakery-ia.local/v2/ >/dev/null 2>&1; then
echo "✓ Gitea registry is accessible via HTTPS"
break
fi
# Also try directly via Gitea HTTP service within cluster
if curl -s http://gitea-http.gitea.svc.cluster.local:3000/v2/ >/dev/null 2>&1; then
echo "✓ Gitea registry is accessible via internal service"
break
fi
echo " Waiting for Gitea registry... (attempt $((RETRY_COUNT+1))/$MAX_RETRIES)"
sleep 10
RETRY_COUNT=$((RETRY_COUNT+1))
done
if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
echo "⚠ Warning: Gitea registry not accessible after $MAX_RETRIES attempts"
echo " Falling back to local registry"
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true
fi
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
echo "❌ Base images prepull FAILED - stopping Tilt execution"
echo "This usually happens due to Docker Hub rate limits"
echo "Please try again later or configure Docker Hub credentials"
echo "=========================================="
# Exit with error code to prevent further execution
exit 1
fi
''',
resource_deps=['gitea'], # Depend on Gitea when using Gitea registry
labels=['00-prepull'],
auto_init=True,
allow_parallel=False
)
else:
local_resource(
'prepull-base-images',
cmd='''#!/usr/bin/env bash
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "PREPULLING BASE IMAGES - CRITICAL STEP"
echo "Using Local Registry Mode"
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
# Export environment variables for the prepull script
export USE_GITEA_REGISTRY=false
export USE_LOCAL_REGISTRY=true
# Run the prepull script
if ./scripts/prepull-base-images.sh; then
echo ""
echo "✓ Base images prepull completed successfully"
echo "=========================================="
echo "CONTINUING WITH TILT SETUP..."
echo "=========================================="
exit 0
else
echo ""
echo "❌ Base images prepull FAILED - stopping Tilt execution"
echo "This usually happens due to Docker Hub rate limits"
echo "Please try again later or configure Docker Hub credentials"
echo "=========================================="
# Exit with error code to prevent further execution
exit 1
fi
''',
labels=['00-prepull'],
auto_init=True,
allow_parallel=False
)
echo "⚠ Base images prepull had issues"
echo "This may affect image availability for services"
echo "=========================================="
# Continue execution - images are still available locally
exit 0
fi
''',
resource_deps=['kind-cluster-configuration'], # Runs AFTER kind cluster configuration
labels=['00-prepull'],
auto_init=True,
allow_parallel=False
)
# =============================================================================
@@ -298,35 +217,23 @@ if use_dockerhub:
To disable: unset USE_DOCKERHUB or set USE_DOCKERHUB=false
""" % (dockerhub_username, base_registry, python_image))
default_registry('docker.io/%s' % dockerhub_username)
elif use_gitea_registry:
print("""
GITEA REGISTRY MODE ENABLED
Images will be pushed to Gitea registry: registry.bakery-ia.local
Base images will be pulled from internal cluster registry: %s/%s
Make sure Gitea is running and accessible
To disable: unset USE_GITEA_REGISTRY or set USE_GITEA_REGISTRY=false
To use Docker Hub: export USE_DOCKERHUB=true
""" % (base_registry, python_image))
default_registry('registry.bakery-ia.local')
else:
print("""
LOCAL REGISTRY MODE
Using local registry for faster builds: localhost:5001
LOCAL REGISTRY MODE (KIND)
Using local kind registry for faster builds: localhost:5000
Base images will be pulled from: %s/%s
This registry is created by kubernetes_restart.sh script
To use Docker Hub: export USE_DOCKERHUB=true
To use Gitea registry: export USE_GITEA_REGISTRY=true
To change base registry: export BASE_REGISTRY=<registry-url>
To change Python image: export PYTHON_IMAGE=<image:tag>
Note: When using Gitea registry, base images use internal cluster registry: gitea-http.gitea.svc.cluster.local:3000
""" % (base_registry, python_image))
default_registry('localhost:5001')
default_registry('localhost:5000')
# =============================================================================
# INGRESS HEALTH CHECK
# =============================================================================
# Check ingress status and readiness
# Check ingress status and readiness with improved logic
local_resource(
'ingress-status-check',
cmd='''
@@ -343,96 +250,31 @@ local_resource(
echo "INGRESS CONTROLLER STATUS:"
kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller
# Wait for the project's ingress resources to be created
echo ""
echo "Waiting for project ingress resources to be created..."
# Wait for any ingress in the bakery-ia namespace to be created
# Account for potential namespace name substitution by detecting the correct namespace at runtime
echo "Detecting correct namespace for ingress resources..."
# The namespace name might have been substituted during kustomize processing
# Look for ingress resources in any namespace that could be ours
COUNT=0
MAX_COUNT=24 # 2 minutes with 5-second intervals
while [ $COUNT -lt $MAX_COUNT ]; do
# Look for ingress resources in any namespace
FOUND_INGRESS_NS=$(kubectl get ingress --all-namespaces --no-headers 2>/dev/null | grep -v "ingress-nginx" | head -1 | awk '{print $1}')
if [ -n "$FOUND_INGRESS_NS" ]; then
NAMESPACE="$FOUND_INGRESS_NS"
echo "Found ingress resources in namespace: $NAMESPACE"
break
fi
echo "Waiting for ingress resources to be created in any namespace..."
sleep 5
COUNT=$((COUNT + 1))
done
if [ $COUNT -eq $MAX_COUNT ]; then
echo "Warning: No ingress resources found after timeout."
echo "Listing all namespaces to help diagnose:"
kubectl get namespaces
echo "Listing all ingress resources:"
kubectl get ingress --all-namespaces
# Fallback to bakery-ia namespace
NAMESPACE="bakery-ia"
# Quick check: verify ingress controller is running
echo "Quick check: verifying ingress controller is running..."
if kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller | grep -q "Running"; then
echo "✓ Ingress controller is running"
else
echo "Using detected namespace: $NAMESPACE"
echo "⚠ Ingress controller may not be running properly"
fi
# Now wait for ingress resources in the detected namespace
COUNT=0
MAX_COUNT=24 # 2 minutes with 5-second intervals
while [ $COUNT -lt $MAX_COUNT ]; do
# Check if namespace exists before querying ingress
if kubectl get namespace "$NAMESPACE" &>/dev/null; then
INGRESS_COUNT=$(kubectl get ingress -n "$NAMESPACE" --no-headers 2>/dev/null | wc -l)
if [ "$INGRESS_COUNT" -gt 0 ]; then
echo "Ingress resources found in $NAMESPACE namespace."
break
fi
fi
echo "Waiting for ingress resources in $NAMESPACE namespace to be created..."
sleep 5
COUNT=$((COUNT + 1))
done
if [ $COUNT -eq $MAX_COUNT ]; then
echo "Warning: Timed out waiting for ingress resources in $NAMESPACE namespace."
echo "Listing all namespaces to help diagnose:"
kubectl get namespaces
echo "Listing all ingress resources:"
kubectl get ingress --all-namespaces
fi
# Brief pause to allow any pending ingress resources to be processed
sleep 2
# Wait for ingress to have address assigned (be more flexible about the name)
echo "Waiting for ingress to have address assigned..."
# Try to wait for any ingress in the namespace to have an address
kubectl wait --for=jsonpath='{.status.loadBalancer.ingress[0].ip}' ingress --all -n "$NAMESPACE" --timeout=30s 2>/dev/null || echo "Ingress may not have external IP (this is OK in Kind)"
# Check ingress resources
# Check ingress resources (just report status, don't wait)
echo ""
echo "INGRESS RESOURCES:"
kubectl get ingress -A
# Check specific ingress for our namespace
echo ""
echo "BAKERY-IA INGRESS DETAILS:"
kubectl get ingress -n "$NAMESPACE" -o wide
kubectl get ingress -A 2>/dev/null || echo "No ingress resources found yet"
# Check ingress load balancer status
echo ""
echo "INGRESS LOAD BALANCER STATUS:"
kubectl get svc -n ingress-nginx ingress-nginx-controller -o wide
# Wait a bit for ingress to fully initialize
sleep 10
kubectl get svc -n ingress-nginx ingress-nginx-controller -o wide 2>/dev/null || echo "Ingress controller service not found"
# Verify ingress endpoints
echo ""
echo "INGRESS ENDPOINTS:"
kubectl get endpoints -n ingress-nginx
kubectl get endpoints -n ingress-nginx 2>/dev/null || echo "Ingress endpoints not found"
# Test connectivity to the ingress endpoints
echo ""
@@ -441,12 +283,15 @@ local_resource(
kubectl exec -n ingress-nginx deployment/ingress-nginx-controller --container controller -- \
/nginx-ingress-controller --version > /dev/null 2>&1 && echo "✓ Ingress controller accessible"
# In Kind clusters, ingresses typically don't get external IPs, so we just verify they exist
echo "In Kind clusters, ingresses don't typically get external IPs - this is expected behavior"
echo ""
echo "Ingress status check completed successfully!"
echo "Project ingress resources are ready for Gitea and other services."
echo "=========================================="
''',
resource_deps=['apply-k8s-manifests'], # Step 2 depends on Step 1
resource_deps=['security-setup'], # According to requested order: security-setup -> ingress-status-check
labels=['00-ingress-check'],
auto_init=True,
allow_parallel=False
@@ -478,18 +323,17 @@ Applying security configurations...
""")
# Apply security configurations before loading main manifests
# Security setup always depends on prepull-base-images to ensure images are cached
# When using Gitea registry, the dependency chain is:
# ingress-status-check -> gitea -> prepull-base-images -> security-setup
# When using local registry, the dependency chain is:
# prepull-base-images -> security-setup
security_resource_deps = ['prepull-base-images'] # Always depend on prepull
# Apply security configurations after applying manifests
# According to requested order: apply-k8s-manifests -> security-setup
security_resource_deps = ['apply-k8s-manifests'] # Depend on manifests first
local_resource(
'security-setup',
cmd='''
echo "Applying security secrets and configurations..."
echo "=========================================="
echo "APPLYING SECRETS AND TLS CERTIFICATIONS"
echo "=========================================="
echo "Setting up security configurations..."
# First, ensure all required namespaces exist
echo "Creating namespaces..."
@@ -507,10 +351,12 @@ local_resource(
done
# Apply common secrets and configs
echo "Applying common configurations..."
kubectl apply -f infrastructure/environments/common/configs/configmap.yaml
kubectl apply -f infrastructure/environments/common/configs/secrets.yaml
# Apply database secrets and configs
echo "Applying database security configurations..."
kubectl apply -f infrastructure/platform/storage/postgres/secrets/postgres-tls-secret.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-init-config.yaml
kubectl apply -f infrastructure/platform/storage/postgres/configs/postgres-logging-config.yaml
@@ -525,32 +371,145 @@ local_resource(
# Apply Mail/SMTP secrets (already included in common/configs/secrets.yaml)
# Apply CI/CD secrets
kubectl apply -f infrastructure/cicd/tekton-helm/templates/secrets.yaml
# Note: infrastructure/cicd/tekton-helm/templates/secrets.yaml is a Helm template file
# and should be applied via the Helm chart deployment, not directly with kubectl
echo "Skipping infrastructure/cicd/tekton-helm/templates/secrets.yaml (Helm template file)"
echo "This file will be applied when the Tekton Helm chart is deployed"
echo "Security configurations applied"
# Apply self-signed ClusterIssuer for cert-manager (required before certificates)
echo "Applying self-signed ClusterIssuer..."
kubectl apply -f infrastructure/platform/cert-manager/selfsigned-issuer.yaml
# Wait for ClusterIssuer to be ready
echo "Waiting for ClusterIssuer to be ready..."
kubectl wait --for=condition=Ready clusterissuer/selfsigned-issuer --timeout=60s || echo "ClusterIssuer may still be provisioning..."
# Apply TLS certificates for ingress
echo "Applying TLS certificates for ingress..."
kubectl apply -f infrastructure/environments/dev/k8s-manifests/dev-certificate.yaml
# Wait for cert-manager to create the certificate
echo "Waiting for TLS certificate to be ready..."
kubectl wait --for=condition=Ready certificate/bakery-dev-tls-cert -n bakery-ia --timeout=120s || echo "Certificate may still be provisioning..."
# Verify TLS certificates are created
echo "Verifying TLS certificates..."
if kubectl get secret bakery-dev-tls-cert -n bakery-ia &>/dev/null; then
echo "✓ TLS certificate 'bakery-dev-tls-cert' found in bakery-ia namespace"
else
echo "⚠ TLS certificate 'bakery-dev-tls-cert' not found, may still be provisioning"
fi
# Verify other secrets are created
echo "Verifying security secrets..."
for secret in gitea-admin-secret; do
if kubectl get secret $secret -n gitea &>/dev/null; then
echo "✓ Secret '$secret' found in gitea namespace"
else
echo " Secret '$secret' not found in gitea namespace (will be created when Gitea is deployed)"
fi
done
echo ""
echo "Security configurations applied successfully!"
echo "TLS certificates and secrets are ready for use."
echo "=========================================="
''',
resource_deps=security_resource_deps, # Conditional dependency based on registry usage
labels=['00-security'],
auto_init=True
)
# Kind cluster configuration for registry access
local_resource(
'kind-cluster-configuration',
cmd='''
echo "=========================================="
echo "CONFIGURING KIND CLUSTER FOR REGISTRY ACCESS"
echo "=========================================="
echo "Setting up localhost:5000 access in Kind cluster..."
echo ""
# Wait for the TLS certificate to be available
echo "Waiting for TLS certificate to be ready..."
MAX_RETRIES=30
RETRY_COUNT=0
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
if kubectl get secret bakery-dev-tls-cert -n bakery-ia &>/dev/null; then
echo "TLS certificate is ready"
break
fi
echo " Waiting for TLS certificate... (attempt $((RETRY_COUNT+1))/$MAX_RETRIES)"
sleep 5
RETRY_COUNT=$((RETRY_COUNT+1))
done
if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
echo "⚠ Warning: TLS certificate not ready after $MAX_RETRIES attempts"
echo " Proceeding with configuration anyway..."
fi
# Add localhost:5000 registry configuration to containerd
echo "Configuring containerd to access localhost:5000 registry..."
# Create the hosts.toml file for containerd to access localhost:5000 registry
if docker exec bakery-ia-local-control-plane sh -c 'cat > /etc/containerd/certs.d/localhost:5000/hosts.toml << EOF
server = "http://localhost:5000"
[host."http://kind-registry:5000"]
capabilities = ["pull", "resolve", "push"]
skip_verify = true
EOF'; then
echo "✓ Successfully created hosts.toml for localhost:5000 registry access"
else
echo "⚠ Failed to create hosts.toml for containerd"
echo " This may be because the Kind container is not running yet"
echo " The kubernetes_restart.sh script should handle this configuration"
fi
# Create the hosts.toml file for kind-registry:5000 (used by migration jobs)
if docker exec bakery-ia-local-control-plane sh -c 'cat > /etc/containerd/certs.d/kind-registry:5000/hosts.toml << EOF
server = "http://kind-registry:5000"
[host."http://kind-registry:5000"]
capabilities = ["pull", "resolve", "push"]
skip_verify = true
EOF'; then
echo "✓ Successfully created hosts.toml for kind-registry:5000 access"
else
echo "⚠ Failed to create hosts.toml for kind-registry:5000"
echo " This may be because the Kind container is not running yet"
echo " The kubernetes_restart.sh script should handle this configuration"
fi
echo ""
echo "Kind cluster configuration completed!"
echo "Registry access should now be properly configured."
echo "=========================================="
''',
resource_deps=['ingress-status-check'], # According to requested order: ingress-status-check -> kind-cluster-configuration
labels=['00-kind-config'],
auto_init=True,
allow_parallel=False
)
# Verify TLS certificates are mounted correctly
# =============================================================================
# LOAD KUBERNETES MANIFESTS
# EXECUTE OVERLAYS KUSTOMIZATIONS
# =============================================================================
# Load the main kustomize overlay for the dev environment
# Execute the main kustomize overlay for the dev environment with proper dependencies
k8s_yaml(kustomize('infrastructure/environments/dev/k8s-manifests'))
# Create a visible resource for applying Kubernetes manifests
# Create a visible resource for applying Kubernetes manifests with proper dependencies
local_resource(
'apply-k8s-manifests',
cmd='''
echo "=========================================="
echo "APPLYING KUBERNETES MANIFESTS"
echo "EXECUTING OVERLAYS KUSTOMIZATIONS"
echo "=========================================="
echo "Loading all Kubernetes resources including ingress configuration..."
echo ""
@@ -561,7 +520,7 @@ local_resource(
echo "- Security configurations"
echo "- CI/CD configurations"
echo ""
echo "Kubernetes manifests applied successfully!"
echo "Overlays kustomizations executed successfully!"
echo "=========================================="
''',
labels=['00-k8s-manifests'],
@@ -793,7 +752,7 @@ local_resource(
# Install Unbound with appropriate values
if [ "$ENVIRONMENT" = "dev" ]; then
helm upgrade --install unbound infrastructure/platform/infrastructure/unbound-helm \
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
-n bakery-ia \
--create-namespace \
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
@@ -949,7 +908,15 @@ local_resource(
fi
# =====================================================
# Step 5: Wait for pods and show status
# Step 5: Apply Mailu Ingress
# =====================================================
echo ""
echo "Applying Mailu ingress configuration..."
kubectl apply -f infrastructure/platform/mail/mailu-helm/mailu-ingress.yaml
echo "Mailu ingress applied for mail.bakery-ia.dev"
# =====================================================
# Step 6: Wait for pods and show status
# =====================================================
echo ""
echo "Waiting for Mailu pods to be ready..."
@@ -1092,19 +1059,62 @@ local_resource(
echo "Deploying Flux CD GitOps Toolkit..."
echo ""
# Check if Flux CLI is installed, install if missing
if ! command -v flux &> /dev/null; then
echo "Flux CLI not found, installing..."
# Determine OS and architecture
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m | tr '[:upper:]' '[:lower:]')
# Convert architecture format
if [[ "$ARCH" == "x86_64" ]]; then
ARCH="amd64"
elif [[ "$ARCH" == "aarch64" ]]; then
ARCH="arm64"
fi
# Download and install Flux CLI to user's local bin
echo "Detected OS: $OS, Architecture: $ARCH"
FLUX_VERSION="2.7.5"
DOWNLOAD_URL="https://github.com/fluxcd/flux2/releases/download/v${FLUX_VERSION}/flux_${FLUX_VERSION}_${OS}_${ARCH}.tar.gz"
echo "Downloading Flux CLI from: $DOWNLOAD_URL"
mkdir -p ~/.local/bin
cd /tmp
curl -sL "$DOWNLOAD_URL" -o flux.tar.gz
tar xzf flux.tar.gz
chmod +x flux
mv flux ~/.local/bin/
# Add to PATH if not already there
export PATH="$HOME/.local/bin:$PATH"
# Verify installation
if command -v flux &> /dev/null; then
echo "Flux CLI installed successfully"
else
echo "ERROR: Failed to install Flux CLI"
exit 1
fi
else
echo "Flux CLI is already installed"
fi
# Check if Flux CRDs are installed, install if missing
if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
echo "Installing Flux CRDs..."
flux install --namespace=flux-system --network-policy=false
else
echo "Flux CRDs are already installed"
fi
# Check if Flux is already deployed
if helm list -n flux-system | grep -q flux-cd; then
echo "Flux CD already deployed, checking status..."
helm status flux-cd -n flux-system
else
echo "Installing Flux CD..."
# Install Flux CRDs first if not already installed
if ! kubectl get crd gitrepositories.source.toolkit.fluxcd.io >/dev/null 2>&1; then
echo "Installing Flux CRDs..."
curl -sL https://fluxcd.io/install.sh | sudo bash
flux install --namespace=flux-system --network-policy=false
fi
echo "Installing Flux CD Helm release..."
# Create the namespace if it doesn't exist
kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply -f -
@@ -1311,18 +1321,33 @@ local_resource(
echo "Setting up Tekton Pipelines for CI/CD using Helm..."
echo ""
# Check if Tekton CRDs are already installed
# Check if Tekton Pipelines CRDs are already installed
if kubectl get crd pipelines.tekton.dev >/dev/null 2>&1; then
echo " Tekton CRDs already installed"
echo " Tekton Pipelines CRDs already installed"
else
echo " Installing Tekton v0.57.0..."
echo " Installing Tekton Pipelines..."
kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
echo " Waiting for Tekton to be ready..."
echo " Waiting for Tekton Pipelines to be ready..."
kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-controller -n tekton-pipelines
kubectl wait --for=condition=available --timeout=180s deployment/tekton-pipelines-webhook -n tekton-pipelines
echo " Tekton installed and ready"
echo " Tekton Pipelines installed and ready"
fi
# Check if Tekton Triggers CRDs are already installed
if kubectl get crd eventlisteners.triggers.tekton.dev >/dev/null 2>&1; then
echo " Tekton Triggers CRDs already installed"
else
echo " Installing Tekton Triggers..."
kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml
kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml
echo " Waiting for Tekton Triggers to be ready..."
kubectl wait --for=condition=available --timeout=180s deployment/tekton-triggers-controller -n tekton-pipelines
kubectl wait --for=condition=available --timeout=180s deployment/tekton-triggers-webhook -n tekton-pipelines
echo " Tekton Triggers installed and ready"
fi
echo ""
@@ -1356,161 +1381,28 @@ local_resource(
auto_init=False, # Manual trigger only
)
# Gitea - Auto-install when Gitea registry is enabled
gitea_enabled = use_gitea_registry # Enable Gitea when using Gitea registry
if 'ENABLE_GITEA' in os.environ:
gitea_enabled = os.environ['ENABLE_GITEA'].lower() == 'true'
# Gitea - Simple Helm installation for dev environment
local_resource(
'gitea',
cmd='''
echo "Installing Gitea via Helm..."
if gitea_enabled:
local_resource(
'gitea',
cmd='''
echo "Setting up Gitea for local Git server and container registry..."
echo ""
# Create namespace
kubectl create namespace gitea --dry-run=client -o yaml | kubectl apply -f -
# Wait for ingress controller to be ready before proceeding
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts 2>/dev/null || true
helm repo update gitea
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml --wait
# Verify ingress resources are properly configured
echo "Verifying ingress configuration..."
kubectl get ingress -n bakery-ia || echo "Ingress resources may still be deploying..."
# Small delay to ensure ingress is fully operational
sleep 10
# Create namespace
kubectl create namespace gitea || true
# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml
# Wait for Gitea to be ready before proceeding
echo "Waiting for Gitea to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gitea -n gitea --timeout=300s
# Check if admin user already exists by attempting to get user list
echo "Checking if admin user already exists..."
ADMIN_EXISTS=$(kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user list --admin | grep -c "bakery-admin" || echo "0")
if [ "$ADMIN_EXISTS" -eq 0 ]; then
echo "Creating Gitea admin user..."
# Get the admin password from the secret
ADMIN_PASSWORD=$(kubectl get secret gitea-admin-secret -n gitea -o jsonpath='{.data.password}' | base64 -d)
# Create the admin user
kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user create \
--username bakery-admin \
--password "$ADMIN_PASSWORD" \
--email admin@bakery-ia.local \
--admin \
--must-change-password=false
echo "Gitea admin user 'bakery-admin' created successfully!"
else
echo "Gitea admin user 'bakery-admin' already exists."
fi
echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: https://gitea.bakery-ia.local (for dev) or https://gitea.bakewise.ai (for prod)"
echo "Registry URL: https://registry.bakery-ia.local or gitea.bakery-ia.local:5000"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
''',
resource_deps=['ingress-status-check'], # Depend on ingress check to ensure routing is ready
labels=['99-cicd'],
auto_init=True, # Auto-install when enabled
allow_parallel=False
)
else:
# Manual trigger option for when Gitea registry is not enabled but user wants Gitea
local_resource(
'gitea',
cmd='''
echo "Setting up Gitea for local Git server and container registry..."
echo ""
# Wait for ingress controller to be ready before proceeding
echo "Waiting for ingress controller to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=controller -n ingress-nginx --timeout=300s
# Verify ingress resources are properly configured
echo "Verifying ingress configuration..."
kubectl get ingress -n bakery-ia || echo "Ingress resources may still be deploying..."
# Small delay to ensure ingress is fully operational
sleep 10
# Create namespace
kubectl create namespace gitea || true
# Create admin secret first
chmod +x infrastructure/cicd/gitea/setup-admin-secret.sh
./infrastructure/cicd/gitea/setup-admin-secret.sh
# Install Gitea using Helm
helm repo add gitea https://dl.gitea.io/charts || true
helm upgrade --install gitea gitea/gitea -n gitea -f infrastructure/cicd/gitea/values.yaml
# Wait for Gitea to be ready before proceeding
echo "Waiting for Gitea to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gitea -n gitea --timeout=300s
# Check if admin user already exists by attempting to get user list
echo "Checking if admin user already exists..."
ADMIN_EXISTS=$(kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user list --admin | grep -c "bakery-admin" || echo "0")
if [ "$ADMIN_EXISTS" -eq 0 ]; then
echo "Creating Gitea admin user..."
# Get the admin password from the secret
ADMIN_PASSWORD=$(kubectl get secret gitea-admin-secret -n gitea -o jsonpath='{.data.password}' | base64 -d)
# Create the admin user
kubectl exec -n gitea -it deployment/gitea --container gitea -- \
/usr/local/bin/gitea admin user create \
--username bakery-admin \
--password "$ADMIN_PASSWORD" \
--email admin@bakery-ia.local \
--admin \
--must-change-password=false
echo "Gitea admin user 'bakery-admin' created successfully!"
else
echo "Gitea admin user 'bakery-admin' already exists."
fi
echo ""
echo "Gitea setup complete!"
echo "Access Gitea at: https://gitea.bakery-ia.local (for dev) or https://gitea.bakewise.ai (for prod)"
echo "Registry URL: https://registry.bakery-ia.local"
echo "Make sure to add the appropriate hostname to /etc/hosts or configure DNS"
echo "Check status: kubectl get pods -n gitea"
echo "To uninstall: helm uninstall gitea -n gitea"
# Sync registry credentials to bakery-ia namespace for pod image pulls
echo ""
echo "Syncing registry credentials to bakery-ia namespace..."
chmod +x infrastructure/cicd/gitea/sync-registry-secret.sh
./infrastructure/cicd/gitea/sync-registry-secret.sh
echo ""
echo "Registry secret synced! Pods in bakery-ia namespace can now pull from registry.bakery-ia.local"
''',
labels=['99-cicd'],
auto_init=False, # Manual trigger only
)
echo ""
echo "Gitea installed!"
echo "Access: https://gitea.bakery-ia.local"
echo "Status: kubectl get pods -n gitea"
''',
labels=['99-cicd'],
auto_init=False,
)
# =============================================================================