Add new infra architecture 6

Urtzi Alfaro
2026-01-19 16:31:11 +01:00
parent b78399da2c
commit 7d6845574c
58 changed files with 2360 additions and 492 deletions

View File

@@ -3,6 +3,3 @@ kind: Kustomization
resources:
- gateway-service.yaml
- nominatim/nominatim.yaml
- nominatim/nominatim-init-job.yaml
- unbound/unbound.yaml

View File

@@ -1,158 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nominatim-config
namespace: bakery-ia
labels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
data:
NOMINATIM_PBF_URL: "http://download.geofabrik.de/europe/spain-latest.osm.pbf"
NOMINATIM_REPLICATION_URL: "https://download.geofabrik.de/europe/spain-updates"
NOMINATIM_IMPORT_STYLE: "address"
NOMINATIM_THREADS: "4"
NOMINATIM_FLATNODE_FILE: "/nominatim-flatnode/flatnode.bin"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nominatim-data
namespace: bakery-ia
labels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nominatim-flatnode
namespace: bakery-ia
labels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nominatim
namespace: bakery-ia
labels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
app.kubernetes.io/part-of: bakery-ia
spec:
serviceName: nominatim-service
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
template:
metadata:
labels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
spec:
containers:
- name: nominatim
image: mediagis/nominatim:4.4
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: nominatim-data
mountPath: /var/lib/postgresql
- name: nominatim-flatnode
mountPath: /nominatim-flatnode
env:
- name: NOMINATIM_PBF_URL
valueFrom:
configMapKeyRef:
name: nominatim-config
key: NOMINATIM_PBF_URL
- name: NOMINATIM_REPLICATION_URL
valueFrom:
configMapKeyRef:
name: nominatim-config
key: NOMINATIM_REPLICATION_URL
- name: NOMINATIM_IMPORT_STYLE
valueFrom:
configMapKeyRef:
name: nominatim-config
key: NOMINATIM_IMPORT_STYLE
- name: NOMINATIM_THREADS
valueFrom:
configMapKeyRef:
name: nominatim-config
key: NOMINATIM_THREADS
- name: NOMINATIM_FLATNODE_FILE
valueFrom:
configMapKeyRef:
name: nominatim-config
key: NOMINATIM_FLATNODE_FILE
resources:
requests:
memory: "2Gi"
cpu: "1"
limits:
memory: "4Gi"
cpu: "2"
livenessProbe:
httpGet:
path: /status
port: 8080
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /status
port: 8080
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 5
volumes:
- name: nominatim-data
persistentVolumeClaim:
claimName: nominatim-data
- name: nominatim-flatnode
persistentVolumeClaim:
claimName: nominatim-flatnode
---
apiVersion: v1
kind: Service
metadata:
name: nominatim-service
namespace: bakery-ia
labels:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
spec:
selector:
app.kubernetes.io/name: nominatim
app.kubernetes.io/component: geocoding
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: http
type: ClusterIP

View File

@@ -1,81 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: unbound-resolver
namespace: bakery-ia
labels:
app.kubernetes.io/name: unbound-resolver
app.kubernetes.io/component: dns
app.kubernetes.io/part-of: bakery-ia
spec:
replicas: 1 # Scale to 2+ in production with anti-affinity
selector:
matchLabels:
app.kubernetes.io/name: unbound-resolver
app.kubernetes.io/component: dns
template:
metadata:
labels:
app.kubernetes.io/name: unbound-resolver
app.kubernetes.io/component: dns
spec:
containers:
- name: unbound
image: mvance/unbound:latest
ports:
- containerPort: 53
name: dns-udp
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "300m"
memory: "384Mi"
readinessProbe:
exec:
command:
- sh
- -c
- drill @127.0.0.1 -p 53 +dnssec example.org || nslookup -type=A example.org 127.0.0.1
initialDelaySeconds: 10
periodSeconds: 30
livenessProbe:
exec:
command:
- sh
- -c
- drill @127.0.0.1 -p 53 +dnssec example.org || nslookup -type=A example.org 127.0.0.1
initialDelaySeconds: 30
periodSeconds: 60
securityContext:
capabilities:
add: ["NET_BIND_SERVICE"]
---
apiVersion: v1
kind: Service
metadata:
name: unbound-dns
namespace: bakery-ia
labels:
app.kubernetes.io/name: unbound-resolver
app.kubernetes.io/component: dns
spec:
type: ClusterIP
ports:
- name: dns-udp
port: 53
targetPort: 53
protocol: UDP
- name: dns-tcp
port: 53
targetPort: 53
protocol: TCP
selector:
app.kubernetes.io/name: unbound-resolver
app.kubernetes.io/component: dns

View File

@@ -0,0 +1,38 @@
# CoreDNS ConfigMap patch to forward external DNS queries to Unbound for DNSSEC validation
# This is required for Mailu Admin, which needs a DNSSEC-validating DNS resolver
#
# Apply with: kubectl apply -f coredns-unbound-patch.yaml
# Then restart CoreDNS: kubectl rollout restart deployment coredns -n kube-system
#
# Note: The Unbound service IP (10.104.127.213) may change when the cluster is recreated.
# The setup script will automatically update this based on the actual Unbound service IP.
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . UNBOUND_SERVICE_IP {
max_concurrent 1000
}
cache 30 {
disable success cluster.local
disable denial cluster.local
}
loop
reload
loadbalance
}
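The header above notes that UNBOUND_SERVICE_IP is substituted by the setup script. A minimal sketch of that substitution, assuming the file is saved as coredns-unbound-patch.yaml (the name used in the header) and the Unbound service is named unbound-dns in the bakery-ia namespace, as elsewhere in this commit:
# Sketch: resolve the current Unbound ClusterIP, apply the patch, restart CoreDNS.
UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
sed "s/UNBOUND_SERVICE_IP/${UNBOUND_IP}/" coredns-unbound-patch.yaml | kubectl apply -f -
kubectl rollout restart deployment coredns -n kube-system
kubectl rollout status deployment coredns -n kube-system --timeout=60s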

View File

@@ -0,0 +1,26 @@
# Self-signed TLS certificate secret for Mailu Front
# This is required by the Mailu Helm chart even when TLS is disabled (tls.flavor: notls)
# The Front pod mounts this secret for internal certificate handling
#
# For production, replace with proper certificates from cert-manager or Let's Encrypt
# The setup script generates a self-signed certificate valid for 365 days
#
# To regenerate manually:
# openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
# -keyout tls.key -out tls.crt \
# -subj "/CN=mail.bakery-ia.local/O=bakery-ia"
# kubectl create secret tls mailu-certificates \
# --cert=tls.crt --key=tls.key -n bakery-ia
apiVersion: v1
kind: Secret
metadata:
name: mailu-certificates
namespace: bakery-ia
labels:
app.kubernetes.io/name: mailu
app.kubernetes.io/component: certificates
type: kubernetes.io/tls
data:
# Placeholder - will be generated dynamically by the setup script
tls.crt: ""
tls.key: ""
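A minimal sketch of filling the placeholder data in one step, reusing the openssl invocation from the comment above; the --dry-run=client render makes the command idempotent so it can also refresh an existing secret:
# Sketch: generate the self-signed pair and create or update the secret in place.
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout tls.key -out tls.crt \
  -subj "/CN=mail.bakery-ia.local/O=bakery-ia"
kubectl create secret tls mailu-certificates --cert=tls.crt --key=tls.key \
  -n bakery-ia --dry-run=client -o yaml | kubectl apply -f -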

View File

@@ -1,20 +1,23 @@
# Development-tuned Mailu configuration
global:
# Use the unbound service IP - will be replaced during deployment
custom_dns_servers: "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
# Using Kubernetes cluster DNS for name resolution
# Unbound service is available at unbound-dns.bakery-ia.svc.cluster.local
custom_dns_servers: "10.96.0.10" # Kubernetes cluster DNS IP
# Redis configuration - use built-in Mailu Redis (no authentication needed)
externalRedis:
enabled: false
# Component-specific DNS configuration
# Admin uses Kubernetes DNS (ClusterFirst) to resolve internal services like Redis
# DNSSEC validation is handled at the application level by rspamd
admin:
dnsPolicy: "None"
dnsConfig:
nameservers:
- "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
dnsPolicy: "ClusterFirst"
# RSPAMD needs Unbound for DNSSEC validation (DKIM/SPF/DMARC checks)
# Using ClusterFirst, so queries go to Kubernetes DNS (CoreDNS), which forwards external lookups to Unbound
rspamd:
dnsPolicy: "None"
dnsConfig:
nameservers:
- "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
dnsPolicy: "ClusterFirst"
# Domain configuration for dev
domain: "bakery-ia.local"
@@ -96,7 +99,7 @@ ingress:
# TLS flavor for dev (may use self-signed)
tls:
flavor: "cert"
flavor: "notls" # Disable TLS for development
# Welcome message (disabled in dev)
welcomeMessage:
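With dnsPolicy back to ClusterFirst, the admin pod should resolve in-cluster names through CoreDNS. A quick spot check, assuming the admin Deployment is named mailu-admin (as in the deployment script in this commit) and that the Flask-based admin image ships python3; swap the hostname for the release's Redis service to exercise the Redis path specifically:
# Sketch: confirm in-cluster name resolution from the admin pod.
kubectl exec -n bakery-ia deployment/mailu-admin -- \
  python3 -c "import socket; print(socket.gethostbyname('kubernetes.default.svc.cluster.local'))"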

View File

@@ -1,20 +1,20 @@
# Production-tuned Mailu configuration
global:
# Use the unbound service IP - will be replaced during deployment
custom_dns_servers: "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
# Using Kubernetes cluster DNS for name resolution
custom_dns_servers: "10.96.0.10" # Kubernetes cluster DNS IP
# Component-specific DNS configuration
# Redis configuration - use built-in Mailu Redis (no authentication needed for internal)
externalRedis:
enabled: false
# DNS configuration for production
# Use Kubernetes DNS (ClusterFirst) which forwards to Unbound via CoreDNS
# This is configured automatically by the mailu-helm Tilt resource
admin:
dnsPolicy: "None"
dnsConfig:
nameservers:
- "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
dnsPolicy: "ClusterFirst"
rspamd:
dnsPolicy: "None"
dnsConfig:
nameservers:
- "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
dnsPolicy: "ClusterFirst"
# Domain configuration for production
domain: "bakewise.ai"

View File

@@ -0,0 +1,260 @@
#!/bin/bash
# =============================================================================
# Mailu Production Deployment Script
# =============================================================================
# This script automates the deployment of Mailu mail server for production.
# It handles:
# 1. Unbound DNS deployment (for DNSSEC validation)
# 2. CoreDNS configuration (forward to Unbound)
# 3. TLS certificate secret creation
# 4. Mailu Helm deployment
# 5. Admin user creation
#
# Usage:
# ./deploy-mailu-prod.sh [--domain DOMAIN] [--admin-password PASSWORD]
#
# Example:
# ./deploy-mailu-prod.sh --domain bakewise.ai --admin-password 'SecurePass123!'
# =============================================================================
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values
DOMAIN="${DOMAIN:-bakewise.ai}"
ADMIN_PASSWORD="${ADMIN_PASSWORD:-}"
NAMESPACE="bakery-ia"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MAILU_HELM_DIR="$(dirname "$SCRIPT_DIR")"
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--domain)
DOMAIN="$2"
shift 2
;;
--admin-password)
ADMIN_PASSWORD="$2"
shift 2
;;
--help)
echo "Usage: $0 [--domain DOMAIN] [--admin-password PASSWORD]"
echo ""
echo "Options:"
echo " --domain Domain for Mailu (default: bakewise.ai)"
echo " --admin-password Password for admin@DOMAIN user"
echo ""
exit 0
;;
*)
echo -e "${RED}Unknown option: $1${NC}"
exit 1
;;
esac
done
print_step() {
echo -e "\n${BLUE}==>${NC} ${GREEN}$1${NC}"
}
print_warning() {
echo -e "${YELLOW}WARNING:${NC} $1"
}
print_error() {
echo -e "${RED}ERROR:${NC} $1"
}
print_success() {
echo -e "${GREEN}${NC} $1"
}
# =============================================================================
# Step 0: Prerequisites Check
# =============================================================================
print_step "Step 0: Checking prerequisites..."
if ! command -v kubectl &> /dev/null; then
print_error "kubectl not found. Please install kubectl."
exit 1
fi
if ! command -v helm &> /dev/null; then
print_error "helm not found. Please install helm."
exit 1
fi
if ! kubectl get namespace "$NAMESPACE" &>/dev/null; then
print_warning "Namespace $NAMESPACE does not exist. Creating..."
kubectl create namespace "$NAMESPACE"
fi
print_success "Prerequisites check passed"
# =============================================================================
# Step 1: Deploy Unbound DNS Resolver
# =============================================================================
print_step "Step 1: Deploying Unbound DNS resolver..."
if kubectl get deployment unbound -n "$NAMESPACE" &>/dev/null; then
print_success "Unbound already deployed"
else
helm upgrade --install unbound "$MAILU_HELM_DIR/../../networking/dns/unbound-helm" \
-n "$NAMESPACE" \
-f "$MAILU_HELM_DIR/../../networking/dns/unbound-helm/values.yaml" \
-f "$MAILU_HELM_DIR/../../networking/dns/unbound-helm/prod/values.yaml" \
--timeout 5m \
--wait
print_success "Unbound deployed"
fi
# Wait for Unbound to be ready
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=unbound -n "$NAMESPACE" --timeout=120s
# Get Unbound service IP
UNBOUND_IP=$(kubectl get svc unbound-dns -n "$NAMESPACE" -o jsonpath='{.spec.clusterIP}')
echo "Unbound DNS service IP: $UNBOUND_IP"
# =============================================================================
# Step 2: Configure CoreDNS to Forward to Unbound
# =============================================================================
print_step "Step 2: Configuring CoreDNS for DNSSEC validation..."
# Check current CoreDNS forward configuration
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'forward \. [0-9.]*' | awk '{print $3}' || echo "")
if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
echo "Updating CoreDNS to forward to Unbound ($UNBOUND_IP)..."
kubectl patch configmap coredns -n kube-system --type merge -p "{
\"data\": {
\"Corefile\": \".:53 {\\n errors\\n health {\\n lameduck 5s\\n }\\n ready\\n kubernetes cluster.local in-addr.arpa ip6.arpa {\\n pods insecure\\n fallthrough in-addr.arpa ip6.arpa\\n ttl 30\\n }\\n prometheus :9153\\n forward . $UNBOUND_IP {\\n max_concurrent 1000\\n }\\n cache 30 {\\n disable success cluster.local\\n disable denial cluster.local\\n }\\n loop\\n reload\\n loadbalance\\n}\\n\"
}
}"
# Restart CoreDNS
kubectl rollout restart deployment coredns -n kube-system
kubectl rollout status deployment coredns -n kube-system --timeout=60s
print_success "CoreDNS configured to forward to Unbound"
else
print_success "CoreDNS already configured for Unbound"
fi
# =============================================================================
# Step 3: Create TLS Certificate Secret
# =============================================================================
print_step "Step 3: Creating TLS certificate secret..."
if kubectl get secret mailu-certificates -n "$NAMESPACE" &>/dev/null; then
print_success "TLS certificate secret already exists"
else
TEMP_DIR=$(mktemp -d)
cd "$TEMP_DIR"
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
-keyout tls.key -out tls.crt \
-subj "/CN=mail.$DOMAIN/O=$DOMAIN" 2>/dev/null
kubectl create secret tls mailu-certificates \
--cert=tls.crt \
--key=tls.key \
-n "$NAMESPACE"
rm -rf "$TEMP_DIR"
print_success "TLS certificate secret created"
fi
# =============================================================================
# Step 4: Deploy Mailu via Helm
# =============================================================================
print_step "Step 4: Deploying Mailu via Helm..."
# Add Mailu Helm repository
helm repo add mailu https://mailu.github.io/helm-charts 2>/dev/null || true
helm repo update mailu
# Deploy Mailu
helm upgrade --install mailu mailu/mailu \
-n "$NAMESPACE" \
-f "$MAILU_HELM_DIR/values.yaml" \
-f "$MAILU_HELM_DIR/prod/values.yaml" \
--timeout 10m
print_success "Mailu Helm release deployed"
# =============================================================================
# Step 5: Wait for Pods to be Ready
# =============================================================================
print_step "Step 5: Waiting for Mailu pods to be ready..."
echo "This may take 5-10 minutes (ClamAV takes time to initialize)..."
# Wait for admin pod first (it's the key dependency)
kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=admin -n "$NAMESPACE" --timeout=300s || {
print_error "Admin pod failed to start. Checking logs..."
kubectl logs -n "$NAMESPACE" -l app.kubernetes.io/component=admin --tail=50
exit 1
}
print_success "Admin pod is ready"
# Show pod status
echo ""
echo "Mailu Pod Status:"
kubectl get pods -n "$NAMESPACE" | grep mailu
# =============================================================================
# Step 6: Create Admin User
# =============================================================================
print_step "Step 6: Creating admin user..."
if [ -z "$ADMIN_PASSWORD" ]; then
# Generate a random password
ADMIN_PASSWORD=$(openssl rand -base64 16 | tr -d '/+=' | head -c 16)
echo -e "${YELLOW}Generated admin password: $ADMIN_PASSWORD${NC}"
echo -e "${YELLOW}Please save this password securely!${NC}"
fi
kubectl exec -n "$NAMESPACE" deployment/mailu-admin -- \
flask mailu admin admin "$DOMAIN" "$ADMIN_PASSWORD" 2>/dev/null || {
print_warning "Admin user may already exist or failed to create"
}
print_success "Admin user configured"
# =============================================================================
# Summary
# =============================================================================
echo ""
echo "=============================================="
echo -e "${GREEN}Mailu Deployment Complete!${NC}"
echo "=============================================="
echo ""
echo "Admin Credentials:"
echo " Email: admin@$DOMAIN"
echo " Password: $ADMIN_PASSWORD"
echo ""
echo "Access URLs (configure Ingress/DNS first):"
echo " Admin Panel: https://mail.$DOMAIN/admin"
echo " Webmail: https://mail.$DOMAIN/webmail"
echo " SMTP: mail.$DOMAIN:587 (STARTTLS)"
echo " IMAP: mail.$DOMAIN:993 (SSL)"
echo ""
echo "Next Steps:"
echo " 1. Configure DNS records (A, MX, SPF, DMARC)"
echo " 2. Get DKIM key: kubectl exec -n $NAMESPACE deployment/mailu-admin -- cat /dkim/$DOMAIN.dkim.pub"
echo " 3. Add DKIM TXT record to DNS"
echo " 4. Configure Ingress for mail.$DOMAIN"
echo ""
echo "To check pod status:"
echo " kubectl get pods -n $NAMESPACE | grep mailu"
echo ""

View File

@@ -3,8 +3,9 @@
# Global DNS configuration for DNSSEC validation
global:
# This will be replaced with the actual Unbound service IP during deployment
custom_dns_servers: "unbound-dns.bakery-ia.svc.cluster.local" # Using service DNS name instead of IP
# Using Unbound DNS resolver directly for DNSSEC validation
# Unbound service is available at unbound-dns.bakery-ia.svc.cluster.local
custom_dns_servers: "10.104.127.213" # Unbound service IP
# Domain configuration
domain: "DOMAIN_PLACEHOLDER"
@@ -25,7 +26,7 @@ postmaster: "admin"
# TLS configuration
tls:
flavor: "cert"
flavor: "notls" # Disable TLS for development
# Limits configuration
limits:
@@ -64,24 +65,24 @@ logLevel: "INFO"
# Network configuration
subnet: "10.42.0.0/16"
# Redis configuration - using external Redis (shared cluster Redis)
# Redis configuration - using internal Redis (built-in)
externalRedis:
enabled: true
host: "redis-service.bakery-ia.svc.cluster.local"
port: 6380
enabled: false
# host: "redis-service.bakery-ia.svc.cluster.local"
# port: 6380
adminQuotaDbId: 15
adminRateLimitDbId: 15
rspamdDbId: 15
# Database configuration - using external database
# Database configuration - using default SQLite (built-in)
externalDatabase:
enabled: true
type: "postgresql"
host: "postgres-service.bakery-ia.svc.cluster.local"
port: 5432
database: "mailu"
username: "mailu"
password: "E8Kz47YmVzDlHGs1M9wAbJzxcKnGONCT"
enabled: false
# type: "postgresql"
# host: "postgres-service.bakery-ia.svc.cluster.local"
# port: 5432
# database: "mailu"
# username: "mailu"
# password: "E8Kz47YmVzDlHGs1M9wAbJzxcKnGONCT"
# Persistence configuration
persistence:
@@ -210,16 +211,8 @@ networkPolicy:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
# DNS Policy Configuration for DNSSEC validation
# These settings ensure Mailu components use the Unbound DNS resolver
dnsPolicy: "None"
dnsConfig:
nameservers:
- "unbound-dns.bakery-ia.svc.cluster.local" # Points to the Unbound service in the bakery-ia namespace
options:
- name: ndots
value: "5"
- name: timeout
value: "5"
- name: attempts
value: "3"
# DNS Policy Configuration
# Use Kubernetes DNS (ClusterFirst) for internal service resolution
# DNSSEC validation for email is handled by rspamd component
# Note: For production with DNSSEC needs, configure CoreDNS to forward to Unbound
dnsPolicy: "ClusterFirst"

View File

@@ -0,0 +1,18 @@
apiVersion: v2
name: unbound
description: A Helm chart for deploying Unbound DNS resolver for Bakery-IA
type: application
version: 0.1.0
appVersion: "1.19.1"
maintainers:
- name: Bakery-IA Team
email: devops@bakery-ia.com
keywords:
- dns
- resolver
- caching
- unbound
home: https://www.nlnetlabs.nl/projects/unbound/
sources:
- https://github.com/NLnetLabs/unbound
- https://hub.docker.com/r/mvance/unbound

View File

@@ -0,0 +1,36 @@
# Development values for unbound DNS resolver
# Uses the same configuration as production for consistency
# Use official image for development (same as production)
image:
repository: "mvance/unbound"
tag: "latest"
pullPolicy: "IfNotPresent"
# Resource settings (slightly lower than production for dev)
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "300m"
memory: "384Mi"
# Single replica for development (can be scaled if needed)
replicaCount: 1
# Development annotations
podAnnotations:
environment: "development"
managed-by: "helm"
# Probe settings (same as production but slightly faster)
probes:
readiness:
initialDelaySeconds: 10
periodSeconds: 30
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"
liveness:
initialDelaySeconds: 30
periodSeconds: 60
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"

View File

@@ -0,0 +1,50 @@
# Production-specific values for unbound DNS resolver
# Overrides for the production environment
# Use official image for production
image:
repository: "mvance/unbound"
tag: "latest"
pullPolicy: "IfNotPresent"
# Production resource settings (higher limits for reliability)
resources:
requests:
cpu: "200m"
memory: "256Mi"
limits:
cpu: "500m"
memory: "512Mi"
# Production-specific settings
replicaCount: 2
# Production annotations
podAnnotations:
environment: "production"
critical: "true"
# Anti-affinity for high availability in production
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- unbound
topologyKey: "kubernetes.io/hostname"
# Production probe settings (more conservative)
probes:
readiness:
initialDelaySeconds: 15
periodSeconds: 30
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"
liveness:
initialDelaySeconds: 45
periodSeconds: 60
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"

View File

@@ -0,0 +1,63 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "unbound.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "unbound.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "unbound.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "unbound.labels" -}}
helm.sh/chart: {{ include "unbound.chart" . }}
{{ include "unbound.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "unbound.selectorLabels" -}}
app.kubernetes.io/name: {{ include "unbound.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: dns
app.kubernetes.io/part-of: bakery-ia
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "unbound.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "unbound.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
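For orientation: because unbound.fullname collapses to the release name whenever the release name already contains the chart name, an install named "unbound" renders the Deployment as simply unbound, while any other release name yields the usual RELEASE-unbound form. This can be checked without touching the cluster, assuming the chart lives in a local directory (the path is illustrative):
# Sketch: render the templates and inspect the generated resource names (chart path is assumed).
helm template unbound ./unbound-helm | grep -E '^  name:'
helm template dns ./unbound-helm | grep -E '^  name:'   # -> dns-unbound for the Deployment
Note that the Service name is pinned to global.dnsServiceName (unbound-dns) regardless of the release name, so consumers like CoreDNS always have a stable target.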

View File

@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "unbound.fullname" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "unbound.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "unbound.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "unbound.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "unbound.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: dns-udp
containerPort: {{ .Values.service.ports.dnsUdp }}
protocol: UDP
- name: dns-tcp
containerPort: {{ .Values.service.ports.dnsTcp }}
protocol: TCP
{{- if .Values.probes.readiness.enabled }}
readinessProbe:
exec:
command:
- sh
- -c
- {{ .Values.probes.readiness.command | quote }}
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
{{- end }}
{{- if .Values.probes.liveness.enabled }}
livenessProbe:
exec:
command:
- sh
- -c
- {{ .Values.probes.liveness.command | quote }}
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.extraInitContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.extraContainers }}
containers:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.global.dnsServiceName }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "unbound.labels" . | nindent 4 }}
{{- with .Values.serviceAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- name: dns-udp
port: {{ .Values.service.ports.dnsUdp }}
targetPort: {{ .Values.service.ports.dnsUdp }}
protocol: UDP
- name: dns-tcp
port: {{ .Values.service.ports.dnsTcp }}
targetPort: {{ .Values.service.ports.dnsTcp }}
protocol: TCP
selector:
{{- include "unbound.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "unbound.serviceAccountName" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "unbound.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,95 @@
# Default values for unbound DNS resolver
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Global settings
global:
# DNS service name for other services to reference
dnsServiceName: "unbound-dns"
namespace: "bakery-ia"
# Unbound image configuration
image:
repository: "mvance/unbound"
tag: "latest"
pullPolicy: "IfNotPresent"
# Deployment configuration
replicaCount: 1
# Resource limits and requests
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "300m"
memory: "384Mi"
# Security context
securityContext:
capabilities:
add: ["NET_BIND_SERVICE"]
# Service configuration
service:
type: "ClusterIP"
ports:
dnsUdp: 53
dnsTcp: 53
# Health probes configuration
probes:
readiness:
enabled: true
initialDelaySeconds: 10
periodSeconds: 30
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"
liveness:
enabled: true
initialDelaySeconds: 30
periodSeconds: 60
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"
# Additional environment variables
env: {}
# Additional volume mounts
volumeMounts: []
# Additional volumes
volumes: []
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}
# Pod annotations
podAnnotations: {}
# Service annotations
serviceAnnotations: {}
# Custom unbound configuration
config:
enabled: false
# Additional containers (sidecars)
extraContainers: []
# Additional init containers
extraInitContainers: []
# Service account configuration
serviceAccount:
create: false
annotations: {}
name: ""
# Pod security context
podSecurityContext: {}
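These defaults are meant to be layered with the per-environment files above, mirroring what deploy-mailu-prod.sh does in Step 1. A minimal sketch, with the chart directory path assumed:
# Sketch: install Unbound with base values plus the production overlay (paths are illustrative).
helm upgrade --install unbound ./unbound-helm \
  -n bakery-ia \
  -f ./unbound-helm/values.yaml \
  -f ./unbound-helm/prod/values.yaml \
  --wait --timeout 5m
kubectl get svc unbound-dns -n bakery-ia   # the ClusterIP CoreDNS should forward to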

View File

@@ -0,0 +1,19 @@
apiVersion: v2
name: nominatim
description: A Helm chart for deploying Nominatim geocoding service for Bakery-IA
type: application
version: 0.1.0
appVersion: "4.4"
maintainers:
- name: Bakery-IA Team
email: devops@bakery-ia.com
keywords:
- geocoding
- nominatim
- openstreetmap
- maps
- address
home: https://nominatim.org/
sources:
- https://github.com/mediagis/nominatim-docker
- https://hub.docker.com/r/mediagis/nominatim

View File

@@ -0,0 +1,38 @@
# Development values for Nominatim geocoding service
# Disabled by default in dev to save resources
# Use local registry image for development
image:
repository: "localhost:5000/mediagis_nominatim_4.4"
tag: "latest"
pullPolicy: "IfNotPresent"
# Disabled in dev (set to 0 replicas)
replicaCount: 0
# Init job disabled in dev
initJob:
enabled: false
# Lower resources for dev (when enabled)
resources:
requests:
cpu: "500m"
memory: "1Gi"
limits:
cpu: "1"
memory: "2Gi"
# Smaller PVCs for dev
persistence:
data:
enabled: true
size: "10Gi"
flatnode:
enabled: true
size: "5Gi"
# Development annotations
podAnnotations:
environment: "development"
managed-by: "helm"
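Because dev ships with replicaCount: 0 and the init Job disabled, Nominatim stays dormant until explicitly switched on. A sketch of a temporary opt-in without editing the file, with the chart path assumed:
# Sketch: enable Nominatim in dev for a one-off test (expects the dev-sized PVCs to be sufficient).
helm upgrade --install nominatim ./nominatim-helm \
  -n bakery-ia \
  -f ./nominatim-helm/values.yaml \
  -f ./nominatim-helm/dev/values.yaml \
  --set replicaCount=1 \
  --set initJob.enabled=true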

View File

@@ -0,0 +1,45 @@
# Production values for Nominatim geocoding service
# Full configuration for production deployment
# Use official Docker Hub image for production
image:
repository: "mediagis/nominatim"
tag: "4.4"
pullPolicy: "IfNotPresent"
# Single replica for production (can be scaled if needed)
replicaCount: 1
# Init job enabled in production
initJob:
enabled: true
resources:
requests:
cpu: "4"
memory: "8Gi"
limits:
cpu: "8"
memory: "16Gi"
# Production resources
resources:
requests:
cpu: "1"
memory: "2Gi"
limits:
cpu: "2"
memory: "4Gi"
# Full-size PVCs for production
persistence:
data:
enabled: true
size: "50Gi"
flatnode:
enabled: true
size: "20Gi"
# Production annotations
podAnnotations:
environment: "production"
managed-by: "helm"

View File

@@ -0,0 +1,87 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "nominatim.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "nominatim.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nominatim.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "nominatim.labels" -}}
helm.sh/chart: {{ include "nominatim.chart" . }}
{{ include "nominatim.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "nominatim.selectorLabels" -}}
app.kubernetes.io/name: {{ include "nominatim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: geocoding
app.kubernetes.io/part-of: bakery-ia
{{- end -}}
{{/*
ConfigMap name
*/}}
{{- define "nominatim.configMapName" -}}
{{- printf "%s-config" (include "nominatim.fullname" .) -}}
{{- end -}}
{{/*
Service name
*/}}
{{- define "nominatim.serviceName" -}}
{{- default (printf "%s-service" (include "nominatim.fullname" .)) .Values.service.name -}}
{{- end -}}
{{/*
Data PVC name
*/}}
{{- define "nominatim.dataPvcName" -}}
{{- printf "%s-data" (include "nominatim.fullname" .) -}}
{{- end -}}
{{/*
Flatnode PVC name
*/}}
{{- define "nominatim.flatnodePvcName" -}}
{{- printf "%s-flatnode" (include "nominatim.fullname" .) -}}
{{- end -}}
{{/*
Init job name
*/}}
{{- define "nominatim.initJobName" -}}
{{- printf "%s-init" (include "nominatim.fullname" .) -}}
{{- end -}}

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "nominatim.configMapName" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "nominatim.labels" . | nindent 4 }}
data:
NOMINATIM_PBF_URL: {{ .Values.config.pbfUrl | quote }}
NOMINATIM_REPLICATION_URL: {{ .Values.config.replicationUrl | quote }}
NOMINATIM_IMPORT_STYLE: {{ .Values.config.importStyle | quote }}
NOMINATIM_THREADS: {{ .Values.config.threads | quote }}
NOMINATIM_FLATNODE_FILE: {{ .Values.config.flatnodeFile | quote }}

View File

@@ -1,24 +1,25 @@
{{- if .Values.initJob.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: nominatim-init
namespace: bakery-ia
name: {{ include "nominatim.initJobName" . }}
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: nominatim-init
{{- include "nominatim.labels" . | nindent 4 }}
app.kubernetes.io/component: data-init
app.kubernetes.io/part-of: bakery-ia
spec:
ttlSecondsAfterFinished: 86400
ttlSecondsAfterFinished: {{ .Values.initJob.ttlSecondsAfterFinished }}
template:
metadata:
labels:
app.kubernetes.io/name: nominatim-init
app.kubernetes.io/name: {{ include "nominatim.initJobName" . }}
app.kubernetes.io/component: data-init
spec:
restartPolicy: OnFailure
containers:
- name: nominatim-import
image: mediagis/nominatim:4.4
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- sh
- -c
@@ -50,34 +51,30 @@ spec:
- name: NOMINATIM_PBF_URL
valueFrom:
configMapKeyRef:
name: nominatim-config
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_PBF_URL
- name: NOMINATIM_IMPORT_STYLE
valueFrom:
configMapKeyRef:
name: nominatim-config
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_IMPORT_STYLE
- name: NOMINATIM_THREADS
valueFrom:
configMapKeyRef:
name: nominatim-config
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_THREADS
- name: NOMINATIM_FLATNODE_FILE
valueFrom:
configMapKeyRef:
name: nominatim-config
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_FLATNODE_FILE
resources:
requests:
memory: "8Gi"
cpu: "4"
limits:
memory: "16Gi"
cpu: "8"
{{- toYaml .Values.initJob.resources | nindent 10 }}
volumes:
- name: nominatim-data
persistentVolumeClaim:
claimName: nominatim-data
claimName: {{ include "nominatim.dataPvcName" . }}
- name: nominatim-flatnode
persistentVolumeClaim:
claimName: nominatim-flatnode
claimName: {{ include "nominatim.flatnodePvcName" . }}
{{- end }}
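The import Job downloads and indexes the Spain extract before the StatefulSet can serve queries, which is a long-running step. A sketch for following it, assuming the default release name so the Job renders as nominatim-init:
# Sketch: watch the OSM import and block until it completes (name assumes release "nominatim").
kubectl logs -f job/nominatim-init -n bakery-ia
kubectl wait --for=condition=complete job/nominatim-init -n bakery-ia --timeout=24h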

View File

@@ -0,0 +1,37 @@
{{- if .Values.persistence.data.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "nominatim.dataPvcName" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "nominatim.labels" . | nindent 4 }}
spec:
accessModes:
- {{ .Values.persistence.data.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.data.size }}
{{- if .Values.persistence.data.storageClassName }}
storageClassName: {{ .Values.persistence.data.storageClassName }}
{{- end }}
{{- end }}
---
{{- if .Values.persistence.flatnode.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "nominatim.flatnodePvcName" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "nominatim.labels" . | nindent 4 }}
spec:
accessModes:
- {{ .Values.persistence.flatnode.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.flatnode.size }}
{{- if .Values.persistence.flatnode.storageClassName }}
storageClassName: {{ .Values.persistence.flatnode.storageClassName }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "nominatim.serviceName" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "nominatim.labels" . | nindent 4 }}
{{- with .Values.serviceAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
{{- include "nominatim.selectorLabels" . | nindent 4 }}
ports:
- port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}
protocol: TCP
name: http
type: {{ .Values.service.type }}

View File

@@ -0,0 +1,113 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "nominatim.fullname" . }}
namespace: {{ .Values.global.namespace }}
labels:
{{- include "nominatim.labels" . | nindent 4 }}
spec:
serviceName: {{ include "nominatim.serviceName" . }}
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "nominatim.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "nominatim.selectorLabels" . | nindent 8 }}
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: nominatim
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
- containerPort: {{ .Values.service.port }}
name: http
volumeMounts:
- name: nominatim-data
mountPath: /var/lib/postgresql
- name: nominatim-flatnode
mountPath: /nominatim-flatnode
env:
- name: NOMINATIM_PBF_URL
valueFrom:
configMapKeyRef:
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_PBF_URL
- name: NOMINATIM_REPLICATION_URL
valueFrom:
configMapKeyRef:
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_REPLICATION_URL
- name: NOMINATIM_IMPORT_STYLE
valueFrom:
configMapKeyRef:
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_IMPORT_STYLE
- name: NOMINATIM_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_THREADS
- name: NOMINATIM_FLATNODE_FILE
valueFrom:
configMapKeyRef:
name: {{ include "nominatim.configMapName" . }}
key: NOMINATIM_FLATNODE_FILE
{{- range $key, $value := .Values.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 10 }}
{{- if .Values.probes.liveness.enabled }}
livenessProbe:
httpGet:
path: {{ .Values.probes.liveness.path }}
port: {{ .Values.probes.liveness.port }}
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
{{- end }}
{{- if .Values.probes.readiness.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.probes.readiness.path }}
port: {{ .Values.probes.readiness.port }}
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
{{- end }}
volumes:
- name: nominatim-data
persistentVolumeClaim:
claimName: {{ include "nominatim.dataPvcName" . }}
- name: nominatim-flatnode
persistentVolumeClaim:
claimName: {{ include "nominatim.flatnodePvcName" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,113 @@
# Default values for Nominatim geocoding service
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Global settings
global:
namespace: "bakery-ia"
# Nominatim image configuration
image:
repository: "mediagis/nominatim"
tag: "4.4"
pullPolicy: "IfNotPresent"
# StatefulSet configuration
replicaCount: 1
# Nominatim configuration
config:
# Spain OSM data source
pbfUrl: "http://download.geofabrik.de/europe/spain-latest.osm.pbf"
# Updates replication source
replicationUrl: "https://download.geofabrik.de/europe/spain-updates"
# Import style (address for geocoding-focused usage)
importStyle: "address"
# Number of threads for indexing
threads: "4"
# Flatnode file path
flatnodeFile: "/nominatim-flatnode/flatnode.bin"
# Service configuration
service:
type: "ClusterIP"
port: 8080
name: "nominatim-service"
# Resource limits and requests for main service
resources:
requests:
cpu: "1"
memory: "2Gi"
limits:
cpu: "2"
memory: "4Gi"
# Init job resource limits (higher for initial import)
initJob:
enabled: true
resources:
requests:
cpu: "4"
memory: "8Gi"
limits:
cpu: "8"
memory: "16Gi"
# Time to keep job after completion (86400 = 1 day)
ttlSecondsAfterFinished: 86400
# Persistent Volume Claims
persistence:
data:
enabled: true
size: "50Gi"
accessMode: "ReadWriteOnce"
# storageClassName: "" # Use default storage class
flatnode:
enabled: true
size: "20Gi"
accessMode: "ReadWriteOnce"
# storageClassName: "" # Use default storage class
# Health probes configuration
probes:
liveness:
enabled: true
path: "/status"
port: 8080
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readiness:
enabled: true
path: "/status"
port: 8080
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 5
# Additional environment variables
env: {}
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}
# Pod annotations
podAnnotations: {}
# Service annotations
serviceAnnotations: {}
# Pod security context
podSecurityContext: {}
# Container security context
securityContext: {}
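Once the pod reports ready on /status, the service can be exercised from a workstation through a port-forward. A minimal sketch, using the nominatim-service name fixed in these values and the standard Nominatim search endpoint; the sample query string is only illustrative:
# Sketch: port-forward the ClusterIP service, then run a status check and a sample geocoding query.
kubectl port-forward svc/nominatim-service 8080:8080 -n bakery-ia &
curl -s http://localhost:8080/status
curl -s 'http://localhost:8080/search?q=Plaza+Mayor,+Madrid&format=jsonv2'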