Files
bakery-ia/scripts/backup-databases.sh

162 lines
4.4 KiB
Bash
Raw Normal View History

Add comprehensive Kubernetes migration guide from local to production This commit adds complete documentation and tooling for migrating from local development (Kind/Colima on macOS) to production deployment (MicroK8s on Ubuntu VPS at Clouding.io). Documentation added: - K8S-MIGRATION-GUIDE.md: Comprehensive step-by-step migration guide covering all phases from VPS setup to post-deployment operations - MIGRATION-CHECKLIST.md: Quick reference checklist for migration tasks - MIGRATION-SUMMARY.md: High-level overview and key changes summary Configuration updates: - Added storage-patch.yaml for MicroK8s storage class compatibility (changes from 'standard' to 'microk8s-hostpath') - Updated prod/kustomization.yaml to include storage patch Helper scripts: - deploy-production.sh: Interactive deployment script with validation - tag-and-push-images.sh: Automated image tagging and registry push - backup-databases.sh: Database backup script for production Key differences addressed: - Ingress: MicroK8s addon vs custom NGINX - Storage: MicroK8s hostpath vs Kind standard storage - Registry: Container registry configuration for production - SSL: Let's Encrypt production certificates - Domains: Real domain configuration vs localhost - Resources: Production-grade resource limits and scaling The migration guide covers: - VPS setup and MicroK8s installation - Configuration adaptations required - Container registry setup options - SSL certificate configuration - Monitoring and backup setup - Troubleshooting common issues - Security hardening checklist - Rollback procedures All existing Kubernetes manifests remain unchanged and compatible.
2026-01-02 14:57:09 +00:00
#!/bin/bash
# Database Backup Script for Bakery IA
# This script backs up all PostgreSQL databases in the Kubernetes cluster
# Designed to run on the VPS via cron
#
# Strict mode: abort on any command failure (-e), on use of unset
# variables (-u), and on a failure anywhere in a pipeline (-o pipefail).
set -euo pipefail

# Configuration — readonly: nothing below reassigns these.
readonly BACKUP_ROOT="/backups"
readonly NAMESPACE="bakery-ia"
readonly RETENTION_DAYS=7
# Timestamp for this run; declaration split from assignment so a failure
# of the command substitution is not masked by 'readonly'.
DATE=$(date +%Y-%m-%d_%H-%M-%S)
readonly DATE
readonly BACKUP_DIR="${BACKUP_ROOT}/${DATE}"

# ANSI color codes for log output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'  # reset / no color
# --- Logging helpers --------------------------------------------------------

# Print a timestamped informational message to stdout.
log() {
  printf '[%s] %s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "$1"
}
# Print a timestamped error message to stdout, colored red.
log_error() {
  local ts
  ts=$(date +'%Y-%m-%d %H:%M:%S')
  echo -e "${RED}[${ts}] ERROR: $1${NC}"
}
# Print a timestamped success message to stdout, colored green.
log_success() {
  local ts
  ts=$(date +'%Y-%m-%d %H:%M:%S')
  echo -e "${GREEN}[${ts}] SUCCESS: $1${NC}"
}
# Print a timestamped warning message to stdout, colored yellow.
log_warning() {
  local ts
  ts=$(date +'%Y-%m-%d %H:%M:%S')
  echo -e "${YELLOW}[${ts}] WARNING: $1${NC}"
}
# Prepare the per-run backup directory.
mkdir -p "$BACKUP_DIR"
log "Starting database backup to $BACKUP_DIR"

# Discover every PostgreSQL pod in the namespace via its component label.
DB_PODS=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/component=database -o jsonpath='{.items[*].metadata.name}')

if [[ -z "$DB_PODS" ]]; then
  log_error "No database pods found in namespace $NAMESPACE"
  exit 1
fi

log "Found database pods: $DB_PODS"

# Bookkeeping for the final summary report.
SUCCESS_COUNT=0
FAILED_COUNT=0
FAILED_DBS=()
# Backup each database pod.
for pod in $DB_PODS; do
  log "Backing up database: $pod"

  # Derive a friendly database name from the app.kubernetes.io/name label;
  # fall back to the pod name when the label is absent.
  DB_NAME=$(kubectl get pod "$pod" -n "$NAMESPACE" -o jsonpath='{.metadata.labels.app\.kubernetes\.io/name}')
  if [ -z "$DB_NAME" ]; then
    DB_NAME=$pod
  fi

  BACKUP_FILE="${BACKUP_DIR}/${DB_NAME}.sql"

  # Dump all databases of the instance. pg_dumpall stderr is suppressed;
  # failure is reported through our own logging below instead.
  if kubectl exec -n "$NAMESPACE" "$pod" -- pg_dumpall -U postgres > "$BACKUP_FILE" 2>/dev/null; then
    FILE_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
    log_success "Backed up $DB_NAME ($FILE_SIZE)"
    # BUGFIX: '((SUCCESS_COUNT++))' returns exit status 1 when the counter
    # is 0 (the expression evaluates to 0), which aborts the script under
    # 'set -e' on the very first backup. Plain assignment is safe.
    SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
  else
    log_error "Failed to backup $DB_NAME"
    FAILED_DBS+=("$DB_NAME")
    FAILED_COUNT=$((FAILED_COUNT + 1))  # same set -e pitfall as above
    rm -f "$BACKUP_FILE"  # remove partial/empty dump
  fi
done
# Also backup Redis if present (best-effort; failure is non-fatal).
REDIS_POD=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=redis -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [ -n "$REDIS_POD" ]; then
  log "Backing up Redis: $REDIS_POD"
  REDIS_BACKUP="${BACKUP_DIR}/redis.rdb"
  # Snapshot inside the pod, then copy the dump file out with 'kubectl cp'.
  # NOTE(review): 'redis-cli --rdb FILE' performs its own snapshot transfer;
  # appending SAVE looks redundant — confirm intended redis-cli usage.
  if kubectl exec -n "$NAMESPACE" "$REDIS_POD" -- redis-cli --rdb /tmp/dump.rdb SAVE > /dev/null 2>&1 && \
     kubectl cp "$NAMESPACE/$REDIS_POD:/tmp/dump.rdb" "$REDIS_BACKUP" > /dev/null 2>&1; then
    FILE_SIZE=$(du -h "$REDIS_BACKUP" | cut -f1)
    log_success "Backed up Redis ($FILE_SIZE)"
    # BUGFIX: avoid '((SUCCESS_COUNT++))' — it exits non-zero when the
    # counter is 0 and would kill the script under 'set -e'.
    SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
  else
    log_warning "Failed to backup Redis (non-critical)"
  fi
fi
# Record run metadata alongside the dumps for later auditing.
{
  printf 'Backup Date: %s\n' "$(date)"
  printf 'Namespace: %s\n' "$NAMESPACE"
  printf 'Success Count: %s\n' "$SUCCESS_COUNT"
  printf 'Failed Count: %s\n' "$FAILED_COUNT"
  printf 'Failed Databases: %s\n' "${FAILED_DBS[*]:-none}"
  printf 'Kubernetes Cluster: %s\n' "$(kubectl config current-context)"
} > "${BACKUP_DIR}/backup-info.txt"
# Compress the backup directory into a single tarball; guard clause exits
# early on failure so the happy path reads straight down.
log "Compressing backup..."
COMPRESSED_FILE="${BACKUP_ROOT}/backup-${DATE}.tar.gz"
if ! tar -czf "$COMPRESSED_FILE" -C "$BACKUP_ROOT" "$(basename "$BACKUP_DIR")"; then
  log_error "Failed to compress backup"
  exit 1
fi
COMPRESSED_SIZE=$(du -h "$COMPRESSED_FILE" | cut -f1)
log_success "Backup compressed: $COMPRESSED_FILE ($COMPRESSED_SIZE)"
rm -rf "$BACKUP_DIR"  # the tarball supersedes the raw dump directory

# Prune tarballs older than the retention window.
log "Cleaning up backups older than $RETENTION_DAYS days..."
DELETED_COUNT=$(find "$BACKUP_ROOT" -name "backup-*.tar.gz" -mtime "+$RETENTION_DAYS" -delete -print | wc -l)
if [[ "$DELETED_COUNT" -gt 0 ]]; then
  log "Deleted $DELETED_COUNT old backup(s)"
fi
# Total size of everything under the backup root, for the summary.
TOTAL_SIZE=$(du -sh "$BACKUP_ROOT" | cut -f1)

# Final summary report, emitted line by line through the logger.
SUMMARY_LINES=(
  "========================================="
  "Backup Summary"
  "========================================="
  "Successful backups: $SUCCESS_COUNT"
  "Failed backups: $FAILED_COUNT"
  "Backup location: $COMPRESSED_FILE"
  "Backup size: $COMPRESSED_SIZE"
  "Total backup storage: $TOTAL_SIZE"
  "========================================="
)
echo ""
for line in "${SUMMARY_LINES[@]}"; do
  log "$line"
done

# Non-zero exit signals cron/monitoring that at least one dump failed.
if [[ "$FAILED_COUNT" -gt 0 ]]; then
  log_error "Some backups failed: ${FAILED_DBS[*]}"
  exit 1
fi

log_success "Backup completed successfully!"
# Optional: Send notification
# Uncomment and configure if you want email/slack notifications
# send_notification "Bakery IA Backup Completed" "Successfully backed up $SUCCESS_COUNT databases. Size: $COMPRESSED_SIZE"
exit 0