Fix Demo enterprise
kubernetes_restart.sh (Executable file, 275 lines added)
@@ -0,0 +1,275 @@
#!/bin/bash

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Function to wait for pods with retry logic
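# Polls for pod creation every 5 seconds for up to max_retries attempts (30 x 5s = 150s);
# once matching pods exist, kubectl wait enforces the caller-supplied timeout on readiness.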
wait_for_pods() {
    local namespace=$1
    local selector=$2
    local timeout=$3
    local max_retries=30
    local retry_count=0

    print_status "Waiting for pods with selector '$selector' in namespace '$namespace'..."

    while [ $retry_count -lt $max_retries ]; do
        # Check if any pods exist first
        if kubectl get pods -n "$namespace" --selector="$selector" 2>/dev/null | grep -v "No resources found" | grep -v "NAME" > /dev/null; then
            # Pods exist, now wait for them to be ready
            if kubectl wait --namespace "$namespace" \
                --for=condition=ready pod \
                --selector="$selector" \
                --timeout="${timeout}s" 2>/dev/null; then
                print_success "Pods are ready"
                return 0
            fi
        fi

        retry_count=$((retry_count + 1))
        print_status "Waiting for pods to be created... (attempt $retry_count/$max_retries)"
        sleep 5
    done

    print_error "Timed out waiting for pods after $((max_retries * 5)) seconds"
    return 1
}

# Function to handle cleanup
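# Namespace deletion is backgrounded and abandoned after ~2 seconds so that
# stuck finalizers cannot hang the rest of the cleanup.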
cleanup() {
    print_status "Starting cleanup process..."

    # Delete Kubernetes namespace with timeout
    print_status "Deleting namespace bakery-ia..."
    if kubectl get namespace bakery-ia &>/dev/null; then
        kubectl delete namespace bakery-ia 2>/dev/null &
        PID=$!
        sleep 2
        if ps -p $PID &>/dev/null; then
            print_warning "kubectl delete namespace command taking too long, forcing termination..."
            kill $PID 2>/dev/null
        fi
        print_success "Namespace deletion attempted"
    else
        print_status "Namespace bakery-ia not found"
    fi

    # Delete Kind cluster
    print_status "Deleting Kind cluster..."
    if kind get clusters | grep -q "bakery-ia-local"; then
        kind delete cluster --name bakery-ia-local
        print_success "Kind cluster deleted"
    else
        print_status "Kind cluster bakery-ia-local not found"
    fi

    # Stop Colima
    print_status "Stopping Colima..."
    if colima list | grep -q "k8s-local"; then
        colima stop --profile k8s-local
        print_success "Colima stopped"
    else
        print_status "Colima profile k8s-local not found"
    fi

    print_success "Cleanup completed!"
    echo "----------------------------------------"
}

# Function to check for required configuration files
check_config_files() {
    print_status "Checking for required configuration files..."

    # Check for kind-config.yaml
    if [ ! -f kind-config.yaml ]; then
        print_error "kind-config.yaml not found in current directory!"
        print_error "Please ensure kind-config.yaml exists with your cluster configuration."
        exit 1
    fi

    # Check for encryption directory if referenced in config
    if grep -q "infrastructure/kubernetes/encryption" kind-config.yaml; then
        if [ ! -d "./infrastructure/kubernetes/encryption" ]; then
            print_warning "Encryption directory './infrastructure/kubernetes/encryption' not found"
            print_warning "Some encryption configurations may not work properly"
        fi
    fi

    print_success "Configuration files check completed"
}
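
# For reference, the kind-config.yaml this script relies on looks roughly like the
# sketch below (illustrative only; the actual file in the repo may differ):
#
#   kind: Cluster
#   apiVersion: kind.x-k8s.io/v1alpha4
#   name: bakery-ia-local
#   nodes:
#     - role: control-plane
#       extraPortMappings:
#         - containerPort: 30080   # ingress HTTP
#           hostPort: 80
#         - containerPort: 30443   # ingress HTTPS
#           hostPort: 443
#         - containerPort: 30300   # frontend
#           hostPort: 3000
#         - containerPort: 30800   # gateway
#           hostPort: 8000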

# Function to handle setup
setup() {
    print_status "Starting setup process..."

    # Check for required config files
    check_config_files

    # 1. Start Colima with adequate resources
    print_status "Starting Colima with 6 CPU, 12GB memory, 120GB disk..."
    colima start --cpu 6 --memory 12 --disk 120 --runtime docker --profile k8s-local

    if [ $? -eq 0 ]; then
        print_success "Colima started successfully"
    else
        print_error "Failed to start Colima"
        exit 1
    fi

    # 2. Create Kind cluster using existing configuration
    print_status "Creating Kind cluster with existing configuration..."

    if [ -f kind-config.yaml ]; then
        print_status "Using existing kind-config.yaml file"

        # Extract cluster name from config for verification (fall back to the default name)
        CLUSTER_NAME=$(grep -E "name:" kind-config.yaml | head -1 | sed 's/name:[[:space:]]*//' | tr -d '[:space:]')
        CLUSTER_NAME=${CLUSTER_NAME:-bakery-ia-local}

        print_status "Creating cluster: $CLUSTER_NAME"
        kind create cluster --config kind-config.yaml

        if [ $? -eq 0 ]; then
            print_success "Kind cluster created successfully"
        else
            print_error "Failed to create Kind cluster"
            exit 1
        fi
    else
        print_error "kind-config.yaml file not found!"
        exit 1
    fi

    # 3. Install NGINX Ingress Controller
    print_status "Installing NGINX Ingress Controller..."

    # Apply the ingress-nginx manifest
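    # (The URL below tracks the main branch of ingress-nginx, so the deployed
    # version is unpinned; pinning a release tag would make setups reproducible.)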
    kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml

    if [ $? -eq 0 ]; then
        print_success "NGINX Ingress Controller manifest applied"
    else
        print_error "Failed to apply NGINX Ingress Controller manifest"
        exit 1
    fi

    # Wait for ingress-nginx pods to be ready with retry logic
    wait_for_pods "ingress-nginx" "app.kubernetes.io/component=controller" 300

    if [ $? -ne 0 ]; then
        print_error "NGINX Ingress Controller failed to become ready"
        print_status "Checking pod status for debugging..."
        kubectl get pods -n ingress-nginx
        kubectl describe pods -n ingress-nginx
        exit 1
    fi

    # 4. Configure permanent localhost access
    print_status "Configuring localhost access via NodePort..."

    # Check if service exists
    if kubectl get svc ingress-nginx-controller -n ingress-nginx &>/dev/null; then
        # Patch the service to expose NodePorts
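        # (A merge patch replaces spec.type and the whole ports list, pinning
        # nodePorts 30080/30443 so the hostPort mappings in kind-config.yaml line up.)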
        kubectl patch svc ingress-nginx-controller \
            -n ingress-nginx \
            --type merge \
            -p '{"spec":{"type":"NodePort","ports":[{"name":"http","port":80,"targetPort":"http","nodePort":30080},{"name":"https","port":443,"targetPort":"https","nodePort":30443}]}}'

        if [ $? -eq 0 ]; then
            print_success "NodePort configuration applied"
        else
            print_error "Failed to patch Ingress service"
            exit 1
        fi
    else
        print_error "Ingress NGINX controller service not found"
        exit 1
    fi

    # 5. Verify port mappings from kind-config.yaml
    print_status "Verifying port mappings from configuration..."

    # Extract ports from kind-config.yaml
    HTTP_HOST_PORT=$(grep -A1 "containerPort: 30080" kind-config.yaml | grep "hostPort:" | awk '{print $2}')
    HTTP_HOST_PORT=${HTTP_HOST_PORT:-80}
    HTTPS_HOST_PORT=$(grep -A1 "containerPort: 30443" kind-config.yaml | grep "hostPort:" | awk '{print $2}')
    HTTPS_HOST_PORT=${HTTPS_HOST_PORT:-443}

    # Print cluster info
    echo ""
    print_success "Setup completed successfully!"
    echo "----------------------------------------"
    print_status "Cluster Information:"
    echo " - Colima profile: k8s-local"
    echo " - Kind cluster: $CLUSTER_NAME"
    echo " - Direct port mappings (from kind-config.yaml):"
    echo "     Frontend: localhost:3000 -> container:30300"
    echo "     Gateway: localhost:8000 -> container:30800"
    echo " - Ingress access:"
    echo "     HTTP: localhost:${HTTP_HOST_PORT} -> ingress:30080"
    echo "     HTTPS: localhost:${HTTPS_HOST_PORT} -> ingress:30443"
    echo " - NodePort access:"
    echo "     HTTP: localhost:30080"
    echo "     HTTPS: localhost:30443"
    echo "----------------------------------------"
    print_status "To access your applications:"
    echo " - Use Ingress via: http://localhost:${HTTP_HOST_PORT}"
    echo " - Direct NodePort: http://localhost:30080"
    echo "----------------------------------------"
}

# Function to show usage
usage() {
    echo "Usage: $0 [option]"
    echo ""
    echo "Options:"
    echo "  cleanup   Clean up all resources (namespace, cluster, colima)"
    echo "  setup     Set up the complete environment"
    echo "  full      Clean up first, then set up (default)"
    echo "  help      Show this help message"
    echo ""
    echo "Requirements:"
    echo "  - kind-config.yaml must exist in current directory"
    echo "  - For encryption: ./infrastructure/kubernetes/encryption directory"
}

# Main script logic
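# With no argument the script runs "full" (cleanup followed by setup),
# e.g. ./kubernetes_restart.sh   or   ./kubernetes_restart.sh setup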
case "${1:-full}" in
    "cleanup")
        cleanup
        ;;
    "setup")
        setup
        ;;
    "full")
        cleanup
        setup
        ;;
    "help"|"-h"|"--help")
        usage
        ;;
    *)
        print_warning "Unknown option: $1"
        echo ""
        usage
        exit 1
        ;;
esac