Update monitoring packages to latest versions

- Updated all OpenTelemetry packages to latest versions:
  - opentelemetry-api: 1.27.0 → 1.39.1
  - opentelemetry-sdk: 1.27.0 → 1.39.1
  - opentelemetry-exporter-otlp-proto-grpc: 1.27.0 → 1.39.1
  - opentelemetry-exporter-otlp-proto-http: 1.27.0 → 1.39.1
  - opentelemetry-instrumentation-fastapi: 0.48b0 → 0.60b1
  - opentelemetry-instrumentation-httpx: 0.48b0 → 0.60b1
  - opentelemetry-instrumentation-redis: 0.48b0 → 0.60b1
  - opentelemetry-instrumentation-sqlalchemy: 0.48b0 → 0.60b1

- Removed prometheus-client==0.23.1 from all services
- Unified all services to use the same monitoring package versions

Generated by Mistral Vibe.
Co-Authored-By: Mistral Vibe <vibe@mistral.ai>
This commit is contained in:
Urtzi Alfaro
2026-01-08 19:25:52 +01:00
parent dfb7e4b237
commit 29d19087f1
129 changed files with 5718 additions and 1821 deletions

158
Tiltfile
View File

@@ -16,9 +16,28 @@
# Ensure we're running in the correct context
allow_k8s_contexts('kind-bakery-ia-local')
# Use local registry for faster builds and deployments
# This registry is created by kubernetes_restart.sh script
default_registry('localhost:5001')
# Docker registry configuration
# Set USE_DOCKERHUB=true environment variable to push images to Docker Hub
# Otherwise, uses local registry for faster builds and deployments
use_dockerhub = os.getenv('USE_DOCKERHUB', 'false').lower() == 'true'
dockerhub_username = os.getenv('DOCKERHUB_USERNAME', 'uals')
if use_dockerhub:
print("""
🐳 DOCKER HUB MODE ENABLED
Images will be pushed to Docker Hub: docker.io/%s
Make sure you're logged in: docker login
To disable: unset USE_DOCKERHUB or set USE_DOCKERHUB=false
""" % dockerhub_username)
default_registry('docker.io/%s' % dockerhub_username)
else:
print("""
🏠 LOCAL REGISTRY MODE
Using local registry for faster builds: localhost:5001
This registry is created by kubernetes_restart.sh script
To use Docker Hub: export USE_DOCKERHUB=true
""")
default_registry('localhost:5001')
# =============================================================================
# SECURITY & INITIAL SETUP
@@ -312,50 +331,96 @@ k8s_resource('nominatim', labels=['01-infrastructure'])
# MONITORING RESOURCES - SigNoz (Unified Observability)
# =============================================================================
# Note: SigNoz Helm chart is complex for local dev
# For development, access SigNoz manually or use production Helm deployment
# To deploy SigNoz manually: ./infrastructure/helm/deploy-signoz.sh dev
# Deploy SigNoz using Helm with automatic deployment and progress tracking
local_resource(
'signoz-info',
'signoz-deploy',
cmd='''
echo "📊 SigNoz Monitoring Information"
echo "📊 Deploying SigNoz Monitoring Stack..."
echo ""
echo "SigNoz Helm deployment is disabled for local development due to complexity."
# Check if SigNoz is already deployed
if helm list -n signoz | grep -q signoz; then
echo "✅ SigNoz already deployed, checking status..."
helm status signoz -n signoz
else
echo "🚀 Installing SigNoz..."
# Add SigNoz Helm repository if not already added
helm repo add signoz https://charts.signoz.io 2>/dev/null || true
helm repo update signoz
# Install SigNoz with custom values in the bakery-ia namespace
helm upgrade --install signoz signoz/signoz \
-n bakery-ia \
-f infrastructure/helm/signoz-values-dev.yaml \
--timeout 10m \
--wait
echo ""
echo "✅ SigNoz deployment completed"
fi
echo ""
echo "Options:"
echo "1. Deploy manually: ./infrastructure/helm/deploy-signoz.sh dev"
echo "2. Use production deployment: ./infrastructure/helm/deploy-signoz.sh prod"
echo "3. Skip monitoring for local development (use application metrics only)"
echo "📈 SigNoz Access Information:"
echo " URL: https://monitoring.bakery-ia.local/signoz"
echo " Username: admin"
echo " Password: admin"
echo ""
echo "For simpler local monitoring, consider using just Prometheus+Grafana"
echo "or access metrics directly from services at /metrics endpoints."
echo "🔧 OpenTelemetry Collector Endpoints:"
echo " gRPC: localhost:4317"
echo " HTTP: localhost:4318"
echo ""
echo "💡 To check pod status: kubectl get pods -n signoz"
''',
labels=['05-monitoring'],
auto_init=False,
trigger_mode=TRIGGER_MODE_MANUAL,
allow_parallel=False
)
# Track SigNoz pods in Tilt UI using workload tracking
# These will automatically discover pods once SigNoz is deployed
local_resource(
'signoz-status',
cmd='''
echo "📊 SigNoz Status Check"
echo ""
# Check pod status
echo "Current SigNoz pods:"
kubectl get pods -n bakery-ia -l app.kubernetes.io/instance=signoz -o wide 2>/dev/null || echo "No pods found"
echo ""
echo "SigNoz Services:"
kubectl get svc -n bakery-ia -l app.kubernetes.io/instance=signoz 2>/dev/null || echo "No services found"
# Check if all pods are ready
TOTAL_PODS=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/instance=signoz --no-headers 2>/dev/null | wc -l | tr -d ' ')
READY_PODS=$(kubectl get pods -n bakery-ia -l app.kubernetes.io/instance=signoz --field-selector=status.phase=Running --no-headers 2>/dev/null | wc -l | tr -d ' ')
if [ "$TOTAL_PODS" -gt 0 ]; then
echo ""
echo "Pod Status: $READY_PODS/$TOTAL_PODS ready"
if [ "$READY_PODS" -eq "$TOTAL_PODS" ]; then
echo "✅ All SigNoz pods are running!"
echo ""
echo "Access SigNoz at: https://monitoring.bakery-ia.local/signoz"
echo "Credentials: admin / admin"
else
echo "⏳ Waiting for pods to become ready..."
fi
fi
''',
labels=['05-monitoring'],
resource_deps=['signoz-deploy'],
auto_init=False,
trigger_mode=TRIGGER_MODE_MANUAL
)
# SigNoz ingress (only if manually deployed)
# Uncomment and trigger manually if you deploy SigNoz
# local_resource(
# 'signoz-ingress',
# cmd='''
# echo "🌐 Applying SigNoz ingress..."
# kubectl apply -f infrastructure/kubernetes/overlays/dev/signoz-ingress.yaml
# echo "✅ SigNoz ingress configured"
# ''',
# labels=['05-monitoring'],
# auto_init=False,
# trigger_mode=TRIGGER_MODE_MANUAL
# )
# Note: SigNoz components are managed by Helm and deployed outside of kustomize
# They will appear automatically once deployed, but we don't track them explicitly in Tilt
# to avoid startup errors. View them with: kubectl get pods -n bakery-ia -l app.kubernetes.io/instance=signoz
# Optional exporters (in monitoring namespace)
k8s_resource('node-exporter', labels=['05-monitoring'])
k8s_resource('postgres-exporter', resource_deps=['auth-db'], labels=['05-monitoring'])
# Optional exporters (in monitoring namespace) - DISABLED since using SigNoz
# k8s_resource('node-exporter', labels=['05-monitoring'])
# k8s_resource('postgres-exporter', resource_deps=['auth-db'], labels=['05-monitoring'])
# =============================================================================
# DATABASE RESOURCES
@@ -571,16 +636,20 @@ Internal Schedulers Active:
⏰ Usage Tracking: Daily @ 2:00 AM UTC (tenant-service)
Access your application:
Main Application: https://localhost
API Endpoints: https://localhost/api/v1/...
Main Application: https://bakery-ia.local
API Endpoints: https://bakery-ia.local/api/v1/...
Local Access: https://localhost
Service Metrics:
Gateway: http://localhost:8000/metrics
Any Service: kubectl port-forward <service> 8000:8000
SigNoz (Optional - see SIGNOZ_DEPLOYMENT_RECOMMENDATIONS.md):
Deploy manually: ./infrastructure/helm/deploy-signoz.sh dev
Access (if deployed): https://localhost/signoz
SigNoz (Unified Observability):
    Deploy via Tilt: Trigger 'signoz-deploy' resource
Manual deploy: ./infrastructure/helm/deploy-signoz.sh dev
Access (if deployed): https://monitoring.bakery-ia.local/signoz
Username: admin
Password: admin
Verify security:
kubectl get pvc -n bakery-ia
@@ -603,5 +672,12 @@ Useful Commands:
tilt logs 09-services-core
tilt logs 13-services-platform
DNS Configuration:
# To access the application via domain names, add these entries to your hosts file:
# sudo nano /etc/hosts
# Add these lines:
# 127.0.0.1 bakery-ia.local
# 127.0.0.1 monitoring.bakery-ia.local
======================================
""")