Compare commits: 79 commits (aeff6b1537...main)
@@ -2,7 +2,7 @@

## Executive Summary

This document outlines the recommended architecture for deploying Mailu email services across development and production environments for the Bakery-IA project. The solution addresses DNSSEC validation requirements using CoreDNS with DNS-over-TLS while maintaining consistency across different Kubernetes platforms.

## Environment Overview

@@ -25,124 +25,76 @@ This document outlines the recommended architecture for deploying Mailu email se

## Architectural Solution

### DNS Resolution Strategy

**Approach**: Use CoreDNS with DNS-over-TLS to Cloudflare (1.1.1.1) for DNSSEC validation

#### Benefits:
- ✅ Leverages existing Kubernetes DNS infrastructure
- ✅ No additional pods required (uses CoreDNS already in cluster)
- ✅ DNSSEC validation provided by Cloudflare's DNS-over-TLS
- ✅ Consistent behavior across dev and prod
- ✅ Meets Mailu's DNSSEC requirements
- ✅ Simple and reliable

### Implementation Components

#### 1. CoreDNS Configuration with DNS-over-TLS

```
# CoreDNS Corefile configuration for DNSSEC via DNS-over-TLS
.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . tls://1.1.1.1 tls://1.0.0.1 {
        tls_servername cloudflare-dns.com
        health_check 5s
    }
    cache 30 {
        disable success cluster.local
        disable denial cluster.local
    }
    loop
    reload
    loadbalance
}
```
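To load this Corefile into a cluster by hand, one approach is sketched below. It assumes the ConfigMap is named `coredns` in `kube-system` (true for most distributions, including the MicroK8s setup used later) and that the Corefile above is saved locally as `Corefile`:

```bash
# Regenerate the coredns ConfigMap from the local Corefile and roll the deployment
kubectl create configmap coredns -n kube-system \
  --from-file=Corefile=./Corefile \
  --dry-run=client -o yaml | kubectl apply -f -

kubectl rollout restart deployment coredns -n kube-system
kubectl rollout status deployment coredns -n kube-system --timeout=60s
```

The deployment guide's Phase 7 applies the same change as a full ConfigMap manifest instead.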
#### 2. Mailu Configuration (values.yaml)

```yaml
# Production-tuned Mailu configuration
global:
  # Using Kubernetes CoreDNS for DNS resolution
  # CoreDNS is configured with DNS-over-TLS (Cloudflare) for DNSSEC validation
  custom_dns_servers: "10.152.183.10"  # MicroK8s CoreDNS IP (adjust for your cluster)

# DNS configuration - use Kubernetes DNS (ClusterFirst)
# CoreDNS provides DNSSEC validation via DNS-over-TLS to Cloudflare
admin:
  dnsPolicy: "ClusterFirst"

rspamd:
  dnsPolicy: "ClusterFirst"

# Environment-specific configurations
persistence:
  enabled: true
  # Development: use default storage class
  # Production: use microk8s-hostpath or longhorn
  storageClass: ""  # Use cluster default

replicas: 1  # Increase in production as needed

# Security settings
secretKey: "generate-strong-key-here"

# Ingress configuration
# Use existing Bakery-IA ingress controller
```
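Before installing, it can help to render the chart locally and confirm the DNS settings actually reach the generated manifests. A minimal check, assuming the release name `mailu` and the values layout used below, might look like:

```bash
# Render the chart without touching the cluster and look for the DNS-related settings
helm template mailu mailu/mailu \
  --namespace bakery-ia \
  -f infrastructure/platform/mail/mailu-helm/values.yaml \
  --set global.custom_dns_servers=10.152.183.10 \
  | grep -iE 'dns|nameserver' | head -20
```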
### Environment-Specific Adaptations

@@ -157,23 +109,21 @@ secretKey: "generate-strong-key-here"

**Deployment:**
```bash
# Get CoreDNS service IP
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')

# Deploy Mailu with dev-specific values
helm upgrade --install mailu mailu/mailu \
  --namespace bakery-ia \
  -f infrastructure/platform/mail/mailu-helm/values.yaml \
  -f infrastructure/platform/mail/mailu-helm/dev/values.yaml \
  --set global.custom_dns_servers=$COREDNS_IP
```

#### Production (MicroK8s/Ubuntu)

**Enhancements:**
- Use microk8s-hostpath for storage
- Enable monitoring and logging
- Configure proper ingress with TLS
- Set up backup solutions

@@ -181,19 +131,17 @@ helm upgrade --install mailu mailu/mailu \

**Deployment:**
```bash
# Enable required MicroK8s addons
microk8s enable dns storage ingress

# Get CoreDNS service IP
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')

# Deploy Mailu with production values
helm upgrade --install mailu mailu/mailu \
  --namespace bakery-ia \
  -f infrastructure/platform/mail/mailu-helm/values.yaml \
  -f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
  --set global.custom_dns_servers=$COREDNS_IP
```
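After either install, a quick sanity check is to confirm the release actually picked up the CoreDNS IP; something along these lines should be enough:

```bash
# Confirm the override reached the release
helm get values mailu -n bakery-ia | grep -i custom_dns_servers

# Cross-check against the live CoreDNS service IP
kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}{"\n"}'
```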
## Verification Procedures

@@ -201,52 +149,39 @@ helm upgrade --install mailu mailu/mailu \

### DNSSEC Validation Test

```bash
# Test DNS resolution from within a Mailu pod
kubectl exec -it -n bakery-ia deploy/mailu-admin -- bash

# Test DNSSEC validation (via CoreDNS -> Cloudflare DNS-over-TLS)
dig +dnssec +adflag example.org A

# Should show the AD flag in the response, indicating DNSSEC validation
```
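A complementary negative test: `dnssec-failed.org` is a deliberately mis-signed test zone, so a validating resolver chain should refuse to answer for it (run from the same pod as above).

```bash
# A DNSSEC-validating path should return SERVFAIL for a known-bad zone
dig dnssec-failed.org A +dnssec
# Expected: status: SERVFAIL and no answer section; a non-validating resolver would return an A record
```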
### Service Health Checks

```bash
# Check CoreDNS is running
kubectl get pods -n kube-system -l k8s-app=kube-dns

# Check Mailu components
kubectl get pods -n bakery-ia | grep mailu
kubectl logs -n bakery-ia -l app.kubernetes.io/name=mailu
```
## Monitoring and Maintenance

### Production Monitoring Setup

CoreDNS exposes Prometheus metrics on port 9153 by default. Monitor:
- DNS query latency
- DNS query success/failure rates
- DNS cache hit ratio
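If these metrics are scraped into Prometheus (or SigNoz's Prometheus-compatible pipeline), the standard CoreDNS metric names map onto the three signals above roughly as follows; treat the exact expressions as a starting sketch:

```
# p99 DNS query latency over the last 5 minutes
histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket[5m])) by (le))

# Share of responses that are SERVFAIL
sum(rate(coredns_dns_responses_total{rcode="SERVFAIL"}[5m]))
  / sum(rate(coredns_dns_responses_total[5m]))

# Cache hit ratio
sum(rate(coredns_cache_hits_total[5m]))
  / (sum(rate(coredns_cache_hits_total[5m])) + sum(rate(coredns_cache_misses_total[5m])))
```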
### Backup Strategy

**Production:**
- Daily Velero backups of the bakery-ia namespace
- Weekly database dumps
- Monthly full cluster snapshots

@@ -258,16 +193,21 @@ spec:

### Common Issues and Solutions

**Issue: DNS resolution failures**
- Verify CoreDNS pods are running
- Check CoreDNS logs: `kubectl logs -n kube-system -l k8s-app=kube-dns`
- Test DNS resolution: `kubectl run -it --rm dns-test --image=busybox -- nslookup google.com`

**Issue: Mailu pods failing to start**
- Confirm DNS configuration in values.yaml
- Verify CoreDNS service is reachable
- Check resource availability

**Issue: DNSSEC validation errors**
- Ensure CoreDNS is configured with DNS-over-TLS
- Test with: `dig +dnssec example.org`
- Verify Cloudflare DNS is reachable

**Issue: Performance problems**
- Monitor CPU/memory usage
- Adjust resource limits

@@ -327,12 +267,12 @@ spec:

## Conclusion

This architecture provides a robust, consistent solution for deploying Mailu across development and production environments. By using CoreDNS with DNS-over-TLS to Cloudflare, we ensure compliance with Mailu's DNSSEC requirements while maintaining simplicity and reliability.

The solution is designed to be:
- **Simple**: Uses existing Kubernetes DNS infrastructure
- **Consistent**: Same core architecture across environments
- **Reliable**: Production-grade availability
- **Efficient**: No additional pods required for DNS
- **Maintainable**: Clear documentation and troubleshooting guides

This approach aligns with the Bakery-IA project's requirements for a secure, reliable email infrastructure that can be consistently deployed across different environments.
@@ -20,6 +20,8 @@

5. [Phase 2: Domain & DNS Configuration](#phase-2-domain--dns-configuration)
6. [Phase 3: Deploy Foundation Layer](#phase-3-deploy-foundation-layer)
7. [Phase 4: Deploy CI/CD Infrastructure](#phase-4-deploy-cicd-infrastructure)
   - [Step 4.6: Configure Gitea Webhook](#step-46-configure-gitea-webhook-for-cicd-pipeline)
   - [Step 4.7: Configure PipelineRun Cleanup](#step-47-configure-pipelinerun-cleanup-optional-but-recommended)
8. [Phase 5: Pre-Pull and Push Base Images to Gitea Registry](#phase-5-pre-pull-and-push-base-images-to-gitea-registry)
   - [Step 5.1: Pre-Pull Base Images](#step-51-pre-pull-base-images-and-push-to-registry)
   - [Step 5.2: Verify Images in Registry](#step-52-verify-images-in-gitea-registry)

@@ -29,6 +31,7 @@

   - [Step 5.6: Verify Service Images](#step-56-verify-all-service-images-are-available)
9. [Phase 6: Deploy Application Services](#phase-6-deploy-application-services)
10. [Phase 7: Deploy Optional Services](#phase-7-deploy-optional-services)
    - [Step 7.5: Deploy Kubernetes Infrastructure Monitoring](#step-75-deploy-kubernetes-infrastructure-monitoring-required-for-signoz-infrastructure-view)
11. [Phase 8: Verification & Validation](#phase-8-verification--validation)
12. [Post-Deployment Operations](#post-deployment-operations)
13. [Troubleshooting Guide](#troubleshooting-guide)

@@ -49,7 +52,7 @@ A complete multi-tenant SaaS platform consisting of:

| **Cache** | Redis with TLS |
| **Message Broker** | RabbitMQ |
| **Object Storage** | MinIO (S3-compatible) |
| **Email** | Mailu (self-hosted) with MailerSend relay |
| **Monitoring** | SigNoz (unified observability) |
| **CI/CD** | Gitea + Tekton + Flux CD |
| **Security** | TLS everywhere, RBAC, Network Policies |

@@ -71,7 +74,7 @@ A complete multi-tenant SaaS platform consisting of:

│ PostgreSQL (18 DBs) │ Redis │ RabbitMQ │ MinIO │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 2: NETWORK & SECURITY │
│ CoreDNS (DNS-over-TLS) │ Ingress Controller │ Cert-Manager │ TLS │
├─────────────────────────────────────────────────────────────────────────────┤
│ LAYER 1: FOUNDATION │
│ Namespaces │ Storage Classes │ RBAC │ ConfigMaps │ Secrets │

@@ -113,7 +116,7 @@ Phase 8: Verification & Validation

|---------|----------|-------------|
| VPS (20GB RAM, 8 vCPU, 200GB SSD) | clouding.io | €40-80 |
| Domain | Namecheap/Cloudflare | ~€1.25 (€15/year) |
| Email Relay | MailerSend (free tier: 3K emails/month) | €0 |
| SSL Certificates | Let's Encrypt | €0 |
| DNS | Cloudflare | €0 |
| **Total** | | **€41-81/month** |

@@ -137,7 +140,7 @@ Phase 8: Verification & Validation

- [ ] **VPS Provider** (clouding.io, Hetzner, DigitalOcean, etc.)
- [ ] **Domain Registrar** (Namecheap, Cloudflare, etc.)
- [ ] **Cloudflare Account** (recommended for DNS)
- [ ] **MailerSend Account** (for email relay - https://mailersend.com, optional)
- [ ] **Stripe Account** (for payments)

### Local Machine Requirements

@@ -379,7 +382,7 @@ Add these DNS records pointing to your VPS IP (`200.234.233.87`):

| A | registry | 200.234.233.87 | Auto |
| A | api | 200.234.233.87 | Auto |
| MX | @ | mail.bakewise.ai | 10 |
| TXT | @ | v=spf1 include:mailersend.net mx a ~all | Auto |
| TXT | _dmarc | v=DMARC1; p=reject; rua=mailto:admin@bakewise.ai | Auto |

### Step 2.2: Verify DNS Propagation

@@ -654,6 +657,138 @@ flux get sources git -n flux-system

flux get kustomizations -n flux-system
```
### Step 4.6: Configure Gitea Webhook for CI/CD Pipeline
|
||||
|
||||
> **Important:** The Tekton EventListener is exposed via an internal Kubernetes service. For Gitea (running in the same cluster) to trigger pipelines, you need to configure a webhook pointing to the EventListener service.
|
||||
|
||||
```bash
|
||||
# Get the EventListener service details
|
||||
kubectl get svc -n tekton-pipelines | grep el-bakery-ia
|
||||
|
||||
# Expected output:
|
||||
# el-bakery-ia-event-listener ClusterIP 10.x.x.x <none> 8080/TCP,9000/TCP
|
||||
```
|
||||
|
||||
**Configure Webhook in Gitea UI:**
|
||||
|
||||
1. Navigate to: `https://gitea.bakewise.ai/bakery-admin/bakery-ia/settings/hooks`
|
||||
2. Click **"Add Webhook"** → **"Gitea"**
|
||||
3. Configure the webhook:
|
||||
|
||||
| Setting | Value |
|
||||
|---------|-------|
|
||||
| **Target URL** | `http://el-bakery-ia-event-listener.tekton-pipelines.svc.cluster.local:8080` |
|
||||
| **HTTP Method** | POST |
|
||||
| **Content Type** | application/json |
|
||||
| **Secret** | (leave empty or use `$TEKTON_WEBHOOK_TOKEN` from Step 4.4) |
|
||||
| **Trigger On** | Push Events |
|
||||
| **Branch Filter** | `main` |
|
||||
| **Active** | ✅ Checked |
|
||||
|
||||
4. Click **"Add Webhook"**
|
||||
5. Click **"Test Delivery"** to verify connectivity
|
||||
|
||||
**Verify Webhook Works:**
|
||||
|
||||
```bash
|
||||
# Watch for new PipelineRuns after pushing a commit
|
||||
kubectl get pipelineruns -n tekton-pipelines --watch
|
||||
|
||||
# Or make a test push
|
||||
cd /root/bakery-ia
|
||||
git commit --allow-empty -m "Test CI/CD trigger"
|
||||
git push origin main
|
||||
|
||||
# Check if pipeline was triggered
|
||||
kubectl get pipelineruns -n tekton-pipelines
|
||||
```
|
||||
|
||||
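Before configuring Gitea, you can also confirm the EventListener answers HTTP from inside the cluster. The dummy payload below will most likely be rejected by the trigger interceptors, but any HTTP response (202 or 4xx) proves the service is reachable:

```bash
# Reachability check from a throwaway in-cluster pod
kubectl run -it --rm webhook-test --image=curlimages/curl --restart=Never -- \
  curl -s -o /dev/null -w "HTTP %{http_code}\n" \
  -X POST -H "Content-Type: application/json" -d '{"ping": true}' \
  http://el-bakery-ia-event-listener.tekton-pipelines.svc.cluster.local:8080
```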
**Alternative: External Webhook URL (if DNS issues)**
|
||||
|
||||
If Gitea cannot resolve the internal service DNS, an ingress was created for external access:
|
||||
|
||||
```bash
|
||||
# Verify EventListener ingress exists
|
||||
kubectl get ingress -n tekton-pipelines
|
||||
|
||||
# Use external URL instead:
|
||||
# Target URL: https://tekton-webhook.bakewise.ai
|
||||
```
|
||||
|
||||
> **DNS Note:** The internal URL (`http://el-bakery-ia-event-listener.tekton-pipelines.svc.cluster.local:8080`) should work for Gitea pods in the same cluster. If you encounter DNS resolution errors, use the external ingress URL.
|
||||
|
||||
### Step 4.7: Configure PipelineRun Cleanup (Optional but Recommended)
|
||||
|
||||
> **Purpose:** Completed PipelineRuns and TaskRuns accumulate over time and consume etcd storage. Configure automatic pruning to keep only recent runs.
|
||||
|
||||
**Option A: Configure Tekton Pruner (Recommended)**
|
||||
|
||||
```bash
|
||||
# Create a CronJob to prune old PipelineRuns and TaskRuns
|
||||
cat <<'EOF' | kubectl apply -f -
apiVersion: batch/v1
kind: CronJob
metadata:
  name: tekton-resource-pruner
  namespace: tekton-pipelines
spec:
  schedule: "0 2 * * *"  # Run daily at 2 AM
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: tekton-pipeline-sa
          containers:
            - name: pruner
              image: bitnami/kubectl:latest
              command:
                - /bin/sh
                - -c
                - |
                  echo "Pruning completed PipelineRuns (keeping the 5 most recent)..."
                  kubectl get pipelineruns -n tekton-pipelines \
                    --selector='tekton.dev/pipeline=bakery-ia-ci' \
                    --sort-by=.metadata.creationTimestamp -o name \
                    | head -n -5 \
                    | xargs -r kubectl delete -n tekton-pipelines || true
                  echo "Pruning completed TaskRuns (keeping the 10 most recent)..."
                  kubectl get taskruns -n tekton-pipelines \
                    --sort-by=.metadata.creationTimestamp -o name \
                    | head -n -10 \
                    | xargs -r kubectl delete -n tekton-pipelines || true
                  echo "Cleanup complete"
          restartPolicy: OnFailure
EOF
```
|
||||
|
||||
**Option B: Manual Cleanup**
|
||||
|
||||
```bash
|
||||
# Delete all completed PipelineRuns (keep last 5)
|
||||
kubectl get pipelineruns -n tekton-pipelines --sort-by=.metadata.creationTimestamp -o name | head -n -5 | xargs -r kubectl delete -n tekton-pipelines
|
||||
|
||||
# Delete all completed TaskRuns (keep last 10)
|
||||
kubectl get taskruns -n tekton-pipelines --sort-by=.metadata.creationTimestamp -o name | head -n -10 | xargs -r kubectl delete -n tekton-pipelines
|
||||
|
||||
# Delete PipelineRuns older than 7 days
|
||||
kubectl get pipelineruns -n tekton-pipelines -o json | \
|
||||
jq -r '.items[] | select(.metadata.creationTimestamp | fromdateiso8601 < (now - 604800)) | .metadata.name' | \
|
||||
xargs -r kubectl delete pipelinerun -n tekton-pipelines
|
||||
```
|
||||
|
||||
**Check Current Resource Usage:**
|
||||
|
||||
```bash
|
||||
# Count PipelineRuns
|
||||
kubectl get pipelineruns -n tekton-pipelines --no-headers | wc -l
|
||||
|
||||
# Count TaskRuns
|
||||
kubectl get taskruns -n tekton-pipelines --no-headers | wc -l
|
||||
|
||||
# Check etcd storage (if metrics-server is enabled)
|
||||
kubectl top pods -n kube-system -l component=etcd
|
||||
```
|
||||
|
||||
|
||||
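**Alternative (Tekton Operator installs only):** if Tekton was installed through the Tekton Operator, its built-in pruner can replace the CronJob from Option A entirely. This guide does not assume an operator-managed install, so treat the following as an illustrative sketch and check the field names against the operator documentation:

```yaml
apiVersion: operator.tekton.dev/v1alpha1
kind: TektonConfig
metadata:
  name: config
spec:
  pruner:
    resources:
      - pipelinerun
      - taskrun
    keep: 5                  # keep the 5 most recent runs of each resource
    schedule: "0 2 * * *"    # same nightly schedule as Option A
```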
## Phase 5: Pre-Pull and Push Base Images to Gitea Registry
|
||||
|
||||
@@ -1045,38 +1180,84 @@ kubectl exec -n bakery-ia deployment/gateway -- curl -s http://localhost:8000/he

## Phase 7: Deploy Optional Services

### Step 7.1: Configure CoreDNS with DNS-over-TLS for DNSSEC

> **DNS Architecture:** CoreDNS is configured to use DNS-over-TLS with Cloudflare (1.1.1.1) for DNSSEC validation.
> This provides DNSSEC support for Mailu without requiring additional DNS pods.

```bash
# Check if CoreDNS is already configured with DNS-over-TLS
kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'tls://1.1.1.1' || echo "Not configured"

# If not configured, update CoreDNS to use DNS-over-TLS with Cloudflare
cat > /tmp/coredns-corefile.yaml << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . tls://1.1.1.1 tls://1.0.0.1 {
            tls_servername cloudflare-dns.com
            health_check 5s
        }
        cache 30 {
            disable success cluster.local
            disable denial cluster.local
        }
        loop
        reload
        loadbalance
    }
EOF

kubectl apply -f /tmp/coredns-corefile.yaml

# Restart CoreDNS to apply changes
kubectl rollout restart deployment coredns -n kube-system
kubectl rollout status deployment coredns -n kube-system --timeout=60s

# Verify CoreDNS is running
kubectl get pods -n kube-system -l k8s-app=kube-dns
# Expected: 1/1 Running

# Get CoreDNS service IP (will be used for Mailu)
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
echo "CoreDNS IP: $COREDNS_IP"
# Save this IP - you'll need it for Step 7.2

# Test DNS resolution is working
kubectl run -it --rm dns-test --image=busybox --restart=Never -- nslookup google.com
# Expected: Should resolve google.com successfully
```

**Troubleshooting CoreDNS:**

```bash
# Check CoreDNS logs
kubectl logs -n kube-system -l k8s-app=kube-dns

# Check CoreDNS configuration
kubectl get configmap coredns -n kube-system -o yaml

# Verify DNS-over-TLS is working
kubectl run -it --rm dns-test --image=busybox --restart=Never -- nslookup cloudflare.com
```
### Step 7.2: Deploy Mailu Email Server
|
||||
|
||||
```bash
|
||||
# Add Mailu Helm repository
|
||||
@@ -1084,27 +1265,41 @@ helm repo add mailu https://mailu.github.io/helm-charts
|
||||
helm repo update
|
||||
|
||||
# Apply Mailu configuration secrets
|
||||
# These are pre-configured with secure defaults
|
||||
kubectl apply -f infrastructure/platform/mail/mailu-helm/configs/mailu-admin-credentials-secret.yaml -n bakery-ia
|
||||
kubectl apply -f infrastructure/platform/mail/mailu-helm/configs/mailu-certificates-secret.yaml -n bakery-ia
|
||||
|
||||
# Get CoreDNS service IP dynamically
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
echo "Using CoreDNS IP: $COREDNS_IP"
|
||||
|
||||
# Install Mailu with production configuration
|
||||
# The Helm chart uses the pre-configured secrets for admin credentials and TLS certificates
|
||||
# The --set flag dynamically passes the CoreDNS IP for DNS resolution
|
||||
# DNSSEC validation is provided by CoreDNS via DNS-over-TLS to Cloudflare
|
||||
helm upgrade --install mailu mailu/mailu \
|
||||
-n bakery-ia \
|
||||
-f infrastructure/platform/mail/mailu-helm/values.yaml \
|
||||
-f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
|
||||
# Wait for Mailu to be ready
|
||||
# Wait for Mailu to be ready (may take 5-10 minutes)
|
||||
kubectl wait --for=condition=available --timeout=600s deployment/mailu-front -n bakery-ia
|
||||
|
||||
# Deploy the Mailu ingress for mail.bakewise.ai
|
||||
# Note: Use prod/mailu-ingress.yaml for production, dev/mailu-ingress.yaml for development
|
||||
kubectl apply -f infrastructure/platform/mail/mailu-helm/prod/mailu-ingress.yaml
|
||||
|
||||
# Verify Mailu ingress is created
|
||||
kubectl get ingress mailu-ingress -n bakery-ia
|
||||
|
||||
# Verify Mailu pods are running
|
||||
kubectl get pods -n bakery-ia | grep mailu
|
||||
|
||||
# Get the admin password from the pre-configured secret
|
||||
# Get the admin password
|
||||
MAILU_ADMIN_PASSWORD=$(kubectl get secret mailu-admin-credentials -n bakery-ia -o jsonpath='{.data.password}' | base64 -d)
|
||||
echo "============================================"
|
||||
echo "Mailu Admin Password: $MAILU_ADMIN_PASSWORD"
|
||||
echo "============================================"
|
||||
echo "⚠️ SAVE THIS PASSWORD SECURELY!"
|
||||
|
||||
# Check Mailu initialization status
|
||||
@@ -1113,53 +1308,60 @@ kubectl logs -n bakery-ia deployment/mailu-front --tail=10
|
||||
|
||||
> **Important Notes about Mailu Deployment:**
|
||||
>
|
||||
> 1. **Pre-Configured Secrets:** Mailu uses pre-configured secrets for admin credentials and TLS certificates. These are defined in the configuration files.
|
||||
> 1. **Pre-Configured Secrets:** Mailu uses pre-configured secrets for admin credentials and TLS certificates.
|
||||
>
|
||||
> 2. **Password Management:** The admin password is stored in `mailu-admin-credentials-secret.yaml`. For production, you should update this with a secure password before deployment.
|
||||
> 2. **Password Management:** Update `mailu-admin-credentials-secret.yaml` with a secure password before deployment.
|
||||
>
|
||||
> 3. **TLS Certificates:** The self-signed certificates in `mailu-certificates-secret.yaml` are for initial setup. For production, replace these with proper certificates from cert-manager (see Step 7.3.1).
|
||||
> 3. **TLS Certificates:** Self-signed certificates are used internally. External traffic uses Let's Encrypt via Ingress.
|
||||
>
|
||||
> 4. **Initialization Time:** Mailu may take 5-10 minutes to fully initialize. During this time, some pods may restart as the system configures itself.
|
||||
> 4. **Initialization Time:** Mailu may take 5-10 minutes to fully initialize. Pods may restart during setup.
|
||||
>
|
||||
> 5. **Accessing Mailu:**
|
||||
> - Webmail: `https://mail.bakewise.ai/webmail`
|
||||
> - Admin Interface: `https://mail.bakewise.ai/admin`
|
||||
> - Username: `admin@bakewise.ai`
|
||||
> - Password: (from `mailu-admin-credentials-secret.yaml`)
|
||||
> - Password: (from secret above)
|
||||
>
|
||||
> 6. **Mailgun Relay:** The production configuration includes Mailgun SMTP relay. Configure your Mailgun credentials in `mailu-mailgun-credentials-secret.yaml` before deployment.
|
||||
> 6. **MailerSend Relay:** Configure credentials in `mailersend-credentials-secret.yaml` before deployment.
|
||||
|
||||
### Step 7.2.1: Mailu Configuration Notes
|
||||
|
||||
> **Important Information about Mailu Certificates:**
|
||||
> **Certificate Architecture:**
|
||||
>
|
||||
> 1. **Dual Certificate Architecture:**
|
||||
> - **Internal Communication:** Uses self-signed certificates (`mailu-certificates-secret.yaml`)
|
||||
> - **External Communication:** Uses Let's Encrypt certificates via NGINX Ingress (`bakery-ia-prod-tls-cert`)
|
||||
> ```
|
||||
> External Client → NGINX Ingress (Let's Encrypt) → Internal Network → Mailu Services (Self-signed)
|
||||
> ```
|
||||
>
|
||||
> 2. **No Certificate Replacement Needed:** The self-signed certificates are only used for internal communication between Mailu services. External clients connect through the NGINX Ingress Controller which uses the publicly trusted Let's Encrypt certificates.
|
||||
>
|
||||
> 3. **Certificate Flow:**
|
||||
> ```
|
||||
> External Client → NGINX Ingress (Let's Encrypt) → Internal Network → Mailu Services (Self-signed)
|
||||
> ```
|
||||
>
|
||||
> 4. **Security:** This architecture is secure because:
|
||||
> - External connections use publicly trusted certificates
|
||||
> - Internal connections are still encrypted (even if self-signed)
|
||||
> - Ingress terminates TLS, reducing load on Mailu services
|
||||
>
|
||||
> 5. **Mailgun Relay Configuration:** For outbound email delivery, configure your Mailgun credentials:
|
||||
> ```bash
|
||||
> # Edit the Mailgun credentials secret
|
||||
> nano infrastructure/platform/mail/mailu-helm/configs/mailu-mailgun-credentials-secret.yaml
|
||||
>
|
||||
> # Apply the secret
|
||||
> kubectl apply -f infrastructure/platform/mail/mailu-helm/configs/mailu-mailgun-credentials-secret.yaml -n bakery-ia
|
||||
>
|
||||
> # Restart Mailu to pick up the new relay configuration
|
||||
> kubectl rollout restart deployment -n bakery-ia -l app.kubernetes.io/instance=mailu
|
||||
> ```
|
||||
> - **External:** Uses publicly trusted Let's Encrypt certificates via NGINX Ingress
|
||||
> - **Internal:** Uses self-signed certificates for inter-service communication
|
||||
> - **No replacement needed:** This dual-certificate architecture is intentional and secure
|
||||
|
||||
**Configure MailerSend Relay (for outbound email):**
|
||||
|
||||
```bash
|
||||
# 1. Sign up at https://accounts.mailersend.com/signup
|
||||
# 2. Add your domain (bakewise.ai) and verify DNS records
|
||||
# 3. Generate SMTP credentials: Email -> Domains -> SMTP -> Generate new user
|
||||
|
||||
# Edit the MailerSend credentials secret
|
||||
nano infrastructure/platform/mail/mailu-helm/configs/mailersend-credentials-secret.yaml
|
||||
|
||||
# Apply the secret
|
||||
kubectl apply -f infrastructure/platform/mail/mailu-helm/configs/mailersend-credentials-secret.yaml -n bakery-ia
|
||||
|
||||
# Restart Mailu to pick up the new relay configuration
|
||||
kubectl rollout restart deployment -n bakery-ia -l app.kubernetes.io/instance=mailu
|
||||
```
|
||||
|
||||
**MailerSend DNS Records (add to Cloudflare):**
|
||||
|
||||
| Type | Name | Value |
|
||||
|------|------|-------|
|
||||
| TXT (SPF) | @ | v=spf1 include:mailersend.net mx a ~all |
|
||||
| TXT (DKIM) | mlsend._domainkey | (from MailerSend dashboard) |
|
||||
| TXT (DKIM) | mlsend2._domainkey | (from MailerSend dashboard) |
|
||||
|
||||
> **Note:** MailerSend free tier includes 3,000 emails/month (12,000 with verified domain).
|
||||
|
||||
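Once the records have propagated they can be spot-checked from any machine with `dig`; the names follow the table above (the DKIM selectors come from your MailerSend dashboard, so adjust if yours differ):

```bash
# SPF record should include mailersend.net
dig +short TXT bakewise.ai | grep spf1

# DKIM selectors published for MailerSend (values come from the MailerSend dashboard)
dig +short TXT mlsend._domainkey.bakewise.ai
dig +short TXT mlsend2._domainkey.bakewise.ai
```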
### Step 7.4: Deploy SigNoz Monitoring
|
||||
|
||||
@@ -1184,6 +1386,79 @@ kubectl wait --for=condition=available --timeout=600s deployment/signoz-frontend
|
||||
kubectl get pods -n bakery-ia -l app.kubernetes.io/instance=signoz
|
||||
```
|
||||
|
||||
### Step 7.5: Deploy Kubernetes Infrastructure Monitoring (Required for SigNoz Infrastructure View)
|
||||
|
||||
> **Purpose:** Deploy the official SigNoz k8s-infra chart to enable comprehensive Kubernetes infrastructure metrics in SigNoz. This replaces the separate kube-state-metrics and node-exporter deployments, which have been removed as legacy components.
|
||||
|
||||
**Components Deployed:**
|
||||
|
||||
| Component | Purpose | Metrics |
|
||||
|-----------|---------|---------|
|
||||
| **SigNoz k8s-infra** | Unified Kubernetes infrastructure monitoring | Host metrics (CPU, Memory, Disk, Network), Kubelet metrics (Pod/container usage), Cluster metrics (Deployments, Pods, Nodes), Kubernetes events |
|
||||
|
||||
**Deploy using the official SigNoz k8s-infra chart:**
|
||||
|
||||
```bash
|
||||
# Add SigNoz Helm repository (if not already added)
|
||||
helm repo add signoz https://charts.signoz.io
|
||||
helm repo update
|
||||
|
||||
# Install the k8s-infra chart
|
||||
helm upgrade --install k8s-infra signoz/k8s-infra \
|
||||
-n bakery-ia \
|
||||
-f infrastructure/monitoring/signoz/k8s-infra-values-prod.yaml \
|
||||
--timeout 10m
|
||||
|
||||
# Wait for the DaemonSet to be ready
|
||||
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=signoz-agent -n bakery-ia --timeout=300s
|
||||
```
|
||||
|
||||
**Verify k8s-infra deployment:**
|
||||
|
||||
```bash
|
||||
# Check if the k8s-infra agent is running (should see one pod per node)
|
||||
kubectl get pods -n bakery-ia -l app.kubernetes.io/name=signoz-agent
|
||||
|
||||
# Expected output (one pod per cluster node):
|
||||
# signoz-agent-xxxxx 1/1 Running 0 1m
|
||||
# signoz-agent-yyyyy 1/1 Running 0 1m
|
||||
```
|
||||
|
||||
**Verify metrics in SigNoz:**
|
||||
|
||||
After a few minutes, you should see:
|
||||
- **Infrastructure → Kubernetes**: Pod status, deployments, nodes, PVCs
|
||||
- **Infrastructure → Hosts**: CPU, memory, disk, network usage
|
||||
|
||||
**Important Notes:**
|
||||
|
||||
1. **Legacy Components Removal:** If you previously had kube-state-metrics or node-exporter deployed, you should remove them to avoid duplicate metrics:
|
||||
```bash
|
||||
# Remove legacy components if they exist
|
||||
helm uninstall kube-state-metrics -n bakery-ia 2>/dev/null || true
|
||||
helm uninstall node-exporter-prometheus-node-exporter -n bakery-ia 2>/dev/null || true
|
||||
```
|
||||
|
||||
2. **Configuration:** The k8s-infra chart is configured via `k8s-infra-values-prod.yaml` which specifies:
|
||||
- Connection to your SigNoz OTel collector endpoint
|
||||
- Collection intervals and presets for different metric types
|
||||
- Resource limits for the monitoring agents
|
||||
|
||||
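The values file itself is not reproduced in this guide. As a rough illustration of the kind of settings it carries (key names should be verified against the chart's own `values.yaml` before use):

```yaml
# Illustrative sketch only - verify keys against the signoz/k8s-infra chart defaults
global:
  clusterName: bakery-ia-prod            # how this cluster is labelled in SigNoz
otelCollectorEndpoint: signoz-otel-collector.bakery-ia.svc.cluster.local:4317
otelInsecure: true                       # in-cluster traffic, TLS terminated at the ingress
presets:
  clusterMetrics:
    enabled: true
  kubeletMetrics:
    enabled: true
  kubernetesEvents:
    enabled: true
```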
**Troubleshooting:**
|
||||
|
||||
```bash
|
||||
# Check k8s-infra agent logs
|
||||
kubectl logs -l app.kubernetes.io/name=signoz-agent -n bakery-ia --tail=50
|
||||
|
||||
# Verify the agent can connect to SigNoz collector
|
||||
kubectl logs -l app.kubernetes.io/name=signoz-agent -n bakery-ia | grep -i error
|
||||
```
|
||||
|
||||
> **Files Location:**
|
||||
> - Helm values: `infrastructure/monitoring/signoz/k8s-infra-values-prod.yaml`
|
||||
> - Helm values: `infrastructure/monitoring/signoz/k8s-infra-values-dev.yaml`
|
||||
> - Documentation: `infrastructure/monitoring/signoz/README.md`
|
||||
|
||||
---
|
||||
|
||||
## Phase 8: Verification & Validation
|
||||
@@ -1255,30 +1530,119 @@ kubectl exec -n bakery-ia deployment/redis -- redis-cli ping
|
||||
|
||||
### Configure Stripe Keys (Required Before Going Live)

**IMPORTANT**: Before going live, you MUST replace test keys with live Stripe keys.

#### Step 1: Get Your Live Stripe Keys

1. Go to [Stripe Dashboard](https://dashboard.stripe.com/apikeys)
2. Make sure you're in **Live mode** (toggle in top right)
3. Copy your **Publishable key** (starts with `pk_live_`)
4. Copy your **Secret key** (starts with `sk_live_`)
5. Get your **Webhook signing secret** from Stripe webhook settings

#### Step 2: Update Configuration Files

```bash
# 1. Update the common configmap with your live publishable key
nano infrastructure/environments/common/configs/configmap.yaml

# Find and replace these lines:
VITE_STRIPE_PUBLISHABLE_KEY: "pk_test_51QuxKyIzCdnBmAVTGM8fvXYkItrBUILz6lHYwhAva6ZAH1HRi0e8zDRgZ4X3faN0zEABp5RHjCVBmMJL3aKXbaC200fFrSNnPl"
VITE_STRIPE_ACCOUNT_ID: "acct_1QuxKsIucMC6K1cg"

# Replace with your live key and account ID:
VITE_STRIPE_PUBLISHABLE_KEY: "pk_live_your_publishable_key_here"
VITE_STRIPE_ACCOUNT_ID: "acct_1QuxKsIucMC6K1cg" # Keep your account ID, just remove "test_" prefix if needed

# 2. Encode your live secret keys (required for Kubernetes secrets)
echo -n "sk_live_your_secret_key_here" | base64
# Example output: c2tfbGl2ZV95b3VyX3NlY3JldF9rZXlfaGVyZQ==

echo -n "whsec_your_webhook_secret_here" | base64
# Example output: d2hzZWNfeW91cl93ZWJob29rX3NlY3JldF9oZXJl

# 3. Update the secrets file
nano infrastructure/environments/common/configs/secrets.yaml

# Find the payment-secrets section and update:
STRIPE_SECRET_KEY: c2tfbGl2ZV95b3VyX3NlY3JldF9rZXlfaGVyZQ== # Replace with your encoded live secret key
STRIPE_WEBHOOK_SECRET: d2hzZWNfeW91cl93ZWJob29rX3NlY3JldF9oZXJl # Replace with your encoded webhook secret

# 4. Update production kustomization
nano infrastructure/environments/prod/k8s-manifests/kustomization.yaml

# Find and update the Stripe configuration patch:
- op: replace
  path: /data/VITE_STRIPE_PUBLISHABLE_KEY
  value: "pk_live_your_publishable_key_here"
- op: add
  path: /data/VITE_STRIPE_ACCOUNT_ID
  value: "acct_1QuxKsIucMC6K1cg"
```
|
||||
|
||||
#### Step 3: Apply Configuration and Restart Services
|
||||
|
||||
```bash
|
||||
# Apply the updated configuration
|
||||
kubectl apply -k infrastructure/environments/prod/k8s-manifests/
|
||||
|
||||
# Restart services that use Stripe (order matters)
|
||||
kubectl rollout restart deployment/tenant-service -n bakery-ia
|
||||
kubectl rollout restart deployment/gateway -n bakery-ia
|
||||
kubectl rollout restart deployment/frontend -n bakery-ia
|
||||
|
||||
# Monitor the restart process
|
||||
kubectl get pods -n bakery-ia -w
|
||||
```
|
||||
|
||||
#### Step 4: Verify Stripe Configuration
|
||||
|
||||
```bash
|
||||
# Check that the configmap was updated correctly
|
||||
kubectl get configmap bakery-config -n bakery-ia -o yaml | grep STRIPE
|
||||
|
||||
# Check that secrets are properly encoded
|
||||
kubectl get secret payment-secrets -n bakery-ia -o yaml | grep STRIPE
|
||||
|
||||
# Test a small payment (€1.00) with a real card
|
||||
# Use Stripe test cards first: 4242 4242 4242 4242
|
||||
```
|
||||
|
||||
#### Step 5: Update Stripe Webhooks (Critical)
|
||||
|
||||
```bash
|
||||
# 1. Update your Stripe webhook endpoint to use the live URL:
|
||||
# https://bakewise.ai/api/webhooks/stripe
|
||||
|
||||
# 2. Update the webhook signing secret in Stripe dashboard
|
||||
# to match what you configured in secrets.yaml
|
||||
|
||||
# 3. Test webhooks:
|
||||
stripe trigger payment_intent.succeeded
|
||||
stripe trigger invoice.paid
|
||||
```
|
||||
|
||||
#### Step 6: PCI Compliance Checklist
|
||||
|
||||
Before going live, ensure:
|
||||
- [ ] All payment pages use HTTPS (check your ingress TLS configuration)
|
||||
- [ ] No card data is logged or stored in your databases
|
||||
- [ ] Your server meets PCI DSS requirements
|
||||
- [ ] You have a vulnerability management process
|
||||
- [ ] Regular security audits are scheduled
|
||||
|
||||
#### Step 7: Go Live Checklist
|
||||
|
||||
- [ ] Stripe live keys configured in all services
|
||||
- [ ] Webhooks tested and working
|
||||
- [ ] PCI compliance verified
|
||||
- [ ] Test payments successful in live mode
|
||||
- [ ] Refund process tested
|
||||
- [ ] Customer support ready for payment issues
|
||||
- [ ] Monitoring set up for payment failures
|
||||
|
||||
**WARNING**: Once you switch to live keys, real money will be processed. Start with small test transactions and monitor closely.
|
||||
|
||||
### Backup Strategy
|
||||
|
||||
```bash
|
||||
@@ -1457,3 +1821,79 @@ This guide provides a complete, step-by-step process for deploying Bakery-IA to
|
||||
4. **Scalable:** Designed for 10-100+ tenants with clear scaling path
|
||||
|
||||
For questions or issues, refer to the troubleshooting guide or consult the support resources listed above.
|
||||
|
||||
### Email System Configuration
|
||||
|
||||
#### Setting Up email-secrets Properly
|
||||
|
||||
**Important:** The `email-secrets` secret must be configured to use the Mailu admin account credentials for proper email functionality.
|
||||
|
||||
**Recommended Approach:**
|
||||
|
||||
1. **Use Mailu Admin Account** (instead of creating separate postmaster account):
|
||||
|
||||
```bash
|
||||
# Get the admin password from mailu-admin-credentials
|
||||
ADMIN_PASSWORD=$(kubectl get secret mailu-admin-credentials -n bakery-ia -o jsonpath='{.data.password}' | base64 -d)
|
||||
|
||||
# Update email-secrets to use admin account
|
||||
kubectl edit secret email-secrets -n bakery-ia
|
||||
|
||||
# Change the values to:
|
||||
# SMTP_USER: admin@bakewise.ai
|
||||
# SMTP_PASSWORD: [the admin password you retrieved]
|
||||
```
|
||||
|
||||
2. **Alternative: Create Postmaster Account** (if you prefer separate accounts):
|
||||
|
||||
```bash
|
||||
# Log in to Mailu admin panel
|
||||
# URL: https://mail.bakewise.ai/admin
|
||||
# Username: admin@bakewise.ai
|
||||
# Password: [from mailu-admin-credentials]
|
||||
|
||||
# Navigate to Users -> Create New User
|
||||
# Email: postmaster@bakewise.ai
|
||||
# Password: [generate secure password]
|
||||
# Role: Admin (or create custom role with email sending permissions)
|
||||
|
||||
# Update email-secrets with the postmaster credentials
|
||||
kubectl edit secret email-secrets -n bakery-ia
|
||||
```
|
||||
|
||||
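If you prefer to script the first approach instead of going through `kubectl edit`, a merge patch along these lines should work (the `SMTP_USER`/`SMTP_PASSWORD` keys are the ones referenced above — confirm they match what the notification service actually reads, and note the inline JSON breaks if the password contains double quotes):

```bash
# Non-interactive alternative to `kubectl edit secret email-secrets`
ADMIN_PASSWORD=$(kubectl get secret mailu-admin-credentials -n bakery-ia -o jsonpath='{.data.password}' | base64 -d)

kubectl patch secret email-secrets -n bakery-ia --type merge -p "{
  \"stringData\": {
    \"SMTP_USER\": \"admin@bakewise.ai\",
    \"SMTP_PASSWORD\": \"${ADMIN_PASSWORD}\"
  }
}"

# Restart the consumer so it picks up the new credentials
kubectl rollout restart deployment/notification-service -n bakery-ia
```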
**Verifying Email Configuration:**
|
||||
|
||||
```bash
|
||||
# Test email sending via notification service
|
||||
kubectl exec -n bakery-ia deployment/notification-service -it -- bash
|
||||
|
||||
# Inside the container:
|
||||
python -c "
|
||||
from app.services.email_service import EmailService
|
||||
from app.core.config import settings
|
||||
es = EmailService()
|
||||
print('Testing email service...')
|
||||
result = await es.health_check()
|
||||
print(f'Email service healthy: {result}')
|
||||
"
|
||||
```
|
||||
|
||||
**Troubleshooting Email Issues:**
|
||||
|
||||
```bash
|
||||
# Check Mailu logs
|
||||
kubectl logs -n bakery-ia deployment/mailu-postfix | tail -50
|
||||
|
||||
# Check notification service logs
|
||||
kubectl logs -n bakery-ia deployment/notification-service | grep -i email | tail -20
|
||||
|
||||
# Test SMTP connection manually
|
||||
kubectl run -it --rm smtp-test --image=alpine --restart=Never -- \
  sh -c "apk add --no-cache openssl && openssl s_client -connect mailu-postfix:587 -starttls smtp"
|
||||
```
|
||||
|
||||
**DOVEADM_PASSWORD Note:**
|
||||
- This is for IMAP administration (rarely used)
|
||||
- Only needed if you require advanced mailbox management
|
||||
- Can be safely removed if not using IMAP admin features
|
||||
|
||||
Tiltfile
@@ -46,6 +46,10 @@ if use_dockerhub:
|
||||
base_registry = 'docker.io'
|
||||
python_image = 'python:3.11-slim'
|
||||
|
||||
# Git commit hash for migration job names (extracted from manifest to match CI/CD updates)
|
||||
# We read from a manifest file rather than git HEAD because CI/CD commits may not be checked out locally
|
||||
git_commit_short = str(local("sed -n 's/.*name: auth-migration-\\([a-f0-9]*\\).*/\\1/p' infrastructure/services/microservices/auth/migrations/auth-migration-job.yaml | head -1", quiet=True)).strip()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# PREPULL BASE IMAGES - RUNS AFTER SECURITY SETUP
|
||||
@@ -728,100 +732,20 @@ k8s_resource('rabbitmq', resource_deps=['security-setup'], labels=['01-infrastru
|
||||
k8s_resource('minio', resource_deps=['security-setup'], labels=['01-infrastructure'])
|
||||
k8s_resource('minio-bucket-init', resource_deps=['minio'], labels=['01-infrastructure'])
|
||||
|
||||
# Unbound DNSSEC Resolver - Infrastructure component for Mailu DNS validation
|
||||
# CoreDNS DNSSEC Configuration - Infrastructure component for Mailu DNS validation
|
||||
local_resource(
|
||||
'unbound-helm',
|
||||
'coredns-dnssec',
|
||||
cmd='''
|
||||
echo "Deploying Unbound DNS resolver via Helm..."
|
||||
echo "Configuring CoreDNS with DNS-over-TLS for DNSSEC validation..."
|
||||
echo ""
|
||||
|
||||
# Check if Unbound is already deployed
|
||||
if helm list -n bakery-ia | grep -q unbound; then
|
||||
echo "Unbound already deployed, checking status..."
|
||||
helm status unbound -n bakery-ia
|
||||
else
|
||||
echo "Installing Unbound..."
|
||||
# Check if CoreDNS is already configured with DNS-over-TLS
|
||||
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' 2>/dev/null | grep -o 'tls://1.1.1.1' || echo "")
|
||||
|
||||
# Determine environment (dev or prod) based on context
|
||||
ENVIRONMENT="dev"
|
||||
if [[ "$(kubectl config current-context)" == *"prod"* ]]; then
|
||||
ENVIRONMENT="prod"
|
||||
fi
|
||||
if [ -z "$CURRENT_FORWARD" ]; then
|
||||
echo "Updating CoreDNS to use DNS-over-TLS with Cloudflare..."
|
||||
|
||||
echo "Environment detected: $ENVIRONMENT"
|
||||
|
||||
# Install Unbound with appropriate values
|
||||
if [ "$ENVIRONMENT" = "dev" ]; then
|
||||
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
|
||||
-n bakery-ia \
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/dev/values.yaml \
|
||||
--timeout 5m \
|
||||
--wait
|
||||
else
|
||||
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
|
||||
-n bakery-ia \
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/prod/values.yaml \
|
||||
--timeout 5m \
|
||||
--wait
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Unbound deployment completed"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Unbound DNS Service Information:"
|
||||
echo " Service Name: unbound-dns.bakery-ia.svc.cluster.local"
|
||||
echo " Ports: UDP/TCP 53"
|
||||
echo " Used by: Mailu for DNS validation"
|
||||
echo ""
|
||||
echo "To check pod status: kubectl get pods -n bakery-ia | grep unbound"
|
||||
''',
|
||||
resource_deps=['security-setup'],
|
||||
labels=['01-infrastructure'],
|
||||
auto_init=True # Auto-deploy with Tilt startup
|
||||
)
|
||||
|
||||
# Mail Infrastructure (Mailu) - Manual trigger for Helm deployment
|
||||
local_resource(
|
||||
'mailu-helm',
|
||||
cmd='''
|
||||
echo "Deploying Mailu via Helm..."
|
||||
echo ""
|
||||
|
||||
# =====================================================
|
||||
# Step 1: Ensure Unbound is deployed and get its IP
|
||||
# =====================================================
|
||||
echo "Checking Unbound DNS resolver..."
|
||||
if ! kubectl get svc unbound-dns -n bakery-ia &>/dev/null; then
|
||||
echo "ERROR: Unbound DNS service not found!"
|
||||
echo "Please deploy Unbound first by triggering 'unbound-helm' resource"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
|
||||
echo "Unbound DNS service IP: $UNBOUND_IP"
|
||||
|
||||
# =====================================================
|
||||
# Step 2: Configure CoreDNS to forward to Unbound
|
||||
# =====================================================
|
||||
echo ""
|
||||
echo "Configuring CoreDNS to forward external queries to Unbound for DNSSEC validation..."
|
||||
|
||||
# Check current CoreDNS forward configuration
|
||||
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'forward \\. [0-9.]*' | awk '{print $3}')
|
||||
|
||||
if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
|
||||
echo "Updating CoreDNS to forward to Unbound ($UNBOUND_IP)..."
|
||||
|
||||
# Change to project root to ensure correct file paths
|
||||
cd /Users/urtzialfaro/Documents/bakery-ia
|
||||
|
||||
# Create a temporary Corefile with the forwarding configuration
|
||||
# Create a temporary Corefile with the DNS-over-TLS configuration
|
||||
TEMP_COREFILE=$(mktemp)
|
||||
cat > "$TEMP_COREFILE" << EOF
|
||||
.:53 {
|
||||
@@ -836,8 +760,9 @@ local_resource(
|
||||
ttl 30
|
||||
}
|
||||
prometheus :9153
|
||||
forward . $UNBOUND_IP {
|
||||
max_concurrent 1000
|
||||
forward . tls://1.1.1.1 tls://1.0.0.1 {
|
||||
tls_servername cloudflare-dns.com
|
||||
health_check 5s
|
||||
}
|
||||
cache 30 {
|
||||
disable success cluster.local
|
||||
@@ -871,13 +796,44 @@ EOF
|
||||
kubectl rollout restart deployment coredns -n kube-system
|
||||
echo "Waiting for CoreDNS to restart..."
|
||||
kubectl rollout status deployment coredns -n kube-system --timeout=60s
|
||||
echo "CoreDNS configured successfully"
|
||||
echo "CoreDNS configured successfully with DNS-over-TLS"
|
||||
else
|
||||
echo "CoreDNS already configured to forward to Unbound"
|
||||
echo "CoreDNS already configured with DNS-over-TLS"
|
||||
fi
|
||||
|
||||
# Get CoreDNS service IP
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
|
||||
echo ""
|
||||
echo "CoreDNS DNSSEC Configuration:"
|
||||
echo " CoreDNS IP: $COREDNS_IP"
|
||||
echo " Upstream: Cloudflare DNS-over-TLS (1.1.1.1, 1.0.0.1)"
|
||||
echo " DNSSEC: Validated by Cloudflare"
|
||||
echo " Used by: Mailu for DNS validation"
|
||||
echo ""
|
||||
echo "To check CoreDNS status: kubectl get pods -n kube-system -l k8s-app=kube-dns"
|
||||
''',
|
||||
resource_deps=['security-setup'],
|
||||
labels=['01-infrastructure'],
|
||||
auto_init=True # Auto-deploy with Tilt startup
|
||||
)
|
||||
|
||||
# Mail Infrastructure (Mailu) - Manual trigger for Helm deployment
|
||||
local_resource(
|
||||
'mailu-helm',
|
||||
cmd='''
|
||||
echo "Deploying Mailu via Helm..."
|
||||
echo ""
|
||||
|
||||
# =====================================================
|
||||
# Step 3: Create self-signed TLS certificate for Mailu Front
|
||||
# Step 1: Get CoreDNS service IP
|
||||
# =====================================================
|
||||
echo "Getting CoreDNS service IP..."
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
echo "CoreDNS service IP: $COREDNS_IP"
|
||||
|
||||
# =====================================================
|
||||
# Step 2: Create self-signed TLS certificate for Mailu Front
|
||||
# =====================================================
|
||||
echo ""
|
||||
echo "Checking Mailu TLS certificates..."
|
||||
@@ -905,7 +861,7 @@ EOF
|
||||
fi
|
||||
|
||||
# =====================================================
|
||||
# Step 4: Deploy Mailu via Helm
|
||||
# Step 3: Deploy Mailu via Helm
|
||||
# =====================================================
|
||||
echo ""
|
||||
|
||||
@@ -938,6 +894,7 @@ EOF
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/mail/mailu-helm/values.yaml \
|
||||
-f infrastructure/platform/mail/mailu-helm/dev/values.yaml \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
else
|
||||
helm upgrade --install mailu mailu/mailu \
|
||||
@@ -945,6 +902,7 @@ EOF
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/mail/mailu-helm/values.yaml \
|
||||
-f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
fi
|
||||
|
||||
@@ -953,7 +911,7 @@ EOF
|
||||
fi
|
||||
|
||||
# =====================================================
|
||||
# Step 5: Apply Mailu Ingress
|
||||
# Step 4: Apply Mailu Ingress
|
||||
# =====================================================
|
||||
echo ""
|
||||
echo "Applying Mailu ingress configuration..."
|
||||
@@ -962,7 +920,7 @@ EOF
|
||||
echo "Mailu ingress applied for mail.bakery-ia.dev"
|
||||
|
||||
# =====================================================
|
||||
# Step 6: Wait for pods and show status
|
||||
# Step 5: Wait for pods and show status
|
||||
# =====================================================
|
||||
echo ""
|
||||
echo "Waiting for Mailu pods to be ready..."
|
||||
@@ -975,16 +933,20 @@ EOF
|
||||
echo ""
|
||||
echo "Mailu Access Information:"
|
||||
echo " Admin Panel: https://mail.bakery-ia.dev/admin"
|
||||
echo " Webmail: https://mail.bakery-ia.ldev/webmail"
|
||||
echo " Webmail: https://mail.bakery-ia.dev/webmail"
|
||||
echo " SMTP: mail.bakery-ia.dev:587 (STARTTLS)"
|
||||
echo " IMAP: mail.bakery-ia.dev:993 (SSL/TLS)"
|
||||
echo ""
|
||||
echo "DNS Configuration:"
|
||||
echo " CoreDNS IP: $COREDNS_IP"
|
||||
echo " DNSSEC: Provided via DNS-over-TLS (Cloudflare)"
|
||||
echo ""
|
||||
echo "To create admin user:"
|
||||
echo " Admin user created automatically via initialAccount feature in Helm values"
|
||||
echo ""
|
||||
echo "To check pod status: kubectl get pods -n bakery-ia | grep mailu"
|
||||
''',
|
||||
resource_deps=['unbound-helm'], # Ensure Unbound is deployed first
|
||||
resource_deps=['coredns-dnssec'], # Ensure CoreDNS DNSSEC is configured first
|
||||
labels=['01-infrastructure'],
|
||||
auto_init=False, # Manual trigger only
|
||||
)
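# Note: with auto_init=False this resource does not deploy on `tilt up`; once Tilt is running,
# kick it off manually with `tilt trigger mailu-helm` or the trigger button in the Tilt UI.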
|
||||
@@ -1231,79 +1193,80 @@ k8s_resource('demo-session-db', resource_deps=['security-setup'], labels=['06-da
|
||||
# =============================================================================
|
||||
# MIGRATION JOBS
|
||||
# =============================================================================
|
||||
# Migration job names include git commit hash (set by CI/CD in manifests)
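# `git_commit_short` is used below but defined earlier in the Tiltfile (outside this hunk).
# A minimal sketch of how it could be derived, assuming the working tree is a git checkout:
#   git_commit_short = str(local('git rev-parse --short HEAD', quiet=True)).strip()
# Whatever it evaluates to must match the hash the CI/CD pipeline bakes into the job names in the manifests.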
|
||||
|
||||
# Core Service Migrations
|
||||
k8s_resource('auth-migration', resource_deps=['auth-db'], labels=['07-migrations'])
|
||||
k8s_resource('tenant-migration', resource_deps=['tenant-db'], labels=['07-migrations'])
|
||||
k8s_resource('auth-migration-' + git_commit_short, resource_deps=['auth-db'], labels=['07-migrations'])
|
||||
k8s_resource('tenant-migration-' + git_commit_short, resource_deps=['tenant-db'], labels=['07-migrations'])
|
||||
|
||||
# Data & Analytics Migrations
|
||||
k8s_resource('training-migration', resource_deps=['training-db'], labels=['07-migrations'])
|
||||
k8s_resource('forecasting-migration', resource_deps=['forecasting-db'], labels=['07-migrations'])
|
||||
k8s_resource('ai-insights-migration', resource_deps=['ai-insights-db'], labels=['07-migrations'])
|
||||
k8s_resource('training-migration-' + git_commit_short, resource_deps=['training-db'], labels=['07-migrations'])
|
||||
k8s_resource('forecasting-migration-' + git_commit_short, resource_deps=['forecasting-db'], labels=['07-migrations'])
|
||||
k8s_resource('ai-insights-migration-' + git_commit_short, resource_deps=['ai-insights-db'], labels=['07-migrations'])
|
||||
|
||||
# Operations Migrations
|
||||
k8s_resource('sales-migration', resource_deps=['sales-db'], labels=['07-migrations'])
|
||||
k8s_resource('inventory-migration', resource_deps=['inventory-db'], labels=['07-migrations'])
|
||||
k8s_resource('production-migration', resource_deps=['production-db'], labels=['07-migrations'])
|
||||
k8s_resource('procurement-migration', resource_deps=['procurement-db'], labels=['07-migrations'])
|
||||
k8s_resource('distribution-migration', resource_deps=['distribution-db'], labels=['07-migrations'])
|
||||
k8s_resource('sales-migration-' + git_commit_short, resource_deps=['sales-db'], labels=['07-migrations'])
|
||||
k8s_resource('inventory-migration-' + git_commit_short, resource_deps=['inventory-db'], labels=['07-migrations'])
|
||||
k8s_resource('production-migration-' + git_commit_short, resource_deps=['production-db'], labels=['07-migrations'])
|
||||
k8s_resource('procurement-migration-' + git_commit_short, resource_deps=['procurement-db'], labels=['07-migrations'])
|
||||
k8s_resource('distribution-migration-' + git_commit_short, resource_deps=['distribution-db'], labels=['07-migrations'])
|
||||
|
||||
# Supporting Service Migrations
|
||||
k8s_resource('recipes-migration', resource_deps=['recipes-db'], labels=['07-migrations'])
|
||||
k8s_resource('suppliers-migration', resource_deps=['suppliers-db'], labels=['07-migrations'])
|
||||
k8s_resource('pos-migration', resource_deps=['pos-db'], labels=['07-migrations'])
|
||||
k8s_resource('orders-migration', resource_deps=['orders-db'], labels=['07-migrations'])
|
||||
k8s_resource('external-migration', resource_deps=['external-db'], labels=['07-migrations'])
|
||||
k8s_resource('recipes-migration-' + git_commit_short, resource_deps=['recipes-db'], labels=['07-migrations'])
|
||||
k8s_resource('suppliers-migration-' + git_commit_short, resource_deps=['suppliers-db'], labels=['07-migrations'])
|
||||
k8s_resource('pos-migration-' + git_commit_short, resource_deps=['pos-db'], labels=['07-migrations'])
|
||||
k8s_resource('orders-migration-' + git_commit_short, resource_deps=['orders-db'], labels=['07-migrations'])
|
||||
k8s_resource('external-migration-' + git_commit_short, resource_deps=['external-db'], labels=['07-migrations'])
|
||||
|
||||
# Platform Service Migrations
|
||||
k8s_resource('notification-migration', resource_deps=['notification-db'], labels=['07-migrations'])
|
||||
k8s_resource('alert-processor-migration', resource_deps=['alert-processor-db'], labels=['07-migrations'])
|
||||
k8s_resource('orchestrator-migration', resource_deps=['orchestrator-db'], labels=['07-migrations'])
|
||||
k8s_resource('notification-migration-' + git_commit_short, resource_deps=['notification-db'], labels=['07-migrations'])
|
||||
k8s_resource('alert-processor-migration-' + git_commit_short, resource_deps=['alert-processor-db'], labels=['07-migrations'])
|
||||
k8s_resource('orchestrator-migration-' + git_commit_short, resource_deps=['orchestrator-db'], labels=['07-migrations'])
|
||||
|
||||
# Demo Service Migrations
|
||||
k8s_resource('demo-session-migration', resource_deps=['demo-session-db'], labels=['07-migrations'])
|
||||
k8s_resource('demo-session-migration-' + git_commit_short, resource_deps=['demo-session-db'], labels=['07-migrations'])
|
||||
|
||||
# =============================================================================
|
||||
# DATA INITIALIZATION JOBS
|
||||
# =============================================================================
|
||||
|
||||
k8s_resource('external-data-init', resource_deps=['external-migration', 'redis'], labels=['08-data-init'])
|
||||
k8s_resource('external-data-init-' + git_commit_short, resource_deps=['external-migration-' + git_commit_short, 'redis'], labels=['08-data-init'])
|
||||
|
||||
# =============================================================================
|
||||
# APPLICATION SERVICES
|
||||
# =============================================================================
|
||||
|
||||
# Core Services
|
||||
k8s_resource('auth-service', resource_deps=['auth-migration', 'redis'], labels=['09-services-core'])
|
||||
k8s_resource('tenant-service', resource_deps=['tenant-migration', 'redis'], labels=['09-services-core'])
|
||||
k8s_resource('auth-service', resource_deps=['auth-migration-' + git_commit_short, 'redis'], labels=['09-services-core'])
|
||||
k8s_resource('tenant-service', resource_deps=['tenant-migration-' + git_commit_short, 'redis'], labels=['09-services-core'])
|
||||
|
||||
# Data & Analytics Services
|
||||
k8s_resource('training-service', resource_deps=['training-migration', 'redis'], labels=['10-services-analytics'])
|
||||
k8s_resource('forecasting-service', resource_deps=['forecasting-migration', 'redis'], labels=['10-services-analytics'])
|
||||
k8s_resource('ai-insights-service', resource_deps=['ai-insights-migration', 'redis', 'forecasting-service', 'production-service', 'procurement-service'], labels=['10-services-analytics'])
|
||||
k8s_resource('training-service', resource_deps=['training-migration-' + git_commit_short, 'redis'], labels=['10-services-analytics'])
|
||||
k8s_resource('forecasting-service', resource_deps=['forecasting-migration-' + git_commit_short, 'redis'], labels=['10-services-analytics'])
|
||||
k8s_resource('ai-insights-service', resource_deps=['ai-insights-migration-' + git_commit_short, 'redis', 'forecasting-service', 'production-service', 'procurement-service'], labels=['10-services-analytics'])
|
||||
|
||||
# Operations Services
|
||||
k8s_resource('sales-service', resource_deps=['sales-migration', 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('inventory-service', resource_deps=['inventory-migration', 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('production-service', resource_deps=['production-migration', 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('procurement-service', resource_deps=['procurement-migration', 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('distribution-service', resource_deps=['distribution-migration', 'redis', 'rabbitmq'], labels=['11-services-operations'])
|
||||
k8s_resource('sales-service', resource_deps=['sales-migration-' + git_commit_short, 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('inventory-service', resource_deps=['inventory-migration-' + git_commit_short, 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('production-service', resource_deps=['production-migration-' + git_commit_short, 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('procurement-service', resource_deps=['procurement-migration-' + git_commit_short, 'redis'], labels=['11-services-operations'])
|
||||
k8s_resource('distribution-service', resource_deps=['distribution-migration-' + git_commit_short, 'redis', 'rabbitmq'], labels=['11-services-operations'])
|
||||
|
||||
# Supporting Services
|
||||
k8s_resource('recipes-service', resource_deps=['recipes-migration', 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('suppliers-service', resource_deps=['suppliers-migration', 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('pos-service', resource_deps=['pos-migration', 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('orders-service', resource_deps=['orders-migration', 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('external-service', resource_deps=['external-migration', 'external-data-init', 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('recipes-service', resource_deps=['recipes-migration-' + git_commit_short, 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('suppliers-service', resource_deps=['suppliers-migration-' + git_commit_short, 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('pos-service', resource_deps=['pos-migration-' + git_commit_short, 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('orders-service', resource_deps=['orders-migration-' + git_commit_short, 'redis'], labels=['12-services-supporting'])
|
||||
k8s_resource('external-service', resource_deps=['external-migration-' + git_commit_short, 'external-data-init-' + git_commit_short, 'redis'], labels=['12-services-supporting'])
|
||||
|
||||
# Platform Services
|
||||
k8s_resource('notification-service', resource_deps=['notification-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
|
||||
k8s_resource('alert-processor', resource_deps=['alert-processor-migration', 'redis', 'rabbitmq'], labels=['13-services-platform'])
|
||||
k8s_resource('orchestrator-service', resource_deps=['orchestrator-migration', 'redis'], labels=['13-services-platform'])
|
||||
k8s_resource('notification-service', resource_deps=['notification-migration-' + git_commit_short, 'redis', 'rabbitmq'], labels=['13-services-platform'])
|
||||
k8s_resource('alert-processor', resource_deps=['alert-processor-migration-' + git_commit_short, 'redis', 'rabbitmq'], labels=['13-services-platform'])
|
||||
k8s_resource('orchestrator-service', resource_deps=['orchestrator-migration-' + git_commit_short, 'redis'], labels=['13-services-platform'])
|
||||
|
||||
# Demo Services
|
||||
k8s_resource('demo-session-service', resource_deps=['demo-session-migration', 'redis'], labels=['14-services-demo'])
|
||||
k8s_resource('demo-cleanup-worker', resource_deps=['demo-session-service', 'redis'], labels=['14-services-demo'])
|
||||
k8s_resource('demo-session-service', resource_deps=['demo-session-migration-' + git_commit_short, 'redis'], labels=['14-services-demo'])
|
||||
k8s_resource('demo-cleanup-worker-' + git_commit_short, resource_deps=['demo-session-service', 'redis'], labels=['14-services-demo'])
|
||||
|
||||
# =============================================================================
|
||||
# FRONTEND & GATEWAY
|
||||
@@ -1317,7 +1280,7 @@ k8s_resource('frontend', resource_deps=['gateway'], labels=['15-frontend'])
|
||||
# =============================================================================
|
||||
|
||||
k8s_resource('demo-session-cleanup', resource_deps=['demo-session-service'], labels=['16-cronjobs'])
|
||||
k8s_resource('external-data-rotation', resource_deps=['external-service'], labels=['16-cronjobs'])
|
||||
k8s_resource('external-data-rotation-' + git_commit_short, resource_deps=['external-service'], labels=['16-cronjobs'])
|
||||
|
||||
# =============================================================================
|
||||
# WATCH SETTINGS
|
||||
|
||||
@@ -91,7 +91,7 @@ The Bakery-IA platform is organized into distinct infrastructure layers, each wi
|
||||
│ PostgreSQL (18 DBs) │ Redis │ RabbitMQ │ MinIO │
|
||||
├─────────────────────────────────────────────────────────────────────────────┤
|
||||
│ LAYER 2: NETWORK & SECURITY │
|
||||
│ Unbound DNS │ CoreDNS │ Ingress Controller │ Cert-Manager │ TLS │
|
||||
│ CoreDNS (DNS-over-TLS) │ Ingress Controller │ Cert-Manager │ TLS │
|
||||
├─────────────────────────────────────────────────────────────────────────────┤
|
||||
│ LAYER 1: FOUNDATION │
|
||||
│ Namespaces │ Storage Classes │ RBAC │ ConfigMaps │ Secrets │
|
||||
@@ -112,11 +112,9 @@ Components must be deployed in a specific order due to dependencies:
|
||||
↓
|
||||
3. TLS Certificates (internal + ingress)
|
||||
↓
|
||||
4. Unbound DNS Resolver (required for Mailu DNSSEC)
|
||||
4. CoreDNS Configuration (DNS-over-TLS for DNSSEC)
|
||||
↓
|
||||
5. CoreDNS Configuration (forward to Unbound)
|
||||
↓
|
||||
6. Ingress Controller & Resources
|
||||
5. Ingress Controller & Resources
|
||||
↓
|
||||
7. Data Layer: PostgreSQL, Redis, RabbitMQ, MinIO
|
||||
↓
|
||||
@@ -146,7 +144,6 @@ Components must be deployed in a specific order due to dependencies:
|
||||
| **Redis** | Caching & sessions | Yes | bakery-ia |
|
||||
| **RabbitMQ** | Message broker | Yes | bakery-ia |
|
||||
| **MinIO** | Object storage (ML models) | Yes | bakery-ia |
|
||||
| **Unbound DNS** | DNSSEC resolver | For Mailu | bakery-ia |
|
||||
| **Mailu** | Self-hosted email server | Optional | bakery-ia |
|
||||
| **Nominatim** | Geocoding service | Optional | bakery-ia |
|
||||
| **Gitea** | Git server + container registry | Optional | gitea |
|
||||
@@ -945,9 +942,8 @@ SMTP_PORT: 587
|
||||
#### Prerequisites
|
||||
|
||||
Before deploying Mailu, ensure:
|
||||
1. **Unbound DNS is deployed** (for DNSSEC validation)
|
||||
2. **CoreDNS is configured** to forward to Unbound
|
||||
3. **DNS records are configured** for your domain
|
||||
1. **CoreDNS is configured** with DNS-over-TLS for DNSSEC validation
|
||||
2. **DNS records are configured** for your domain
|
||||
|
||||
#### Step 1: Configure DNS Records
|
||||
|
||||
@@ -963,55 +959,64 @@ TXT _dmarc v=DMARC1; p=reject; rua=... Auto
|
||||
|
||||
**DKIM record** will be generated after Mailu is running - you'll add it later.
|
||||
|
||||
#### Step 2: Deploy Unbound DNS Resolver
|
||||
#### Step 2: Configure CoreDNS for DNSSEC (DNS-over-TLS)
|
||||
|
||||
Unbound provides DNSSEC validation required by Mailu for email authentication.
|
||||
Mailu requires DNSSEC validation. Configure CoreDNS to use DNS-over-TLS with Cloudflare:
|
||||
|
||||
```bash
|
||||
# On VPS - Deploy Unbound via Helm
|
||||
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
|
||||
-n bakery-ia \
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/prod/values.yaml \
|
||||
--timeout 5m \
|
||||
--wait
|
||||
# Check if CoreDNS is already configured with DNS-over-TLS
|
||||
kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'tls://1.1.1.1' || echo "Not configured"
|
||||
|
||||
# Verify Unbound is running
|
||||
kubectl get pods -n bakery-ia | grep unbound
|
||||
# Should show: unbound-xxx 1/1 Running
|
||||
# If not configured, update CoreDNS
|
||||
cat > /tmp/coredns-corefile.yaml << 'EOF'
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
data:
|
||||
Corefile: |
|
||||
.:53 {
|
||||
errors
|
||||
health {
|
||||
lameduck 5s
|
||||
}
|
||||
ready
|
||||
kubernetes cluster.local in-addr.arpa ip6.arpa {
|
||||
pods insecure
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
ttl 30
|
||||
}
|
||||
prometheus :9153
|
||||
forward . tls://1.1.1.1 tls://1.0.0.1 {
|
||||
tls_servername cloudflare-dns.com
|
||||
health_check 5s
|
||||
}
|
||||
cache 30 {
|
||||
disable success cluster.local
|
||||
disable denial cluster.local
|
||||
}
|
||||
loop
|
||||
reload
|
||||
loadbalance
|
||||
}
|
||||
EOF
|
||||
|
||||
# Get Unbound service IP (needed for CoreDNS configuration)
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
|
||||
echo "Unbound DNS IP: $UNBOUND_IP"
|
||||
```
|
||||
|
||||
#### Step 3: Configure CoreDNS for DNSSEC
|
||||
|
||||
Mailu requires DNSSEC validation. Configure CoreDNS to forward external queries to Unbound:
|
||||
|
||||
```bash
|
||||
# Get the Unbound service IP
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
|
||||
|
||||
# Patch CoreDNS to forward to Unbound
|
||||
kubectl patch configmap coredns -n kube-system --type merge -p "{
|
||||
\"data\": {
|
||||
\"Corefile\": \".:53 {\\n errors\\n health {\\n lameduck 5s\\n }\\n ready\\n kubernetes cluster.local in-addr.arpa ip6.arpa {\\n pods insecure\\n fallthrough in-addr.arpa ip6.arpa\\n ttl 30\\n }\\n prometheus :9153\\n forward . $UNBOUND_IP {\\n max_concurrent 1000\\n }\\n cache 30 {\\n disable success cluster.local\\n disable denial cluster.local\\n }\\n loop\\n reload\\n loadbalance\\n}\\n\"
|
||||
}
|
||||
}"
|
||||
kubectl apply -f /tmp/coredns-corefile.yaml
|
||||
|
||||
# Restart CoreDNS to apply changes
|
||||
kubectl rollout restart deployment coredns -n kube-system
|
||||
kubectl rollout status deployment coredns -n kube-system --timeout=60s
|
||||
|
||||
# Verify DNSSEC is working
|
||||
kubectl run -it --rm debug --image=alpine --restart=Never -- \
|
||||
sh -c "apk add drill && drill -D google.com"
|
||||
# Should show: ;; flags: ... ad ... (ad = authenticated data = DNSSEC valid)
|
||||
# Get CoreDNS service IP (needed for Mailu configuration)
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
echo "CoreDNS IP: $COREDNS_IP"
|
||||
|
||||
# Verify DNS resolution is working
|
||||
kubectl run -it --rm dns-test --image=busybox --restart=Never -- nslookup google.com
|
||||
```
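The `nslookup` check above only confirms basic resolution. As an optional spot check that answers are actually DNSSEC-validated, a minimal sketch (assuming the cluster can pull a stock `alpine` image; the pod uses the cluster's default resolver, i.e. CoreDNS):

```bash
# Query a signed zone through CoreDNS and inspect the response flags
kubectl run -it --rm dnssec-test --image=alpine --restart=Never -- \
  sh -c "apk add --no-cache bind-tools >/dev/null && dig +dnssec cloudflare.com | grep flags"
# A validated answer carries the "ad" (authenticated data) flag, e.g.:
#   ;; flags: qr rd ra ad; QUERY: 1, ANSWER: ...
```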
|
||||
|
||||
#### Step 4: Create TLS Certificate Secret
|
||||
#### Step 3: Create TLS Certificate Secret
|
||||
|
||||
Mailu Front pod requires a TLS certificate:
|
||||
|
||||
@@ -1036,7 +1041,7 @@ rm -rf "$TEMP_DIR"
|
||||
kubectl get secret mailu-certificates -n bakery-ia
|
||||
```
|
||||
|
||||
#### Step 5: Create Admin Credentials Secret
|
||||
#### Step 4: Create Admin Credentials Secret
|
||||
|
||||
```bash
|
||||
# Generate a secure password (or use your own)
|
||||
@@ -1050,30 +1055,35 @@ kubectl create secret generic mailu-admin-credentials \
|
||||
-n bakery-ia
|
||||
```
|
||||
|
||||
#### Step 6: Deploy Mailu via Helm
|
||||
#### Step 5: Deploy Mailu via Helm
|
||||
|
||||
```bash
|
||||
# Add Mailu Helm repository
|
||||
helm repo add mailu https://mailu.github.io/helm-charts
|
||||
helm repo update mailu
|
||||
|
||||
# Get CoreDNS service IP
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
|
||||
# Deploy Mailu with production values
|
||||
# Admin user is created automatically via initialAccount feature
|
||||
# CoreDNS provides DNSSEC validation via DNS-over-TLS to Cloudflare
|
||||
helm upgrade --install mailu mailu/mailu \
|
||||
-n bakery-ia \
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/mail/mailu-helm/values.yaml \
|
||||
-f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
|
||||
# Wait for pods to be ready (may take 5-10 minutes for ClamAV)
|
||||
kubectl get pods -n bakery-ia -l app.kubernetes.io/instance=mailu -w
|
||||
|
||||
# Admin user (admin@bakewise.ai) is created automatically!
|
||||
# Password is the one you set in Step 5
|
||||
# Password is the one you set in Step 4
|
||||
```
|
||||
|
||||
#### Step 7: Configure DKIM
|
||||
#### Step 6: Configure DKIM
|
||||
|
||||
After Mailu is running, get the DKIM key and add it to DNS:
|
||||
|
||||
@@ -1087,7 +1097,7 @@ kubectl exec -n bakery-ia deployment/mailu-admin -- \
|
||||
# Value: (the key from above)
|
||||
```
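Once the DKIM TXT record is published, confirm it is visible from public DNS before sending test mail. A minimal sketch (the `dkim` selector and the `bakewise.ai` domain are assumptions taken from this guide; adjust both to your actual selector and mail domain):

```bash
# The published record should match the key exported from the admin pod above
dig +short TXT dkim._domainkey.bakewise.ai @1.1.1.1
```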
|
||||
|
||||
#### Step 8: Verify Email Setup
|
||||
#### Step 7: Verify Email Setup
|
||||
|
||||
```bash
|
||||
# Check all Mailu pods are running
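
# Optional external smoke test of the mail endpoints (assumes the DNS records from
# Step 1 already point mail.bakery-ia.dev at the cluster's ingress / load balancer)
openssl s_client -starttls smtp -connect mail.bakery-ia.dev:587 </dev/null 2>/dev/null | grep -iE 'subject|issuer'
openssl s_client -connect mail.bakery-ia.dev:993 </dev/null 2>/dev/null | grep -iE 'subject|issuer'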
|
||||
@@ -1117,11 +1127,11 @@ kubectl port-forward -n bakery-ia svc/mailu-front 8080:80
|
||||
|
||||
**Issue: Admin pod CrashLoopBackOff with "DNSSEC validation" error**
|
||||
```bash
|
||||
# Verify CoreDNS is forwarding to Unbound
|
||||
kubectl get configmap coredns -n kube-system -o yaml | grep forward
|
||||
# Should show: forward . <unbound-ip>
|
||||
# Verify CoreDNS is configured with DNS-over-TLS
|
||||
kubectl get configmap coredns -n kube-system -o yaml | grep 'tls://'
|
||||
# Should show: tls://1.1.1.1 tls://1.0.0.1
|
||||
|
||||
# If not, re-run Step 3 above
|
||||
# If not, re-run Step 2 above
|
||||
```
|
||||
|
||||
**Issue: Front pod stuck in ContainerCreating**
|
||||
@@ -1129,7 +1139,7 @@ kubectl get configmap coredns -n kube-system -o yaml | grep forward
|
||||
# Check for missing certificate secret
|
||||
kubectl describe pod -n bakery-ia -l app.kubernetes.io/component=front | grep -A5 Events
|
||||
|
||||
# If missing mailu-certificates, re-run Step 4 above
|
||||
# If missing mailu-certificates, re-run Step 3 above
|
||||
```
|
||||
|
||||
**Issue: Admin pod can't connect to Redis**
|
||||
@@ -2018,41 +2028,22 @@ Mailu is a full-featured, self-hosted email server with built-in antispam, webma
|
||||
### Prerequisites
|
||||
|
||||
Before deploying Mailu, ensure the following (a quick pre-flight sketch follows this checklist):
|
||||
- [ ] Unbound DNS resolver deployed (for DNSSEC validation)
|
||||
- [ ] CoreDNS configured with DNS-over-TLS for DNSSEC validation
|
||||
- [ ] DNS records configured for mail domain
|
||||
- [ ] TLS certificates available
|
||||
- [ ] Mailgun account created and domain verified (for outbound email relay)
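
A quick pre-flight sketch for the checklist above (the domain and record values are assumptions drawn from this guide; the secret names are the ones created in later steps, so they may legitimately be missing on a first run):

```bash
# CoreDNS already forwarding over DNS-over-TLS?
kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -c 'tls://1.1.1.1'

# Mail DNS records visible from public DNS?
dig +short MX bakewise.ai @1.1.1.1
dig +short A mail.bakery-ia.dev @1.1.1.1

# TLS and admin credential secrets present? (created in later steps if missing)
kubectl get secret mailu-certificates mailu-admin-credentials -n bakery-ia
```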
|
||||
|
||||
### Step 1: Deploy Unbound DNS Resolver
|
||||
### Step 1: Configure CoreDNS for DNSSEC (DNS-over-TLS)
|
||||
|
||||
Mailu requires DNSSEC validation for email authentication (DKIM/SPF/DMARC).
|
||||
CoreDNS is configured to use DNS-over-TLS with Cloudflare for DNSSEC validation.
|
||||
|
||||
```bash
|
||||
# Deploy Unbound via Helm
|
||||
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
|
||||
-n bakery-ia \
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/prod/values.yaml \
|
||||
--timeout 5m \
|
||||
--wait
|
||||
# Check if CoreDNS is already configured with DNS-over-TLS
|
||||
kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'tls://1.1.1.1' || echo "Not configured"
|
||||
|
||||
# Verify Unbound is running
|
||||
kubectl get pods -n bakery-ia | grep unbound
|
||||
|
||||
# Get Unbound service IP
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
|
||||
echo "Unbound DNS IP: $UNBOUND_IP"
|
||||
```
|
||||
|
||||
### Step 2: Configure CoreDNS for DNSSEC
|
||||
|
||||
```bash
|
||||
# Get Unbound IP
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n bakery-ia -o jsonpath='{.spec.clusterIP}')
|
||||
|
||||
# Create updated CoreDNS ConfigMap
|
||||
cat > /tmp/coredns-config.yaml <<EOF
|
||||
# If not configured, update CoreDNS ConfigMap
|
||||
cat > /tmp/coredns-config.yaml << 'EOF'
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
@@ -2072,8 +2063,9 @@ data:
|
||||
ttl 30
|
||||
}
|
||||
prometheus :9153
|
||||
forward . $UNBOUND_IP {
|
||||
max_concurrent 1000
|
||||
forward . tls://1.1.1.1 tls://1.0.0.1 {
|
||||
tls_servername cloudflare-dns.com
|
||||
health_check 5s
|
||||
}
|
||||
cache 30 {
|
||||
disable success cluster.local
|
||||
@@ -2092,10 +2084,12 @@ kubectl apply -f /tmp/coredns-config.yaml
|
||||
kubectl rollout restart deployment coredns -n kube-system
|
||||
kubectl rollout status deployment coredns -n kube-system --timeout=60s
|
||||
|
||||
# Verify DNSSEC is working
|
||||
kubectl run -it --rm dns-test --image=alpine --restart=Never -- \
|
||||
sh -c "apk add drill && drill -D google.com"
|
||||
# Look for "ad" flag (authenticated data) in output
|
||||
# Get CoreDNS service IP
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
echo "CoreDNS IP: $COREDNS_IP"
|
||||
|
||||
# Verify DNS resolution is working
|
||||
kubectl run -it --rm dns-test --image=busybox --restart=Never -- nslookup google.com
|
||||
```
|
||||
|
||||
### Step 3: Configure Mailgun (External SMTP Relay)
|
||||
@@ -2216,15 +2210,20 @@ kubectl apply -f infrastructure/platform/mail/mailu-helm/configs/mailu-admin-cre
|
||||
helm repo add mailu https://mailu.github.io/helm-charts
|
||||
helm repo update mailu
|
||||
|
||||
# Get CoreDNS service IP for Mailu DNS configuration
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
|
||||
# Deploy Mailu with production values
|
||||
# Note:
|
||||
# - externalRelay uses Mailgun via the secret created in Step 3
|
||||
# - initialAccount creates admin user automatically using the secret from Step 6
|
||||
# - CoreDNS provides DNSSEC validation via DNS-over-TLS (Cloudflare)
|
||||
helm upgrade --install mailu mailu/mailu \
|
||||
-n bakery-ia \
|
||||
--create-namespace \
|
||||
-f infrastructure/platform/mail/mailu-helm/values.yaml \
|
||||
-f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
|
||||
# Wait for pods to be ready (ClamAV may take 5-10 minutes)
|
||||
@@ -2306,11 +2305,11 @@ kubectl port-forward -n bakery-ia svc/mailu-front 8080:80
|
||||
#### Admin Pod CrashLoopBackOff with DNSSEC Error
|
||||
|
||||
```bash
|
||||
# Verify CoreDNS is forwarding to Unbound
|
||||
kubectl get configmap coredns -n kube-system -o yaml | grep forward
|
||||
# Should show: forward . <unbound-ip>
|
||||
# Verify CoreDNS is configured with DNS-over-TLS
|
||||
kubectl get configmap coredns -n kube-system -o yaml | grep 'tls://'
|
||||
# Should show: tls://1.1.1.1 tls://1.0.0.1
|
||||
|
||||
# If not configured, re-run Step 2
|
||||
# If not configured, re-run Step 1
|
||||
```
|
||||
|
||||
#### Front Pod Stuck in ContainerCreating
|
||||
@@ -3419,8 +3418,7 @@ kubectl scale deployment monitoring -n bakery-ia --replicas=0
|
||||
- [ ] End-to-end pipeline test successful
|
||||
|
||||
### Email Infrastructure (Optional - Mailu)
|
||||
- [ ] Unbound DNS resolver deployed
|
||||
- [ ] CoreDNS configured for DNSSEC
|
||||
- [ ] CoreDNS configured with DNS-over-TLS for DNSSEC
|
||||
- [ ] Mailu TLS certificate created
|
||||
- [ ] Mailu deployed via Helm
|
||||
- [ ] Admin user created
|
||||
@@ -3473,8 +3471,7 @@ kubectl scale deployment monitoring -n bakery-ia --replicas=0
|
||||
- Webhook integration and end-to-end testing
|
||||
- Troubleshooting guide for CI/CD issues
|
||||
- **NEW: Mailu Email Server Deployment** - Comprehensive self-hosted email setup
|
||||
- Unbound DNS resolver deployment for DNSSEC
|
||||
- CoreDNS configuration for mail authentication
|
||||
- CoreDNS configuration with DNS-over-TLS for DNSSEC validation
|
||||
- Mailu Helm deployment with all components
|
||||
- DKIM/SPF/DMARC configuration
|
||||
- Troubleshooting common Mailu issues
|
||||
|
||||
@@ -1,85 +1,22 @@
|
||||
# Kubernetes-optimized Dockerfile for Frontend
|
||||
# Multi-stage build for production deployment
|
||||
# Frontend Dockerfile for Kubernetes
|
||||
# Simple two-stage build: Node to build, nginx to serve
|
||||
|
||||
# Stage 1: Build the application
|
||||
# Stage 1: Build
|
||||
FROM node:18-alpine AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy package files
|
||||
COPY package*.json ./
|
||||
|
||||
# Install all dependencies for building
|
||||
RUN npm ci --verbose && \
|
||||
npm cache clean --force
|
||||
|
||||
# Copy source code (excluding unnecessary files like node_modules, dist, etc.)
|
||||
RUN npm ci
|
||||
COPY . .
|
||||
|
||||
# Create a default runtime config in the public directory if it doesn't exist to satisfy the reference in index.html
|
||||
RUN if [ ! -f public/runtime-config.js ]; then \
|
||||
mkdir -p public && \
|
||||
echo "window.__RUNTIME_CONFIG__ = {};" > public/runtime-config.js; \
|
||||
fi
|
||||
|
||||
# Set build-time environment variables to prevent hanging on undefined variables
|
||||
ENV NODE_ENV=production
|
||||
ENV CI=true
|
||||
ENV VITE_API_URL=/api
|
||||
ENV VITE_APP_TITLE="BakeWise"
|
||||
ENV VITE_APP_VERSION="1.0.0"
|
||||
ENV VITE_PILOT_MODE_ENABLED="false"
|
||||
ENV VITE_PILOT_COUPON_CODE="PILOT2025"
|
||||
ENV VITE_PILOT_TRIAL_MONTHS="3"
|
||||
ENV VITE_STRIPE_PUBLISHABLE_KEY="pk_test_"
|
||||
# Set Node.js memory limit for the build process
|
||||
ENV NODE_OPTIONS="--max-old-space-size=4096"
|
||||
ENV VITE_STRIPE_PUBLISHABLE_KEY="pk_test_51QuxKyIzCdnBmAVTGM8fvXYkItrBUILz6lHYwhAva6ZAH1HRi0e8zDRgZ4X3faN0zEABp5RHjCVBmMJL3aKXbaC200fFrSNnPl"
|
||||
RUN npm run build
|
||||
|
||||
# Stage 2: Production server with Nginx
|
||||
FROM nginx:1.25-alpine AS production
|
||||
|
||||
# Install curl for health checks
|
||||
RUN apk add --no-cache curl
|
||||
|
||||
# Copy main nginx configuration that sets the PID file location
|
||||
COPY nginx-main.conf /etc/nginx/nginx.conf
|
||||
|
||||
# Remove default nginx configuration
|
||||
# Stage 2: Serve with nginx
|
||||
FROM nginx:1.25-alpine
|
||||
RUN rm /etc/nginx/conf.d/default.conf
|
||||
|
||||
# Copy custom nginx configuration
|
||||
COPY nginx.conf /etc/nginx/conf.d/
|
||||
|
||||
# Copy built application from builder stage
|
||||
COPY --from=builder /app/dist /usr/share/nginx/html
|
||||
|
||||
# Copy and setup environment substitution script
|
||||
COPY substitute-env.sh /docker-entrypoint.d/30-substitute-env.sh
|
||||
|
||||
# Make the script executable
|
||||
RUN chmod +x /docker-entrypoint.d/30-substitute-env.sh
|
||||
|
||||
# Set proper permissions
|
||||
RUN chown -R nginx:nginx /usr/share/nginx/html && \
|
||||
chown -R nginx:nginx /var/cache/nginx && \
|
||||
chown -R nginx:nginx /var/log/nginx && \
|
||||
chown -R nginx:nginx /etc/nginx/conf.d
|
||||
|
||||
# Create nginx PID directory and fix permissions
|
||||
RUN mkdir -p /var/run/nginx /var/lib/nginx/tmp && \
|
||||
chown -R nginx:nginx /var/run/nginx /var/lib/nginx /etc/nginx
|
||||
|
||||
# Switch to non-root user
|
||||
USER nginx
|
||||
|
||||
# Expose port 3000 (to match current setup)
|
||||
EXPOSE 3000
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
|
||||
CMD curl -f http://localhost:3000/health || exit 1
|
||||
|
||||
# Start nginx
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ ENV VITE_APP_VERSION="1.0.0-debug"
|
||||
ENV VITE_PILOT_MODE_ENABLED="false"
|
||||
ENV VITE_PILOT_COUPON_CODE="PILOT2025"
|
||||
ENV VITE_PILOT_TRIAL_MONTHS="3"
|
||||
ENV VITE_STRIPE_PUBLISHABLE_KEY="pk_test_"
|
||||
ENV VITE_STRIPE_PUBLISHABLE_KEY="pk_test_51QuxKyIzCdnBmAVTGM8fvXYkItrBUILz6lHYwhAva6ZAH1HRi0e8zDRgZ4X3faN0zEABp5RHjCVBmMJL3aKXbaC200fFrSNnPl"
|
||||
|
||||
# Set Node.js memory limit for the build process
|
||||
ENV NODE_OPTIONS="--max-old-space-size=4096"
|
||||
|
||||
@@ -18,9 +18,6 @@
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
|
||||
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Poppins:wght@600;700&display=swap" rel="stylesheet" />
|
||||
|
||||
<!-- Runtime configuration - MUST load before app code (Kubernetes deployment) -->
|
||||
<script src="/runtime-config.js"></script>
|
||||
|
||||
<title>BakeWise - Gestión Inteligente para Panaderías</title>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
pid /var/run/nginx/nginx.pid;
|
||||
worker_processes auto;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
}
|
||||
@@ -12,7 +12,7 @@ server {
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
add_header X-XSS-Protection "1; mode=block" always;
|
||||
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
|
||||
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://fonts.googleapis.com https://js.stripe.com; script-src-elem 'self' 'unsafe-inline' https://js.stripe.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; font-src 'self' https://fonts.gstatic.com; img-src 'self' data: https:; connect-src 'self' http://localhost http://localhost:8000 http://localhost:8001 http://localhost:8006 ws: wss:; frame-src https://js.stripe.com;" always;
|
||||
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://fonts.googleapis.com https://js.stripe.com; script-src-elem 'self' 'unsafe-inline' https://js.stripe.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; font-src 'self' https://fonts.gstatic.com; img-src 'self' data: https:; connect-src 'self' http://localhost:* https://localhost:* https://bakery-ia.local https://bakewise.ai wss://bakery-ia.local wss://bakewise.ai ws: wss:; frame-src https://js.stripe.com;" always;
|
||||
|
||||
# Gzip compression
|
||||
gzip on;
|
||||
@@ -93,13 +93,11 @@ server {
|
||||
}
|
||||
|
||||
# Main location block for SPA routing
|
||||
# Note: JS/CSS files are handled by specific location blocks above with try_files $uri =404
|
||||
# This ensures missing assets return 404 instead of index.html (which causes MIME type errors)
|
||||
location / {
|
||||
try_files $uri $uri/ @fallback;
|
||||
}
|
||||
|
||||
# Fallback for SPA routing - serve index.html
|
||||
location @fallback {
|
||||
rewrite ^.*$ /index.html last;
|
||||
# Don't fallback to index.html for asset files - they should 404 if missing
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
# Health check endpoint
|
||||
|
||||
frontend/package-lock.json (generated, 42 changes)
@@ -148,7 +148,6 @@
|
||||
"integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@babel/code-frame": "^7.27.1",
|
||||
"@babel/generator": "^7.28.3",
|
||||
@@ -2278,6 +2277,7 @@
|
||||
"resolved": "https://registry.npmjs.org/@formatjs/ecma402-abstract/-/ecma402-abstract-2.3.6.tgz",
|
||||
"integrity": "sha512-HJnTFeRM2kVFVr5gr5kH1XP6K0JcJtE7Lzvtr3FS/so5f1kpsqqqxy5JF+FRaO6H2qmcMfAUIox7AJteieRtVw==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@formatjs/fast-memoize": "2.2.7",
|
||||
"@formatjs/intl-localematcher": "0.6.2",
|
||||
@@ -2290,6 +2290,7 @@
|
||||
"resolved": "https://registry.npmjs.org/@formatjs/fast-memoize/-/fast-memoize-2.2.7.tgz",
|
||||
"integrity": "sha512-Yabmi9nSvyOMrlSeGGWDiH7rf3a7sIwplbvo/dlz9WCIjzIQAfy1RMf4S0X3yG724n5Ghu2GmEl5NJIV6O9sZQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.8.0"
|
||||
}
|
||||
@@ -2299,6 +2300,7 @@
|
||||
"resolved": "https://registry.npmjs.org/@formatjs/icu-messageformat-parser/-/icu-messageformat-parser-2.11.4.tgz",
|
||||
"integrity": "sha512-7kR78cRrPNB4fjGFZg3Rmj5aah8rQj9KPzuLsmcSn4ipLXQvC04keycTI1F7kJYDwIXtT2+7IDEto842CfZBtw==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@formatjs/ecma402-abstract": "2.3.6",
|
||||
"@formatjs/icu-skeleton-parser": "1.8.16",
|
||||
@@ -2310,6 +2312,7 @@
|
||||
"resolved": "https://registry.npmjs.org/@formatjs/icu-skeleton-parser/-/icu-skeleton-parser-1.8.16.tgz",
|
||||
"integrity": "sha512-H13E9Xl+PxBd8D5/6TVUluSpxGNvFSlN/b3coUp0e0JpuWXXnQDiavIpY3NnvSp4xhEMoXyyBvVfdFX8jglOHQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@formatjs/ecma402-abstract": "2.3.6",
|
||||
"tslib": "^2.8.0"
|
||||
@@ -2320,6 +2323,7 @@
|
||||
"resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.6.2.tgz",
|
||||
"integrity": "sha512-XOMO2Hupl0wdd172Y06h6kLpBz6Dv+J4okPLl4LPtzbr8f66WbIoy4ev98EBuZ6ZK4h5ydTN6XneT4QVpD7cdA==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.8.0"
|
||||
}
|
||||
@@ -2748,7 +2752,6 @@
|
||||
"deprecated": "Glob versions prior to v9 are no longer supported",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"fs.realpath": "^1.0.0",
|
||||
"inflight": "^1.0.4",
|
||||
@@ -2988,7 +2991,6 @@
|
||||
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
|
||||
"integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
|
||||
"license": "Apache-2.0",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=8.0.0"
|
||||
}
|
||||
@@ -6329,7 +6331,6 @@
|
||||
"resolved": "https://registry.npmjs.org/@stripe/stripe-js/-/stripe-js-4.10.0.tgz",
|
||||
"integrity": "sha512-KrMOL+sH69htCIXCaZ4JluJ35bchuCCznyPyrbN8JXSGQfwBI1SuIEMZNwvy8L8ykj29t6sa5BAAiL7fNoLZ8A==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=12.16"
|
||||
}
|
||||
@@ -6419,7 +6420,6 @@
|
||||
"resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.89.0.tgz",
|
||||
"integrity": "sha512-SXbtWSTSRXyBOe80mszPxpEbaN4XPRUp/i0EfQK1uyj3KCk/c8FuPJNIRwzOVe/OU3rzxrYtiNabsAmk1l714A==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@tanstack/query-core": "5.89.0"
|
||||
},
|
||||
@@ -6911,7 +6911,6 @@
|
||||
"integrity": "sha512-0dLEBsA1kI3OezMBF8nSsb7Nk19ZnsyE1LLhB8r27KbgU5H4pvuqZLdtE+aUkJVoXgTVuA+iLIwmZ0TuK4tx6A==",
|
||||
"devOptional": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@types/prop-types": "*",
|
||||
"csstype": "^3.0.2"
|
||||
@@ -6923,7 +6922,6 @@
|
||||
"integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==",
|
||||
"devOptional": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"peerDependencies": {
|
||||
"@types/react": "^18.0.0"
|
||||
}
|
||||
@@ -7065,7 +7063,6 @@
|
||||
"integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==",
|
||||
"dev": true,
|
||||
"license": "BSD-2-Clause",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/scope-manager": "6.21.0",
|
||||
"@typescript-eslint/types": "6.21.0",
|
||||
@@ -7404,7 +7401,6 @@
|
||||
"integrity": "sha512-xa57bCPGuzEFqGjPs3vVLyqareG8DX0uMkr5U/v5vLv5/ZUrBrPL7gzxzTJedEyZxFMfsozwTIbbYfEQVo3kgg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@vitest/utils": "1.6.1",
|
||||
"fast-glob": "^3.3.2",
|
||||
@@ -7502,7 +7498,6 @@
|
||||
"integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"acorn": "bin/acorn"
|
||||
},
|
||||
@@ -8045,7 +8040,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"baseline-browser-mapping": "^2.8.3",
|
||||
"caniuse-lite": "^1.0.30001741",
|
||||
@@ -8251,7 +8245,6 @@
|
||||
"resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.0.tgz",
|
||||
"integrity": "sha512-aYeC/jDgSEx8SHWZvANYMioYMZ2KX02W6f6uVfyteuCGcadDLcYVHdfdygsTQkQ4TKn5lghoojAsPj5pu0SnvQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@kurkle/color": "^0.3.0"
|
||||
},
|
||||
@@ -8616,8 +8609,7 @@
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
|
||||
"integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/d3-array": {
|
||||
"version": "3.2.4",
|
||||
@@ -8799,7 +8791,6 @@
|
||||
"resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz",
|
||||
"integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@babel/runtime": "^7.21.0"
|
||||
},
|
||||
@@ -8842,7 +8833,8 @@
|
||||
"version": "10.6.0",
|
||||
"resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz",
|
||||
"integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==",
|
||||
"license": "MIT"
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
},
|
||||
"node_modules/decimal.js-light": {
|
||||
"version": "2.5.1",
|
||||
@@ -9326,7 +9318,6 @@
|
||||
"dev": true,
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"esbuild": "bin/esbuild"
|
||||
},
|
||||
@@ -9430,7 +9421,6 @@
|
||||
"deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@eslint-community/eslint-utils": "^4.2.0",
|
||||
"@eslint-community/regexpp": "^4.6.1",
|
||||
@@ -10856,7 +10846,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@babel/runtime": "^7.23.2"
|
||||
}
|
||||
@@ -10905,7 +10894,6 @@
|
||||
"resolved": "https://registry.npmjs.org/immer/-/immer-10.1.3.tgz",
|
||||
"integrity": "sha512-tmjF/k8QDKydUlm3mZU+tjM6zeq9/fFpPqH9SzWmBnVVKsPBg/V66qsMwb3/Bo90cgUN+ghdVBess+hPsxUyRw==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/immer"
|
||||
@@ -11887,8 +11875,7 @@
|
||||
"version": "1.9.4",
|
||||
"resolved": "https://registry.npmjs.org/leaflet/-/leaflet-1.9.4.tgz",
|
||||
"integrity": "sha512-nxS1ynzJOmOlHp+iL3FyWqK89GtNL8U8rvlMOsQdTTssxZwCXh8N2NB3GDQOL+YR3XnWyZAxwQixURb+FA74PA==",
|
||||
"license": "BSD-2-Clause",
|
||||
"peer": true
|
||||
"license": "BSD-2-Clause"
|
||||
},
|
||||
"node_modules/leven": {
|
||||
"version": "3.1.0",
|
||||
@@ -13077,7 +13064,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"nanoid": "^3.3.11",
|
||||
"picocolors": "^1.1.1",
|
||||
@@ -13244,7 +13230,6 @@
|
||||
"integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"prettier": "bin/prettier.cjs"
|
||||
},
|
||||
@@ -13541,7 +13526,6 @@
|
||||
"resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
|
||||
"integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"loose-envify": "^1.1.0"
|
||||
},
|
||||
@@ -13614,7 +13598,6 @@
|
||||
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
|
||||
"integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"loose-envify": "^1.1.0",
|
||||
"scheduler": "^0.23.2"
|
||||
@@ -13680,7 +13663,6 @@
|
||||
"resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.63.0.tgz",
|
||||
"integrity": "sha512-ZwueDMvUeucovM2VjkCf7zIHcs1aAlDimZu2Hvel5C5907gUzMpm4xCrQXtRzCvsBqFjonB4m3x4LzCFI1ZKWA==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
@@ -14286,7 +14268,6 @@
|
||||
"integrity": "sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"rollup": "dist/bin/rollup"
|
||||
},
|
||||
@@ -15169,7 +15150,6 @@
|
||||
"integrity": "sha512-w33E2aCvSDP0tW9RZuNXadXlkHXqFzSkQew/aIa2i/Sj8fThxwovwlXHSPXTbAHwEIhBFXAedUhP2tueAKP8Og==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@alloc/quick-lru": "^5.2.0",
|
||||
"arg": "^5.0.2",
|
||||
@@ -15701,7 +15681,6 @@
|
||||
"integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"tsc": "bin/tsc",
|
||||
"tsserver": "bin/tsserver"
|
||||
@@ -16113,7 +16092,6 @@
|
||||
"integrity": "sha512-j3lYzGC3P+B5Yfy/pfKNgVEg4+UtcIJcVRt2cDjIOmhLourAqPqf8P7acgxeiSgUB7E3p2P8/3gNIgDLpwzs4g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"esbuild": "^0.21.3",
|
||||
"postcss": "^8.4.43",
|
||||
@@ -16677,7 +16655,6 @@
|
||||
"integrity": "sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@vitest/expect": "1.6.1",
|
||||
"@vitest/runner": "1.6.1",
|
||||
@@ -17059,7 +17036,6 @@
|
||||
"integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
"fast-uri": "^3.0.1",
|
||||
|
||||
@@ -79,6 +79,10 @@ class ApiClient {
|
||||
const publicEndpoints = [
|
||||
'/demo/accounts',
|
||||
'/demo/session/create',
|
||||
'/public/contact',
|
||||
'/public/feedback',
|
||||
'/public/prelaunch-subscribe',
|
||||
'/plans', // Subscription plans metadata - public for registration flow
|
||||
];
|
||||
|
||||
// Endpoints that require authentication but not a tenant ID (user-level endpoints)
|
||||
|
||||
frontend/src/api/services/publicContact.ts (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
/**
|
||||
* Public Contact API Service
|
||||
* Handles public form submissions (contact, feedback, prelaunch)
|
||||
* These endpoints don't require authentication
|
||||
*/
|
||||
|
||||
import axios from 'axios';
|
||||
import { getApiUrl } from '../../config/runtime';
|
||||
|
||||
const publicApiClient = axios.create({
|
||||
baseURL: getApiUrl(),
|
||||
timeout: 30000,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
});
|
||||
|
||||
// Types
|
||||
export interface ContactFormData {
|
||||
name: string;
|
||||
email: string;
|
||||
phone?: string;
|
||||
bakery_name?: string;
|
||||
type: 'general' | 'technical' | 'sales' | 'feedback';
|
||||
subject: string;
|
||||
message: string;
|
||||
}
|
||||
|
||||
export interface FeedbackFormData {
|
||||
name: string;
|
||||
email: string;
|
||||
category: 'suggestion' | 'bug' | 'feature' | 'praise' | 'complaint';
|
||||
title: string;
|
||||
description: string;
|
||||
rating?: number;
|
||||
}
|
||||
|
||||
export interface PrelaunchEmailData {
|
||||
email: string;
|
||||
}
|
||||
|
||||
export interface ContactFormResponse {
|
||||
success: boolean;
|
||||
message: string;
|
||||
}
|
||||
|
||||
// API Functions
|
||||
export const publicContactService = {
|
||||
/**
|
||||
* Submit a contact form
|
||||
*/
|
||||
submitContactForm: async (data: ContactFormData): Promise<ContactFormResponse> => {
|
||||
const response = await publicApiClient.post<ContactFormResponse>(
|
||||
'/v1/public/contact',
|
||||
data
|
||||
);
|
||||
return response.data;
|
||||
},
|
||||
|
||||
/**
|
||||
* Submit a feedback form
|
||||
*/
|
||||
submitFeedbackForm: async (data: FeedbackFormData): Promise<ContactFormResponse> => {
|
||||
const response = await publicApiClient.post<ContactFormResponse>(
|
||||
'/v1/public/feedback',
|
||||
data
|
||||
);
|
||||
return response.data;
|
||||
},
|
||||
|
||||
/**
|
||||
* Submit a prelaunch email subscription
|
||||
*/
|
||||
submitPrelaunchEmail: async (data: PrelaunchEmailData): Promise<ContactFormResponse> => {
|
||||
const response = await publicApiClient.post<ContactFormResponse>(
|
||||
'/v1/public/prelaunch-subscribe',
|
||||
data
|
||||
);
|
||||
return response.data;
|
||||
},
|
||||
};
|
||||
|
||||
export default publicContactService;
|
||||
frontend/src/components/domain/auth/PrelaunchEmailForm.tsx (new file, 185 lines)
@@ -0,0 +1,185 @@
|
||||
import React, { useState } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { Mail, Rocket, CheckCircle, Loader, ArrowLeft } from 'lucide-react';
|
||||
import { Button, Input, Card } from '../../ui';
|
||||
import { publicContactService } from '../../../api/services/publicContact';
|
||||
|
||||
interface PrelaunchEmailFormProps {
|
||||
onLoginClick?: () => void;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
export const PrelaunchEmailForm: React.FC<PrelaunchEmailFormProps> = ({
|
||||
onLoginClick,
|
||||
className = '',
|
||||
}) => {
|
||||
const { t } = useTranslation(['auth', 'common']);
|
||||
const [email, setEmail] = useState('');
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [isSubmitted, setIsSubmitted] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
const validateEmail = (email: string): boolean => {
|
||||
const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
|
||||
return emailRegex.test(email);
|
||||
};
|
||||
|
||||
const handleSubmit = async (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
setError(null);
|
||||
|
||||
if (!email.trim()) {
|
||||
setError(t('auth:prelaunch.email_required'));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!validateEmail(email)) {
|
||||
setError(t('auth:prelaunch.email_invalid'));
|
||||
return;
|
||||
}
|
||||
|
||||
setIsSubmitting(true);
|
||||
|
||||
try {
|
||||
await publicContactService.submitPrelaunchEmail({ email });
|
||||
setIsSubmitted(true);
|
||||
} catch {
|
||||
setError(t('auth:prelaunch.submit_error'));
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
};
|
||||
|
||||
if (isSubmitted) {
|
||||
return (
|
||||
<Card className={`max-w-lg mx-auto p-8 ${className}`}>
|
||||
<div className="text-center">
|
||||
<div className="mx-auto w-16 h-16 bg-green-100 dark:bg-green-900/30 rounded-full flex items-center justify-center mb-6">
|
||||
<CheckCircle className="w-8 h-8 text-green-600 dark:text-green-400" />
|
||||
</div>
|
||||
|
||||
<h2 className="text-2xl font-bold text-[var(--text-primary)] mb-3">
|
||||
{t('auth:prelaunch.success_title')}
|
||||
</h2>
|
||||
|
||||
<p className="text-[var(--text-secondary)] mb-6">
|
||||
{t('auth:prelaunch.success_message')}
|
||||
</p>
|
||||
|
||||
<div className="space-y-3">
|
||||
<Button
|
||||
onClick={() => window.location.href = '/'}
|
||||
variant="outline"
|
||||
className="w-full"
|
||||
>
|
||||
<ArrowLeft className="w-4 h-4 mr-2" />
|
||||
{t('auth:prelaunch.back_to_home')}
|
||||
</Button>
|
||||
|
||||
{onLoginClick && (
|
||||
<p className="text-sm text-[var(--text-secondary)]">
|
||||
{t('auth:register.have_account')}{' '}
|
||||
<button
|
||||
onClick={onLoginClick}
|
||||
className="text-[var(--color-primary)] hover:underline font-medium"
|
||||
>
|
||||
{t('auth:register.login_link')}
|
||||
</button>
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Card className={`max-w-lg mx-auto p-8 ${className}`}>
|
||||
<div className="text-center mb-8">
|
||||
<div className="mx-auto w-16 h-16 bg-gradient-to-br from-amber-500 to-orange-500 rounded-full flex items-center justify-center mb-6 shadow-lg">
|
||||
<Rocket className="w-8 h-8 text-white" />
|
||||
</div>
|
||||
|
||||
<h1 className="text-3xl font-bold text-[var(--text-primary)] mb-3">
|
||||
{t('auth:prelaunch.title')}
|
||||
</h1>
|
||||
|
||||
<p className="text-lg text-[var(--text-secondary)] mb-2">
|
||||
{t('auth:prelaunch.subtitle')}
|
||||
</p>
|
||||
|
||||
<p className="text-[var(--text-tertiary)]">
|
||||
{t('auth:prelaunch.description')}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<form onSubmit={handleSubmit} className="space-y-6">
|
||||
<Input
|
||||
type="email"
|
||||
label={t('auth:register.email')}
|
||||
placeholder={t('auth:register.email_placeholder')}
|
||||
value={email}
|
||||
onChange={(e) => setEmail(e.target.value)}
|
||||
leftIcon={<Mail className="w-5 h-5" />}
|
||||
error={error || undefined}
|
||||
isRequired
|
||||
size="lg"
|
||||
/>
|
||||
|
||||
<Button
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
className="w-full py-4 text-lg font-semibold"
|
||||
>
|
||||
{isSubmitting ? (
|
||||
<>
|
||||
<Loader className="w-5 h-5 mr-2 animate-spin" />
|
||||
{t('auth:prelaunch.submitting')}
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<Mail className="w-5 h-5 mr-2" />
|
||||
{t('auth:prelaunch.subscribe_button')}
|
||||
</>
|
||||
)}
|
||||
</Button>
|
||||
</form>
|
||||
|
||||
<div className="mt-8 pt-6 border-t border-[var(--border-primary)]">
|
||||
<h3 className="text-sm font-semibold text-[var(--text-primary)] mb-3">
|
||||
{t('auth:prelaunch.benefits_title')}
|
||||
</h3>
|
||||
<ul className="space-y-2 text-sm text-[var(--text-secondary)]">
|
||||
<li className="flex items-center gap-2">
|
||||
<CheckCircle className="w-4 h-4 text-green-500" />
|
||||
{t('auth:prelaunch.benefit_1')}
|
||||
</li>
|
||||
<li className="flex items-center gap-2">
|
||||
<CheckCircle className="w-4 h-4 text-green-500" />
|
||||
{t('auth:prelaunch.benefit_2')}
|
||||
</li>
|
||||
<li className="flex items-center gap-2">
|
||||
<CheckCircle className="w-4 h-4 text-green-500" />
|
||||
{t('auth:prelaunch.benefit_3')}
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
{onLoginClick && (
|
||||
<div className="mt-6 text-center">
|
||||
<p className="text-sm text-[var(--text-secondary)]">
|
||||
{t('auth:register.have_account')}{' '}
|
||||
<button
|
||||
onClick={onLoginClick}
|
||||
className="text-[var(--color-primary)] hover:underline font-medium"
|
||||
>
|
||||
{t('auth:register.login_link')}
|
||||
</button>
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
|
||||
export default PrelaunchEmailForm;
|
||||
@@ -26,10 +26,15 @@ const getStripeKey = (): string => {
|
||||
if (typeof window !== 'undefined' && window.__RUNTIME_CONFIG__?.VITE_STRIPE_PUBLISHABLE_KEY) {
|
||||
return window.__RUNTIME_CONFIG__.VITE_STRIPE_PUBLISHABLE_KEY;
|
||||
}
|
||||
return import.meta.env.VITE_STRIPE_PUBLISHABLE_KEY || 'pk_test_51234567890123456789012345678901234567890123456789012345678901234567890123456789012345';
|
||||
return import.meta.env.VITE_STRIPE_PUBLISHABLE_KEY || 'pk_test_51QuxKyIzCdnBmAVTGM8fvXYkItrBUILz6lHYwhAva6ZAH1HRi0e8zDRgZ4X3faN0zEABp5RHjCVBmMJL3aKXbaC200fFrSNnPl';
|
||||
};
|
||||
|
||||
const stripePromise = loadStripe(getStripeKey());
|
||||
// Initialize Stripe with explicit account and API version options; test vs. live mode is determined by the publishable key itself (pk_test_...)
|
||||
const stripePromise = loadStripe(getStripeKey(), {
|
||||
stripeAccount: import.meta.env.VITE_STRIPE_ACCOUNT_ID,
|
||||
apiVersion: '2023-10-16',
|
||||
betas: ['elements_v2']
|
||||
});
|
||||
|
||||
interface RegistrationContainerProps {
|
||||
onSuccess?: () => void;
|
||||
|
||||
@@ -3,13 +3,15 @@ export { default as LoginForm } from './LoginForm';
export { default as RegistrationContainer } from './RegistrationContainer';
export { default as PasswordResetForm } from './PasswordResetForm';
export { default as ProfileSettings } from './ProfileSettings';
export { default as PrelaunchEmailForm } from './PrelaunchEmailForm';

// Re-export types for convenience
export type {
export type {
  LoginFormProps,
  RegistrationContainerProps,
  RegistrationContainerProps,
  PasswordResetFormProps,
  ProfileSettingsProps
  ProfileSettingsProps,
  PrelaunchEmailFormProps
} from './types';

// Component metadata for documentation
@@ -29,6 +29,11 @@ export interface ProfileSettingsProps {
  initialTab?: 'profile' | 'security' | 'preferences' | 'notifications';
}

export interface PrelaunchEmailFormProps {
  onLoginClick?: () => void;
  className?: string;
}

// Additional types for internal use
export type RegistrationStep = 'personal' | 'bakery' | 'security' | 'verification';
@@ -52,6 +52,8 @@ const getStripePublishableKey = (): string => {
const stripePromise = loadStripe(getStripePublishableKey(), {
  betas: ['elements_v2'],
  locale: 'auto',
  stripeAccount: import.meta.env.VITE_STRIPE_ACCOUNT_ID,
  apiVersion: '2023-10-16'
});

/**
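The configured `stripePromise` above is only half of the wiring; as a point of reference, a minimal sketch of how such a promise is typically handed to Stripe's React provider, assuming the standard `@stripe/react-stripe-js` API (the wrapper component and placeholder key are illustrative, not part of this diff):

```tsx
// Illustrative only: shows the usual Elements/stripePromise wiring.
import React from 'react';
import { loadStripe } from '@stripe/stripe-js';
import { Elements } from '@stripe/react-stripe-js';

// Stands in for the module-level stripePromise created by loadStripe above.
const stripePromise = loadStripe('pk_test_placeholder');

export const CheckoutShell: React.FC<{ children: React.ReactNode }> = ({ children }) => (
  <Elements stripe={stripePromise}>{children}</Elements>
);
```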
@@ -11,6 +11,7 @@ import {
  SUBSCRIPTION_TIERS
} from '../../api';
import { getRegisterUrl } from '../../utils/navigation';
import { PRELAUNCH_CONFIG } from '../../config/prelaunch';

type BillingCycle = 'monthly' | 'yearly';
type DisplayMode = 'landing' | 'settings' | 'selection';
@@ -411,12 +412,16 @@ export const SubscriptionPricingCards: React.FC<SubscriptionPricingCardsProps> =
                )
              : mode === 'settings'
                ? t('ui.change_subscription', 'Cambiar Suscripción')
                : PRELAUNCH_CONFIG.enabled
                  ? t('ui.notify_me', 'Avísame del Lanzamiento')
                  : t('ui.start_free_trial')}
          </Button>

          {/* Footer */}
          <p className={`text-xs text-center mt-3 ${(isPopular || isSelected) && !isCurrentPlan ? 'text-white/80' : 'text-[var(--text-secondary)]'}`}>
            {showPilotBanner
            {PRELAUNCH_CONFIG.enabled
              ? t('ui.prelaunch_footer', 'Lanzamiento oficial próximamente')
              : showPilotBanner
                ? t('ui.free_trial_footer', { months: pilotTrialMonths })
                : t('ui.free_trial_footer', { months: 0 })
            }
@@ -1,92 +1,16 @@
/**
 * Pilot Program Configuration
 *
 * Centralized configuration for pilot mode features.
 *
 * Works in two modes:
 * 1. Kubernetes/Docker: Reads from window.__RUNTIME_CONFIG__ (injected at container startup)
 * 2. Local Development: Reads from import.meta.env (build-time variables from .env)
 * Uses build-time environment variables
 */

/**
 * Helper function to get environment variable value
 * Tries runtime config first (Kubernetes), falls back to build-time (local dev)
 */
const getEnvVar = (key: string): string | undefined => {
  // Try runtime config first (Kubernetes/Docker environment)
  if (typeof window !== 'undefined' && (window as any).__RUNTIME_CONFIG__) {
    const value = (window as any).__RUNTIME_CONFIG__[key];
    if (value !== undefined) {
      return value;
    }
  }

  // Fallback to build-time environment variables (local development)
  return import.meta.env[key];
export const PILOT_CONFIG = {
  enabled: import.meta.env.VITE_PILOT_MODE_ENABLED === 'true',
  couponCode: import.meta.env.VITE_PILOT_COUPON_CODE || 'PILOT2025',
  trialMonths: parseInt(import.meta.env.VITE_PILOT_TRIAL_MONTHS || '3'),
  get trialDays(): number {
    return this.trialMonths * 30;
  },
  lifetimeDiscount: 20,
};

/**
 * Create pilot config with getter functions to ensure we always read fresh values
 * This is important because runtime-config.js might load after this module
 */
const createPilotConfig = () => {
  return {
    /**
     * Master switch for pilot mode
     * When false, all pilot features are disabled globally
     */
    get enabled(): boolean {
      const value = getEnvVar('VITE_PILOT_MODE_ENABLED');
      return value === 'true';
    },

    /**
     * Coupon code for pilot participants
     */
    get couponCode(): string {
      return getEnvVar('VITE_PILOT_COUPON_CODE') || 'PILOT2025';
    },

    /**
     * Trial period in months for pilot participants
     */
    get trialMonths(): number {
      return parseInt(getEnvVar('VITE_PILOT_TRIAL_MONTHS') || '3');
    },

    /**
     * Trial period in days (calculated from months)
     */
    get trialDays(): number {
      return this.trialMonths * 30;
    },

    /**
     * Lifetime discount percentage for pilot participants
     */
    lifetimeDiscount: 20,
  };
};

export const PILOT_CONFIG = createPilotConfig();

// Debug logging
console.log('🔧 Pilot Config Loading:', {
  source: typeof window !== 'undefined' && (window as any).__RUNTIME_CONFIG__ ? 'runtime' : 'build-time',
  raw: getEnvVar('VITE_PILOT_MODE_ENABLED'),
  type: typeof getEnvVar('VITE_PILOT_MODE_ENABLED'),
  enabled: PILOT_CONFIG.enabled,
  runtimeConfigExists: typeof window !== 'undefined' && !!(window as any).__RUNTIME_CONFIG__,
  runtimeConfigKeys: typeof window !== 'undefined' && (window as any).__RUNTIME_CONFIG__
    ? Object.keys((window as any).__RUNTIME_CONFIG__)
    : []
});

console.log('✅ Pilot Config:', {
  enabled: PILOT_CONFIG.enabled,
  couponCode: PILOT_CONFIG.couponCode,
  trialMonths: PILOT_CONFIG.trialMonths,
  trialDays: PILOT_CONFIG.trialDays
});

export default PILOT_CONFIG;
17 frontend/src/config/prelaunch.ts (new file)
@@ -0,0 +1,17 @@
/**
 * Pre-launch Mode Configuration
 * Uses build-time environment variables
 *
 * When VITE_PRELAUNCH_MODE=true:
 * - Registration page shows email capture form instead of Stripe flow
 * - Pricing cards link to the same page but show interest form
 *
 * When VITE_PRELAUNCH_MODE=false (or not set):
 * - Normal registration flow with Stripe payments
 */

export const PRELAUNCH_CONFIG = {
  enabled: import.meta.env.VITE_PRELAUNCH_MODE === 'true',
};

export default PRELAUNCH_CONFIG;
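Because `PRELAUNCH_CONFIG` reads `import.meta.env` directly, the flag is baked in when `vite build` runs rather than injected at container startup. A minimal sketch of how a consumer might branch on it (the helper name and import path are illustrative, not part of this diff):

```typescript
// Hypothetical consumer of the build-time flag above.
import { PRELAUNCH_CONFIG } from '../config/prelaunch';

// In pre-launch builds the email capture form replaces the paid Stripe flow.
export function shouldShowStripeCheckout(): boolean {
  return !PRELAUNCH_CONFIG.enabled;
}
```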
@@ -1,83 +1,33 @@
|
||||
// Runtime configuration for Kubernetes deployments
|
||||
// This allows environment variables to be injected at container startup
|
||||
// Configuration - uses build-time environment variables
|
||||
export const config = {
|
||||
VITE_API_URL: import.meta.env.VITE_API_URL || '/api',
|
||||
VITE_APP_TITLE: import.meta.env.VITE_APP_TITLE || 'BakeWise',
|
||||
VITE_APP_VERSION: import.meta.env.VITE_APP_VERSION || '1.0.0',
|
||||
VITE_OTEL_ENABLED: import.meta.env.VITE_OTEL_ENABLED || 'true',
|
||||
VITE_OTEL_TRACES_ENDPOINT: import.meta.env.VITE_OTEL_TRACES_ENDPOINT || '/api/v1/telemetry/v1/traces',
|
||||
VITE_OTEL_METRICS_ENDPOINT: import.meta.env.VITE_OTEL_METRICS_ENDPOINT || '/api/v1/telemetry/v1/metrics',
|
||||
};
|
||||
|
||||
interface RuntimeConfig {
|
||||
VITE_API_URL: string;
|
||||
VITE_APP_TITLE: string;
|
||||
VITE_APP_VERSION: string;
|
||||
VITE_OTEL_TRACES_ENDPOINT?: string;
|
||||
VITE_OTEL_METRICS_ENDPOINT?: string;
|
||||
VITE_OTEL_ENABLED?: string;
|
||||
}
|
||||
|
||||
declare global {
|
||||
interface Window {
|
||||
__RUNTIME_CONFIG__?: RuntimeConfig;
|
||||
}
|
||||
}
|
||||
|
||||
// Types are defined in vite-env.d.ts
|
||||
|
||||
// Get configuration from runtime or fall back to build-time environment variables
|
||||
function getRuntimeConfig(): RuntimeConfig {
|
||||
// First try to get from window (injected at runtime in Kubernetes)
|
||||
if (typeof window !== 'undefined' && window.__RUNTIME_CONFIG__) {
|
||||
return window.__RUNTIME_CONFIG__;
|
||||
}
|
||||
|
||||
// Fall back to build-time environment variables (development/local)
|
||||
return {
|
||||
VITE_API_URL: import.meta.env.VITE_API_URL || 'http://localhost:8000',
|
||||
VITE_APP_TITLE: import.meta.env.VITE_APP_TITLE || 'PanIA Dashboard',
|
||||
VITE_APP_VERSION: import.meta.env.VITE_APP_VERSION || '1.0.0',
|
||||
VITE_OTEL_TRACES_ENDPOINT: import.meta.env.VITE_OTEL_TRACES_ENDPOINT || '/api/v1/telemetry/v1/traces',
|
||||
VITE_OTEL_METRICS_ENDPOINT: import.meta.env.VITE_OTEL_METRICS_ENDPOINT || '/api/v1/telemetry/v1/metrics',
|
||||
VITE_OTEL_ENABLED: import.meta.env.VITE_OTEL_ENABLED || 'true',
|
||||
};
|
||||
}
|
||||
|
||||
export const config = getRuntimeConfig();
|
||||
|
||||
// Helper function to get the API base URL
|
||||
export function getApiUrl(): string {
|
||||
return config.VITE_API_URL;
|
||||
}
|
||||
|
||||
// Helper function to get app title
|
||||
export function getAppTitle(): string {
|
||||
return config.VITE_APP_TITLE;
|
||||
}
|
||||
|
||||
// Helper function to get app version
|
||||
export function getAppVersion(): string {
|
||||
return config.VITE_APP_VERSION;
|
||||
}
|
||||
|
||||
// Helper to check if running in Kubernetes
|
||||
export function isKubernetesEnvironment(): boolean {
|
||||
return typeof window !== 'undefined' && !!window.__RUNTIME_CONFIG__;
|
||||
}
|
||||
|
||||
// Helper to check if OpenTelemetry is enabled
|
||||
export function isOpenTelemetryEnabled(): boolean {
|
||||
return config.VITE_OTEL_ENABLED?.toLowerCase() !== 'false';
|
||||
}
|
||||
|
||||
// Helper to get OpenTelemetry traces endpoint
|
||||
export function getOtelTracesEndpoint(): string {
|
||||
return config.VITE_OTEL_TRACES_ENDPOINT || '/api/v1/telemetry/v1/traces';
|
||||
return config.VITE_OTEL_TRACES_ENDPOINT;
|
||||
}
|
||||
|
||||
// Helper to get OpenTelemetry metrics endpoint
|
||||
export function getOtelMetricsEndpoint(): string {
|
||||
return config.VITE_OTEL_METRICS_ENDPOINT || '/api/v1/telemetry/v1/metrics';
|
||||
return config.VITE_OTEL_METRICS_ENDPOINT;
|
||||
}
|
||||
|
||||
// Debug function to log current configuration
|
||||
export function logConfig(): void {
|
||||
console.log('Current configuration:', {
|
||||
...config,
|
||||
isKubernetes: isKubernetesEnvironment(),
|
||||
source: isKubernetesEnvironment() ? 'runtime' : 'build-time'
|
||||
});
|
||||
}
|
||||
@@ -116,6 +116,23 @@
|
||||
"secure_payment": "Your payment information is protected with end-to-end encryption",
|
||||
"payment_info_secure": "Your payment information is secure"
|
||||
},
|
||||
"prelaunch": {
|
||||
"title": "Coming Soon",
|
||||
"subtitle": "We're preparing something special for your bakery",
|
||||
"description": "Be the first to know when we officially launch. Leave your email and we'll notify you.",
|
||||
"email_required": "Email is required",
|
||||
"email_invalid": "Please enter a valid email address",
|
||||
"submit_error": "An error occurred. Please try again.",
|
||||
"subscribe_button": "Notify Me",
|
||||
"submitting": "Submitting...",
|
||||
"success_title": "You're on the list!",
|
||||
"success_message": "We'll send you an email when we're ready to launch. Thanks for your interest!",
|
||||
"back_to_home": "Back to Home",
|
||||
"benefits_title": "By subscribing you'll receive:",
|
||||
"benefit_1": "Early access to launch",
|
||||
"benefit_2": "Exclusive offers for early adopters",
|
||||
"benefit_3": "Product news and updates"
|
||||
},
|
||||
"steps": {
|
||||
"info": "Information",
|
||||
"subscription": "Plan",
|
||||
|
||||
@@ -157,6 +157,8 @@
|
||||
"payment_details": "Payment Details",
|
||||
"payment_info_secure": "Your payment information is protected with end-to-end encryption",
|
||||
"updating_payment": "Updating...",
|
||||
"cancel": "Cancel"
|
||||
"cancel": "Cancel",
|
||||
"notify_me": "Notify Me of Launch",
|
||||
"prelaunch_footer": "Official launch coming soon"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -127,6 +127,23 @@
|
||||
"go_to_login": "Ir a inicio de sesión",
|
||||
"try_again": "Intentar registro de nuevo"
|
||||
},
|
||||
"prelaunch": {
|
||||
"title": "Próximamente",
|
||||
"subtitle": "Estamos preparando algo especial para tu panadería",
|
||||
"description": "Sé el primero en saber cuándo lancemos oficialmente. Déjanos tu email y te avisaremos.",
|
||||
"email_required": "El correo electrónico es obligatorio",
|
||||
"email_invalid": "Por favor, introduce un correo electrónico válido",
|
||||
"submit_error": "Ha ocurrido un error. Por favor, inténtalo de nuevo.",
|
||||
"subscribe_button": "Quiero que me avisen",
|
||||
"submitting": "Enviando...",
|
||||
"success_title": "¡Genial! Te hemos apuntado",
|
||||
"success_message": "Te enviaremos un email cuando estemos listos para el lanzamiento. ¡Gracias por tu interés!",
|
||||
"back_to_home": "Volver al inicio",
|
||||
"benefits_title": "Al suscribirte recibirás:",
|
||||
"benefit_1": "Acceso anticipado al lanzamiento",
|
||||
"benefit_2": "Ofertas exclusivas para early adopters",
|
||||
"benefit_3": "Noticias y actualizaciones del producto"
|
||||
},
|
||||
"steps": {
|
||||
"info": "Información",
|
||||
"subscription": "Plan",
|
||||
|
||||
@@ -157,6 +157,8 @@
|
||||
"payment_details": "Detalles de Pago",
|
||||
"payment_info_secure": "Tu información de pago está protegida con encriptación de extremo a extremo",
|
||||
"updating_payment": "Actualizando...",
|
||||
"cancel": "Cancelar"
|
||||
"cancel": "Cancelar",
|
||||
"notify_me": "Avísame del Lanzamiento",
|
||||
"prelaunch_footer": "Lanzamiento oficial próximamente"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import {
|
||||
AlertCircle,
|
||||
HelpCircle
|
||||
} from 'lucide-react';
|
||||
import { publicContactService } from '../../api/services/publicContact';
|
||||
|
||||
interface ContactMethod {
|
||||
id: string;
|
||||
@@ -73,25 +74,35 @@ const ContactPage: React.FC = () => {
|
||||
e.preventDefault();
|
||||
setSubmitStatus('loading');
|
||||
|
||||
// Simulate API call
|
||||
await new Promise((resolve) => setTimeout(resolve, 1500));
|
||||
|
||||
// In production, this would be an actual API call
|
||||
console.log('Form submitted:', formState);
|
||||
|
||||
setSubmitStatus('success');
|
||||
setTimeout(() => {
|
||||
setSubmitStatus('idle');
|
||||
setFormState({
|
||||
name: '',
|
||||
email: '',
|
||||
phone: '',
|
||||
bakeryName: '',
|
||||
subject: '',
|
||||
message: '',
|
||||
type: 'general',
|
||||
try {
|
||||
await publicContactService.submitContactForm({
|
||||
name: formState.name,
|
||||
email: formState.email,
|
||||
phone: formState.phone || undefined,
|
||||
bakery_name: formState.bakeryName || undefined,
|
||||
type: formState.type,
|
||||
subject: formState.subject,
|
||||
message: formState.message,
|
||||
});
|
||||
}, 3000);
|
||||
|
||||
setSubmitStatus('success');
|
||||
setTimeout(() => {
|
||||
setSubmitStatus('idle');
|
||||
setFormState({
|
||||
name: '',
|
||||
email: '',
|
||||
phone: '',
|
||||
bakeryName: '',
|
||||
subject: '',
|
||||
message: '',
|
||||
type: 'general',
|
||||
});
|
||||
}, 3000);
|
||||
} catch (error) {
|
||||
console.error('Contact form submission error:', error);
|
||||
setSubmitStatus('error');
|
||||
setTimeout(() => setSubmitStatus('idle'), 5000);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
|
||||
@@ -13,6 +13,7 @@ import {
|
||||
AlertCircle,
|
||||
Star
|
||||
} from 'lucide-react';
|
||||
import { publicContactService } from '../../api/services/publicContact';
|
||||
|
||||
interface FeedbackCategory {
|
||||
id: string;
|
||||
@@ -90,24 +91,33 @@ const FeedbackPage: React.FC = () => {
|
||||
e.preventDefault();
|
||||
setSubmitStatus('loading');
|
||||
|
||||
// Simulate API call
|
||||
await new Promise((resolve) => setTimeout(resolve, 1500));
|
||||
|
||||
// In production, this would be an actual API call
|
||||
console.log('Feedback submitted:', formState);
|
||||
|
||||
setSubmitStatus('success');
|
||||
setTimeout(() => {
|
||||
setSubmitStatus('idle');
|
||||
setFormState({
|
||||
name: '',
|
||||
email: '',
|
||||
category: 'suggestion',
|
||||
title: '',
|
||||
description: '',
|
||||
rating: 0,
|
||||
try {
|
||||
await publicContactService.submitFeedbackForm({
|
||||
name: formState.name,
|
||||
email: formState.email,
|
||||
category: formState.category,
|
||||
title: formState.title,
|
||||
description: formState.description,
|
||||
rating: formState.rating > 0 ? formState.rating : undefined,
|
||||
});
|
||||
}, 3000);
|
||||
|
||||
setSubmitStatus('success');
|
||||
setTimeout(() => {
|
||||
setSubmitStatus('idle');
|
||||
setFormState({
|
||||
name: '',
|
||||
email: '',
|
||||
category: 'suggestion',
|
||||
title: '',
|
||||
description: '',
|
||||
rating: 0,
|
||||
});
|
||||
}, 3000);
|
||||
} catch (error) {
|
||||
console.error('Feedback form submission error:', error);
|
||||
setSubmitStatus('error');
|
||||
setTimeout(() => setSubmitStatus('idle'), 5000);
|
||||
}
|
||||
};
|
||||
|
||||
const getCategoryColor = (color: string) => {
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import React from 'react';
|
||||
import { useNavigate } from 'react-router-dom';
|
||||
import { RegistrationContainer } from '../../components/domain/auth';
|
||||
import { RegistrationContainer, PrelaunchEmailForm } from '../../components/domain/auth';
|
||||
import { PublicLayout } from '../../components/layout';
|
||||
import { PRELAUNCH_CONFIG } from '../../config/prelaunch';
|
||||
|
||||
const RegisterPage: React.FC = () => {
|
||||
const navigate = useNavigate();
|
||||
@@ -14,6 +15,27 @@ const RegisterPage: React.FC = () => {
|
||||
navigate('/login');
|
||||
};
|
||||
|
||||
// Show prelaunch email form or full registration based on build-time config
|
||||
if (PRELAUNCH_CONFIG.enabled) {
|
||||
return (
|
||||
<PublicLayout
|
||||
variant="centered"
|
||||
maxWidth="lg"
|
||||
headerProps={{
|
||||
showThemeToggle: true,
|
||||
showAuthButtons: false,
|
||||
showLanguageSelector: true,
|
||||
variant: "minimal"
|
||||
}}
|
||||
>
|
||||
<PrelaunchEmailForm
|
||||
onLoginClick={handleLoginClick}
|
||||
className="mx-auto"
|
||||
/>
|
||||
</PublicLayout>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<PublicLayout
|
||||
variant="centered"
|
||||
|
||||
21 frontend/src/vite-env.d.ts (vendored)
@@ -2,30 +2,17 @@
|
||||
|
||||
interface ImportMetaEnv {
|
||||
readonly VITE_API_URL: string
|
||||
readonly VITE_API_BASE_URL: string
|
||||
readonly VITE_APP_TITLE: string
|
||||
readonly VITE_APP_VERSION: string
|
||||
readonly VITE_ENVIRONMENT: string
|
||||
readonly VITE_OTEL_ENABLED?: string
|
||||
readonly VITE_OTEL_TRACES_ENDPOINT?: string
|
||||
readonly VITE_OTEL_METRICS_ENDPOINT?: string
|
||||
readonly VITE_PILOT_MODE_ENABLED?: string
|
||||
readonly VITE_PILOT_COUPON_CODE?: string
|
||||
readonly VITE_PILOT_TRIAL_MONTHS?: string
|
||||
readonly VITE_STRIPE_PUBLISHABLE_KEY?: string
|
||||
// more env variables...
|
||||
readonly VITE_PRELAUNCH_MODE?: string
|
||||
}
|
||||
|
||||
interface ImportMeta {
|
||||
readonly env: ImportMetaEnv
|
||||
}
|
||||
|
||||
// Runtime configuration injected by Kubernetes at container startup
|
||||
interface Window {
|
||||
__RUNTIME_CONFIG__?: {
|
||||
VITE_API_URL?: string;
|
||||
VITE_APP_TITLE?: string;
|
||||
VITE_APP_VERSION?: string;
|
||||
VITE_PILOT_MODE_ENABLED?: string;
|
||||
VITE_PILOT_COUPON_CODE?: string;
|
||||
VITE_PILOT_TRIAL_MONTHS?: string;
|
||||
VITE_STRIPE_PUBLISHABLE_KEY?: string;
|
||||
};
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Handle VITE_API_URL specially to preserve empty values
|
||||
# If VITE_API_URL is unset, use default; if empty, preserve empty; otherwise use value
|
||||
if [ -z "${VITE_API_URL+x}" ]; then
|
||||
export VITE_API_URL="/api"
|
||||
elif [ -z "$VITE_API_URL" ]; then
|
||||
# If VITE_API_URL is explicitly set to empty string, use relative API path
|
||||
export VITE_API_URL="/api"
|
||||
fi
|
||||
|
||||
# Default values for environment variables
|
||||
export VITE_APP_TITLE=${VITE_APP_TITLE:-"BakeWise"}
|
||||
export VITE_APP_VERSION=${VITE_APP_VERSION:-"1.0.0"}
|
||||
|
||||
# Default values for pilot program configuration
|
||||
export VITE_PILOT_MODE_ENABLED=${VITE_PILOT_MODE_ENABLED:-"false"}
|
||||
export VITE_PILOT_COUPON_CODE=${VITE_PILOT_COUPON_CODE:-"PILOT2025"}
|
||||
export VITE_PILOT_TRIAL_MONTHS=${VITE_PILOT_TRIAL_MONTHS:-"3"}
|
||||
export VITE_STRIPE_PUBLISHABLE_KEY=${VITE_STRIPE_PUBLISHABLE_KEY:-"pk_test_"}
|
||||
|
||||
# Create a runtime configuration file that can be loaded by the frontend
|
||||
cat > /usr/share/nginx/html/runtime-config.js << EOL
|
||||
window.__RUNTIME_CONFIG__ = {
|
||||
VITE_API_URL: '${VITE_API_URL}',
|
||||
VITE_APP_TITLE: '${VITE_APP_TITLE}',
|
||||
VITE_APP_VERSION: '${VITE_APP_VERSION}',
|
||||
VITE_PILOT_MODE_ENABLED: '${VITE_PILOT_MODE_ENABLED}',
|
||||
VITE_PILOT_COUPON_CODE: '${VITE_PILOT_COUPON_CODE}',
|
||||
VITE_PILOT_TRIAL_MONTHS: '${VITE_PILOT_TRIAL_MONTHS}',
|
||||
VITE_STRIPE_PUBLISHABLE_KEY: '${VITE_STRIPE_PUBLISHABLE_KEY}'
|
||||
};
|
||||
EOL
|
||||
|
||||
echo "Runtime configuration created:"
|
||||
echo " API URL: ${VITE_API_URL}"
|
||||
echo " Pilot Mode: ${VITE_PILOT_MODE_ENABLED}"
|
||||
echo " Pilot Coupon: ${VITE_PILOT_COUPON_CODE}"
|
||||
echo " Trial Months: ${VITE_PILOT_TRIAL_MONTHS}"
|
||||
@@ -25,7 +25,7 @@ from app.middleware.rate_limiting import APIRateLimitMiddleware
from app.middleware.subscription import SubscriptionMiddleware
from app.middleware.demo_middleware import DemoMiddleware
from app.middleware.read_only_mode import ReadOnlyModeMiddleware
from app.routes import auth, tenant, registration, nominatim, subscription, demo, pos, geocoding, poi_context, webhooks, telemetry
from app.routes import auth, tenant, registration, nominatim, subscription, demo, pos, geocoding, poi_context, webhooks, telemetry, public

# Initialize logger
logger = structlog.get_logger()
@@ -172,6 +172,9 @@ app.include_router(webhooks.router, prefix="", tags=["webhooks"])
# Include telemetry routes for frontend OpenTelemetry data
app.include_router(telemetry.router, prefix="/api/v1", tags=["telemetry"])

# Include public routes (contact forms, feedback, pre-launch subscriptions)
app.include_router(public.router, prefix="/api/v1/public", tags=["public"])


# ================================================================
# SERVER-SENT EVENTS (SSE) HELPER FUNCTIONS
@@ -50,7 +50,10 @@ PUBLIC_ROUTES = [
    "/api/v1/webhooks/generic",  # Generic webhook endpoint
    "/api/v1/telemetry/v1/traces",  # Frontend telemetry traces - no auth for performance
    "/api/v1/telemetry/v1/metrics",  # Frontend telemetry metrics - no auth for performance
    "/api/v1/telemetry/health"  # Telemetry health check
    "/api/v1/telemetry/health",  # Telemetry health check
    "/api/v1/public/contact",  # Public contact form - no auth required
    "/api/v1/public/feedback",  # Public feedback form - no auth required
    "/api/v1/public/prelaunch-subscribe"  # Pre-launch email subscription - no auth required
]

# Routes accessible with demo session (no JWT required, just demo session header)
56 gateway/app/routes/public.py (new file)
@@ -0,0 +1,56 @@
# gateway/app/routes/public.py
"""
Public routes for API Gateway - Handles unauthenticated public endpoints
"""

from fastapi import APIRouter, Request
import httpx
import logging

from app.core.config import settings

logger = logging.getLogger(__name__)
router = APIRouter()


async def _proxy_to_notification_service(request: Request, path: str):
    """Proxy request to notification service"""
    try:
        body = await request.body()

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.request(
                method=request.method,
                url=f"{settings.NOTIFICATION_SERVICE_URL}{path}",
                content=body,
                headers={
                    "Content-Type": request.headers.get("Content-Type", "application/json"),
                }
            )

        return response.json() if response.content else {}

    except httpx.TimeoutException:
        logger.error(f"Timeout proxying to notification service: {path}")
        return {"success": False, "message": "Service temporarily unavailable"}
    except Exception as e:
        logger.error(f"Error proxying to notification service: {e}")
        return {"success": False, "message": "Internal error"}


@router.post("/contact")
async def submit_contact_form(request: Request):
    """Proxy contact form submission to notification service"""
    return await _proxy_to_notification_service(request, "/api/v1/public/contact")


@router.post("/feedback")
async def submit_feedback_form(request: Request):
    """Proxy feedback form submission to notification service"""
    return await _proxy_to_notification_service(request, "/api/v1/public/feedback")


@router.post("/prelaunch-subscribe")
async def submit_prelaunch_email(request: Request):
    """Proxy pre-launch email subscription to notification service"""
    return await _proxy_to_notification_service(request, "/api/v1/public/prelaunch-subscribe")
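Since these gateway routes are listed in `PUBLIC_ROUTES`, they can be called without a JWT; a minimal sketch of the kind of client call the public forms make against them (base URL handling and error strategy are assumptions, not part of this diff):

```typescript
// Illustrative unauthenticated call to the pre-launch subscription endpoint.
export async function subscribePrelaunchEmail(email: string): Promise<void> {
  const response = await fetch('/api/v1/public/prelaunch-subscribe', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ email }),
  });
  if (!response.ok) {
    throw new Error(`Subscription failed with status ${response.status}`);
  }
}
```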
@@ -44,6 +44,33 @@ gitea:
|
||||
SSH_DOMAIN: gitea.bakewise.ai
|
||||
ROOT_URL: https://gitea.bakewise.ai
|
||||
|
||||
# =============================================================================
|
||||
# PACKAGE/REGISTRY RETENTION POLICY
|
||||
# =============================================================================
|
||||
# Automatic cleanup of old container images and packages
|
||||
# This prevents the registry from growing indefinitely
|
||||
packages:
|
||||
ENABLED: true
|
||||
# Limit container image versions to prevent storage bloat
|
||||
# 0 = unlimited (default), set a reasonable limit for CI/CD
|
||||
LIMIT_TOTAL_OWNER_SIZE: 10737418240 # 10GB per owner/organization
|
||||
LIMIT_SIZE_CONTAINER: 2147483648 # 2GB per container image
|
||||
|
||||
# Cron job for automatic package cleanup
|
||||
cron:
|
||||
ENABLED: true
|
||||
"cron.cleanup_packages":
|
||||
ENABLED: true
|
||||
# Run daily at 3 AM
|
||||
SCHEDULE: "0 3 * * *"
|
||||
# Keep packages newer than this (in hours) - 168h = 7 days
|
||||
OLDER_THAN: 168h
|
||||
# Number of versions to keep per package (0 = disabled)
|
||||
# This keeps the last 5 versions regardless of age
|
||||
NUMBER_TO_KEEP: 5
|
||||
# Also clean up unreferenced blobs
|
||||
REMOVE_UNUSED_ARTIFACTS: true
|
||||
|
||||
# Production resources - adjust based on expected load
|
||||
resources:
|
||||
limits:
|
||||
|
||||
@@ -93,6 +93,8 @@ gitea:
|
||||
DEFAULT_BRANCH: main
|
||||
packages:
|
||||
ENABLED: true
|
||||
# Retention policy is configured in values-prod.yaml for production
|
||||
# See: LIMIT_TOTAL_OWNER_SIZE, LIMIT_SIZE_CONTAINER, cron.cleanup_packages
|
||||
webhook:
|
||||
ALLOWED_HOST_LIST: "*"
|
||||
# Allow internal cluster URLs for Tekton EventListener
|
||||
|
||||
@@ -16,16 +16,10 @@ spec:
|
||||
interceptors:
|
||||
- ref:
|
||||
name: "cel"
|
||||
kind: ClusterInterceptor
|
||||
params:
|
||||
- name: "filter"
|
||||
value: "has(body.repository) && body.ref.contains('main')"
|
||||
- ref:
|
||||
name: "bitbucket"
|
||||
params:
|
||||
- name: "secretRef"
|
||||
value:
|
||||
secretName: gitea-webhook-secret
|
||||
secretKey: secretToken
|
||||
bindings:
|
||||
- ref: bakery-ia-trigger-binding
|
||||
template:
|
||||
|
||||
@@ -6,4 +6,7 @@ metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ .Values.labels.app.name }}
|
||||
app.kubernetes.io/component: {{ .Values.labels.app.component }}
|
||||
{{- end }}
|
||||
pod-security.kubernetes.io/enforce: baseline
|
||||
pod-security.kubernetes.io/warn: baseline
|
||||
pod-security.kubernetes.io/audit: baseline
|
||||
{{- end }}
|
||||
|
||||
@@ -19,7 +19,6 @@ spec:
|
||||
description: Docker registry credentials
|
||||
- name: git-credentials
|
||||
description: Git credentials for pushing GitOps updates
|
||||
optional: true
|
||||
params:
|
||||
- name: git-url
|
||||
type: string
|
||||
@@ -98,7 +97,7 @@ spec:
|
||||
|
||||
# Stage 4: Build and push container images
|
||||
- name: build-and-push
|
||||
runAfter: [run-tests]
|
||||
runAfter: [detect-changes]
|
||||
taskRef:
|
||||
name: kaniko-build
|
||||
when:
|
||||
|
||||
@@ -18,70 +18,105 @@ spec:
|
||||
description: Comma-separated list of changed services
|
||||
steps:
|
||||
- name: detect-changes
|
||||
image: alpine/git
|
||||
image: alpine/git:2.43.0
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65532
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
env:
|
||||
- name: HOME
|
||||
value: /tekton/home
|
||||
script: |
|
||||
#!/bin/bash
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Mark workspace as safe directory to avoid ownership issues
|
||||
git config --global --add safe.directory "$(workspaces.source.path)"
|
||||
|
||||
cd $(workspaces.source.path)
|
||||
|
||||
# Get the list of changed files
|
||||
CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD 2>/dev/null || git diff --name-only $(git rev-parse --abbrev-ref HEAD)@{upstream} HEAD 2>/dev/null || echo "")
|
||||
echo "Git log (last 3 commits):"
|
||||
git log --oneline -3 || echo "Cannot get git log"
|
||||
|
||||
if [ -z "$CHANGED_FILES" ]; then
|
||||
# No changes detected, assume all services need building
|
||||
echo "No git changes detected, building all services"
|
||||
echo "all" > $(results.changed-services.path)
|
||||
# Check if we have enough history for comparison
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD 2>/dev/null || echo "0")
|
||||
echo "Commit count in history: $COMMIT_COUNT"
|
||||
|
||||
if [ "$COMMIT_COUNT" -lt 2 ]; then
|
||||
echo "Not enough git history for change detection (need at least 2 commits)"
|
||||
echo "Building all services as fallback"
|
||||
echo -n "all" > $(results.changed-services.path)
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Initialize an array to collect changed services
|
||||
declare -a changed_services=()
|
||||
# Get the list of changed files
|
||||
CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$CHANGED_FILES" ]; then
|
||||
# Empty commit or something unusual - skip build
|
||||
echo "No file changes detected in last commit"
|
||||
echo -n "infrastructure" > $(results.changed-services.path)
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Changed files:"
|
||||
echo "$CHANGED_FILES"
|
||||
|
||||
# Initialize empty string to collect changed services
|
||||
CHANGED_SERVICES=""
|
||||
|
||||
# Helper function to add service if not already present
|
||||
add_service() {
|
||||
svc="$1"
|
||||
case ",$CHANGED_SERVICES," in
|
||||
*",$svc,"*) ;; # Already present
|
||||
*)
|
||||
if [ -z "$CHANGED_SERVICES" ]; then
|
||||
CHANGED_SERVICES="$svc"
|
||||
else
|
||||
CHANGED_SERVICES="$CHANGED_SERVICES,$svc"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Check for changes in services/ directory
|
||||
while IFS= read -r service_dir; do
|
||||
if [ -n "$service_dir" ]; then
|
||||
service_name=$(basename "$service_dir")
|
||||
if [[ ! " ${changed_services[@]} " =~ " ${service_name} " ]]; then
|
||||
changed_services+=("$service_name")
|
||||
fi
|
||||
for svc in $(echo "$CHANGED_FILES" | grep '^services/' | cut -d'/' -f2 | sort -u); do
|
||||
if [ -n "$svc" ]; then
|
||||
add_service "$svc"
|
||||
fi
|
||||
done < <(echo "$CHANGED_FILES" | grep '^services/' | cut -d'/' -f2 | sort -u)
|
||||
done
|
||||
|
||||
# Check for changes in gateway/ directory
|
||||
if echo "$CHANGED_FILES" | grep -q '^gateway/'; then
|
||||
if [[ ! " ${changed_services[@]} " =~ " gateway " ]]; then
|
||||
changed_services+=("gateway")
|
||||
fi
|
||||
add_service "gateway"
|
||||
fi
|
||||
|
||||
# Check for changes in frontend/ directory
|
||||
if echo "$CHANGED_FILES" | grep -q '^frontend/'; then
|
||||
if [[ ! " ${changed_services[@]} " =~ " frontend " ]]; then
|
||||
changed_services+=("frontend")
|
||||
fi
|
||||
add_service "frontend"
|
||||
fi
|
||||
|
||||
# Check for changes in shared/ directory (might affect multiple services)
|
||||
# Check for changes in shared/ directory
|
||||
# shared/ contains code used by services and gateway (NOT frontend), so rebuild them
|
||||
if echo "$CHANGED_FILES" | grep -q '^shared/'; then
|
||||
if [[ ! " ${changed_services[@]} " =~ " shared " ]]; then
|
||||
changed_services+=("shared")
|
||||
fi
|
||||
echo "Detected changes in shared/ - triggering rebuild of all services and gateway"
|
||||
echo -n "services-and-gateway" > $(results.changed-services.path)
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Convert array to comma-separated string
|
||||
CHANGED_SERVICES=""
|
||||
for service in "${changed_services[@]}"; do
|
||||
if [ -z "$CHANGED_SERVICES" ]; then
|
||||
CHANGED_SERVICES="$service"
|
||||
else
|
||||
CHANGED_SERVICES="$CHANGED_SERVICES,$service"
|
||||
fi
|
||||
done
|
||||
|
||||
# IMPORTANT: Use echo -n (no newline) to avoid trailing newline in results
|
||||
# Trailing newlines cause Tekton when expressions to fail matching
|
||||
if [ -z "$CHANGED_SERVICES" ]; then
|
||||
# Changes are in infrastructure or other non-service files
|
||||
echo "infrastructure" > $(results.changed-services.path)
|
||||
echo "Detected: infrastructure changes only"
|
||||
echo -n "infrastructure" > $(results.changed-services.path)
|
||||
else
|
||||
echo "$CHANGED_SERVICES" > $(results.changed-services.path)
|
||||
echo "Detected changed services: $CHANGED_SERVICES"
|
||||
echo -n "$CHANGED_SERVICES" > $(results.changed-services.path)
|
||||
fi
|
||||
@@ -23,8 +23,8 @@ spec:
|
||||
default: "main"
|
||||
- name: depth
|
||||
type: string
|
||||
description: Git clone depth (0 for full history)
|
||||
default: "1"
|
||||
description: Git clone depth (0 for full history, minimum 2 for change detection)
|
||||
default: "10"
|
||||
results:
|
||||
- name: commit-sha
|
||||
description: The commit SHA that was checked out
|
||||
@@ -33,6 +33,18 @@ spec:
|
||||
steps:
|
||||
- name: clone
|
||||
image: alpine/git:2.43.0
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65532
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
env:
|
||||
- name: HOME
|
||||
value: /tekton/home
|
||||
script: |
|
||||
#!/bin/sh
|
||||
set -e
|
||||
@@ -50,7 +62,11 @@ spec:
|
||||
echo "Depth: $DEPTH"
|
||||
echo "============================================"
|
||||
|
||||
# Mark workspace as safe directory to avoid ownership issues
|
||||
git config --global --add safe.directory "$OUTPUT_PATH"
|
||||
|
||||
# Clone with depth for faster checkout
|
||||
# Note: We need at least 2 commits for change detection (current + parent)
|
||||
if [ "$DEPTH" = "0" ]; then
|
||||
echo "Cloning full repository..."
|
||||
git clone "$URL" "$OUTPUT_PATH"
|
||||
@@ -61,15 +77,42 @@ spec:
|
||||
|
||||
cd "$OUTPUT_PATH"
|
||||
|
||||
# Fetch the specific revision if needed
|
||||
if [ "$REVISION" != "main" ] && [ "$REVISION" != "master" ]; then
|
||||
echo "Fetching revision: $REVISION"
|
||||
git fetch --depth 1 origin "$REVISION" 2>/dev/null || true
|
||||
# If revision is a specific commit SHA (40 hex chars), we need special handling
|
||||
if echo "$REVISION" | grep -qE '^[0-9a-f]{40}$'; then
|
||||
echo "Revision is a commit SHA: $REVISION"
|
||||
|
||||
# Check if commit is already in the clone
|
||||
if ! git cat-file -e "$REVISION" 2>/dev/null; then
|
||||
echo "Commit not in shallow clone, fetching with history..."
|
||||
# Fetch more history to include the specific commit
|
||||
git fetch --deepen="$DEPTH" origin main 2>/dev/null || true
|
||||
git fetch origin "$REVISION" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Ensure we have the parent commit for change detection
|
||||
PARENT_SHA=$(git rev-parse "$REVISION^" 2>/dev/null || echo "")
|
||||
if [ -z "$PARENT_SHA" ]; then
|
||||
echo "Parent commit not available, deepening history..."
|
||||
git fetch --deepen=10 origin 2>/dev/null || true
|
||||
fi
|
||||
elif [ "$REVISION" != "main" ] && [ "$REVISION" != "master" ]; then
|
||||
echo "Fetching branch/tag: $REVISION"
|
||||
git fetch --depth "$DEPTH" origin "$REVISION" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Checkout the revision
|
||||
echo "Checking out: $REVISION"
|
||||
git checkout "$REVISION" 2>/dev/null || git checkout "origin/$REVISION"
|
||||
git checkout "$REVISION" 2>/dev/null || git checkout "origin/$REVISION" 2>/dev/null || git checkout FETCH_HEAD
|
||||
|
||||
# Verify we have enough history for change detection
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD 2>/dev/null || echo "0")
|
||||
echo "Commits available after checkout: $COMMIT_COUNT"
|
||||
if [ "$COMMIT_COUNT" -lt 2 ]; then
|
||||
echo "Warning: Not enough history, fetching more..."
|
||||
git fetch --deepen=10 origin 2>/dev/null || true
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD 2>/dev/null || echo "0")
|
||||
echo "Commits available after deepen: $COMMIT_COUNT"
|
||||
fi
|
||||
|
||||
# Get commit info
|
||||
COMMIT_SHA=$(git rev-parse HEAD)
|
||||
|
||||
@@ -39,14 +39,37 @@ spec:
|
||||
description: Status of the build operation
|
||||
steps:
|
||||
- name: build-and-push
|
||||
image: gcr.io/kaniko-project/executor:v1.15.0
|
||||
image: gcr.io/kaniko-project/executor:v1.15.0-debug
|
||||
# Note: Kaniko requires root to unpack image layers and perform chown operations
|
||||
# This is a known requirement for container image building
|
||||
securityContext:
|
||||
runAsNonRoot: false
|
||||
runAsUser: 0
|
||||
allowPrivilegeEscalation: false
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
env:
|
||||
- name: DOCKER_CONFIG
|
||||
value: /tekton/home/.docker
|
||||
script: |
|
||||
#!/bin/bash
|
||||
#!/busybox/sh
|
||||
set -e
|
||||
|
||||
# Set up Docker credentials from workspace
|
||||
DOCKER_CREDS_PATH="$(workspaces.docker-credentials.path)"
|
||||
echo "Setting up Docker credentials from: $DOCKER_CREDS_PATH"
|
||||
mkdir -p /tekton/home/.docker
|
||||
if [ -f "$DOCKER_CREDS_PATH/config.json" ]; then
|
||||
cp "$DOCKER_CREDS_PATH/config.json" /tekton/home/.docker/config.json
|
||||
echo "Docker config.json copied successfully"
|
||||
elif [ -f "$DOCKER_CREDS_PATH/.dockerconfigjson" ]; then
|
||||
cp "$DOCKER_CREDS_PATH/.dockerconfigjson" /tekton/home/.docker/config.json
|
||||
echo "Docker .dockerconfigjson copied successfully"
|
||||
else
|
||||
echo "Warning: No docker credentials found in workspace"
|
||||
ls -la "$DOCKER_CREDS_PATH/" || echo "Cannot list docker-credentials workspace"
|
||||
fi
|
||||
|
||||
echo "==================================================================="
|
||||
echo "Kaniko Build Configuration"
|
||||
echo "==================================================================="
|
||||
@@ -54,50 +77,202 @@ spec:
|
||||
echo "Base Registry: $(params.base-registry)"
|
||||
echo "Python Image: $(params.python-image)"
|
||||
echo "Git Revision: $(params.git-revision)"
|
||||
echo "Services param: $(params.services)"
|
||||
echo "==================================================================="
|
||||
|
||||
# Split services parameter by comma
|
||||
IFS=',' read -ra SERVICES <<< "$(params.services)"
|
||||
# Trim whitespace and newlines from services param
|
||||
SERVICES_PARAM=$(echo "$(params.services)" | tr -d '\n' | tr -d ' ')
|
||||
WORKSPACE="$(workspaces.source.path)"
|
||||
|
||||
# Build each service
|
||||
for service in "${SERVICES[@]}"; do
|
||||
service=$(echo "$service" | xargs) # Trim whitespace
|
||||
if [ -n "$service" ] && [ "$service" != "none" ]; then
|
||||
echo ""
|
||||
echo "Building service: $service"
|
||||
echo "-------------------------------------------------------------------"
|
||||
echo "Trimmed services param: '$SERVICES_PARAM'"
|
||||
|
||||
# Determine Dockerfile path (services vs gateway vs frontend)
|
||||
if [ "$service" = "gateway" ]; then
|
||||
DOCKERFILE_PATH="$(workspaces.source.path)/gateway/Dockerfile"
|
||||
elif [ "$service" = "frontend" ]; then
|
||||
DOCKERFILE_PATH="$(workspaces.source.path)/frontend/Dockerfile.kubernetes"
|
||||
# Handle special cases for service discovery
|
||||
# "all" = all services + gateway + frontend
|
||||
# "services-and-gateway" = all services + gateway (no frontend) - used when shared/ changes
|
||||
if [ "$SERVICES_PARAM" = "all" ] || [ "$SERVICES_PARAM" = "services-and-gateway" ]; then
|
||||
if [ "$SERVICES_PARAM" = "all" ]; then
|
||||
echo "Building all services (including frontend) - discovering from workspace..."
|
||||
else
|
||||
echo "Building services and gateway (shared/ changed) - discovering from workspace..."
|
||||
fi
|
||||
echo "Workspace contents:"
|
||||
ls -la "$WORKSPACE/"
|
||||
echo "Services directory contents:"
|
||||
ls -la "$WORKSPACE/services/" || echo "No services directory"
|
||||
|
||||
SERVICES=""
|
||||
# Find all services with Dockerfiles using ls
|
||||
if [ -d "$WORKSPACE/services" ]; then
|
||||
for svc_name in $(ls "$WORKSPACE/services/"); do
|
||||
if [ -f "$WORKSPACE/services/$svc_name/Dockerfile" ]; then
|
||||
if [ -z "$SERVICES" ]; then
|
||||
SERVICES="$svc_name"
|
||||
else
|
||||
SERVICES="$SERVICES,$svc_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
# Add gateway if it has Dockerfile
|
||||
if [ -f "$WORKSPACE/gateway/Dockerfile" ]; then
|
||||
if [ -z "$SERVICES" ]; then
|
||||
SERVICES="gateway"
|
||||
else
|
||||
DOCKERFILE_PATH="$(workspaces.source.path)/services/$service/Dockerfile"
|
||||
SERVICES="$SERVICES,gateway"
|
||||
fi
|
||||
fi
|
||||
# Add frontend ONLY for "all" (not for "services-and-gateway")
|
||||
if [ "$SERVICES_PARAM" = "all" ] && [ -f "$WORKSPACE/frontend/Dockerfile.kubernetes" ]; then
|
||||
if [ -z "$SERVICES" ]; then
|
||||
SERVICES="frontend"
|
||||
else
|
||||
SERVICES="$SERVICES,frontend"
|
||||
fi
|
||||
fi
|
||||
echo "Discovered services: $SERVICES"
|
||||
else
|
||||
SERVICES="$SERVICES_PARAM"
|
||||
fi
|
||||
|
||||
# Build each service SEQUENTIALLY to avoid registry upload conflicts
|
||||
# Track build results using files (variables don't persist across subshells)
|
||||
# Note: Use /tekton/home instead of /tmp as Kaniko container doesn't have /tmp
|
||||
BUILD_STATUS_FILE="/tekton/home/build_status"
|
||||
echo "0" > "$BUILD_STATUS_FILE.success"
|
||||
echo "0" > "$BUILD_STATUS_FILE.failed"
|
||||
echo "" > "$BUILD_STATUS_FILE.failed_services"
|
||||
|
||||
# Convert comma-separated to newline-separated and iterate
|
||||
# Using for loop instead of pipe to avoid subshell issues
|
||||
SERVICES_LIST=$(echo "$SERVICES" | tr ',' ' ')
|
||||
|
||||
for service in $SERVICES_LIST; do
|
||||
service=$(echo "$service" | tr -d ' ') # Trim whitespace
|
||||
if [ -n "$service" ] && [ "$service" != "none" ] && [ "$service" != "infrastructure" ] && [ "$service" != "shared" ]; then
|
||||
echo ""
|
||||
echo "==================================================================="
|
||||
echo "Building service: $service"
|
||||
echo "==================================================================="
|
||||
|
||||
# Determine Dockerfile path, context path, and image name
|
||||
# Folder names are: auth, tenant, gateway, frontend, alert_processor, etc.
|
||||
# Image names MUST match what's in the Kubernetes manifests exactly
|
||||
# The manifests use the folder name directly (with underscores preserved)
|
||||
#
|
||||
# CONTEXT_PATH is important - some Dockerfiles expect the context to be their directory
|
||||
# (e.g., frontend expects context=frontend/), while services expect context=workspace root
|
||||
if [ "$service" = "gateway" ]; then
|
||||
DOCKERFILE_PATH="$WORKSPACE/gateway/Dockerfile"
|
||||
CONTEXT_PATH="$WORKSPACE"
|
||||
IMAGE_NAME="gateway"
|
||||
elif [ "$service" = "frontend" ]; then
|
||||
# Frontend Dockerfile expects context to be the frontend/ directory
|
||||
# because it does COPY package*.json ./ and COPY nginx.conf etc.
|
||||
DOCKERFILE_PATH="$WORKSPACE/frontend/Dockerfile.kubernetes"
|
||||
CONTEXT_PATH="$WORKSPACE/frontend"
|
||||
IMAGE_NAME="frontend"
|
||||
else
|
||||
DOCKERFILE_PATH="$WORKSPACE/services/$service/Dockerfile"
|
||||
CONTEXT_PATH="$WORKSPACE"
|
||||
# Use folder name directly - matches manifest image references
|
||||
# e.g., auth, tenant, ai_insights, alert_processor, demo_session, external
|
||||
IMAGE_NAME="$service"
|
||||
fi
|
||||
|
||||
/kaniko/executor \
|
||||
--dockerfile="$DOCKERFILE_PATH" \
|
||||
--destination="$(params.registry)/$service:$(params.git-revision)" \
|
||||
--context="$(workspaces.source.path)" \
|
||||
--build-arg="BASE_REGISTRY=$(params.base-registry)" \
|
||||
--build-arg="PYTHON_IMAGE=$(params.python-image)" \
|
||||
--cache=true \
|
||||
--cache-repo="$(params.registry)/cache"
|
||||
# Check if Dockerfile exists
|
||||
if [ ! -f "$DOCKERFILE_PATH" ]; then
|
||||
echo "Warning: Dockerfile not found at $DOCKERFILE_PATH, skipping..."
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "Successfully built: $(params.registry)/$service:$(params.git-revision)"
|
||||
echo "Building $service -> Image: $IMAGE_NAME"
|
||||
|
||||
# Build with retry logic to handle transient registry errors
|
||||
RETRY_COUNT=0
|
||||
MAX_RETRIES=2
|
||||
BUILD_SUCCESS=false
|
||||
|
||||
while [ "$RETRY_COUNT" -le "$MAX_RETRIES" ] && [ "$BUILD_SUCCESS" = "false" ]; do
|
||||
if [ "$RETRY_COUNT" -gt 0 ]; then
|
||||
echo "Retry $RETRY_COUNT/$MAX_RETRIES for $IMAGE_NAME..."
|
||||
# Wait before retry to let registry recover
|
||||
sleep 10
|
||||
fi
|
||||
|
||||
echo "Context: $CONTEXT_PATH"
|
||||
if /kaniko/executor \
|
||||
--dockerfile="$DOCKERFILE_PATH" \
|
||||
--destination="$(params.registry)/$IMAGE_NAME:$(params.git-revision)" \
|
||||
--context="$CONTEXT_PATH" \
|
||||
--build-arg="BASE_REGISTRY=$(params.base-registry)" \
|
||||
--build-arg="PYTHON_IMAGE=$(params.python-image)" \
|
||||
--cache=true \
|
||||
--cache-repo="$(params.registry)/cache" \
|
||||
--cache-ttl=168h \
|
||||
--push-retry=3 \
|
||||
--image-fs-extract-retry=3; then
|
||||
BUILD_SUCCESS=true
|
||||
echo "Successfully built and pushed: $(params.registry)/$IMAGE_NAME:$(params.git-revision)"
|
||||
# Increment success count
|
||||
COUNT=$(cat "$BUILD_STATUS_FILE.success")
|
||||
echo $((COUNT + 1)) > "$BUILD_STATUS_FILE.success"
|
||||
else
|
||||
RETRY_COUNT=$((RETRY_COUNT + 1))
|
||||
echo "Build/push failed for $IMAGE_NAME (attempt $RETRY_COUNT)"
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$BUILD_SUCCESS" = "false" ]; then
|
||||
echo "ERROR: Failed to build $IMAGE_NAME after $MAX_RETRIES retries"
|
||||
# Increment failed count and record service name
|
||||
COUNT=$(cat "$BUILD_STATUS_FILE.failed")
|
||||
echo $((COUNT + 1)) > "$BUILD_STATUS_FILE.failed"
|
||||
echo "$service" >> "$BUILD_STATUS_FILE.failed_services"
|
||||
fi
|
||||
|
||||
# Small delay between services to let registry settle
|
||||
# This prevents "offset mismatch" errors from concurrent uploads
|
||||
echo "Waiting 5s before next build to let registry settle..."
|
||||
sleep 5
|
||||
fi
|
||||
done
|
||||
|
||||
# Read final counts
|
||||
SUCCESS_COUNT=$(cat "$BUILD_STATUS_FILE.success")
|
||||
FAILED_COUNT=$(cat "$BUILD_STATUS_FILE.failed")
|
||||
FAILED_SERVICES=$(cat "$BUILD_STATUS_FILE.failed_services" | tr '\n' ',' | sed 's/,$//')
|
||||
|
||||
echo ""
|
||||
echo "==================================================================="
|
||||
echo "Build completed successfully!"
|
||||
echo "Build Summary"
|
||||
echo "==================================================================="
|
||||
echo "success" > $(results.build-status.path)
|
||||
echo "Successful builds: $SUCCESS_COUNT"
|
||||
echo "Failed builds: $FAILED_COUNT"
|
||||
if [ -n "$FAILED_SERVICES" ]; then
|
||||
echo "Failed services: $FAILED_SERVICES"
|
||||
fi
|
||||
echo "==================================================================="
|
||||
|
||||
# Set result based on outcome
|
||||
# IMPORTANT: Use echo -n (no newline) to avoid trailing newline in results
|
||||
# Trailing newlines cause Tekton when expressions to fail matching
|
||||
if [ "$FAILED_COUNT" -gt 0 ]; then
|
||||
if [ "$SUCCESS_COUNT" -gt 0 ]; then
|
||||
echo -n "partial" > $(results.build-status.path)
|
||||
echo "Build completed with some failures"
|
||||
else
|
||||
echo -n "failed" > $(results.build-status.path)
|
||||
echo "All builds failed!"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo -n "success" > $(results.build-status.path)
|
||||
echo "All builds completed successfully!"
|
||||
fi
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 4Gi
|
||||
memory: 8Gi
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 1Gi
|
||||
memory: 2Gi
|
||||
@@ -19,9 +19,18 @@ spec:
|
||||
description: Git revision being processed
|
||||
steps:
|
||||
- name: generate-summary
|
||||
image: alpine
|
||||
image: alpine:3.19
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65532
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
script: |
|
||||
#!/bin/bash
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
echo "=== Bakery-IA CI Pipeline Summary ==="
|
||||
|
||||
@@ -23,12 +23,21 @@ spec:
|
||||
default: "false"
|
||||
steps:
|
||||
- name: run-unit-tests
|
||||
image: registry.bakewise.ai/bakery-admin/python:3.11-slim
|
||||
image: python:3.11-slim
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65532
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
workingDir: $(workspaces.source.path)
|
||||
script: |
|
||||
#!/bin/bash
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
|
||||
echo "============================================"
|
||||
echo "Running Unit Tests"
|
||||
echo "Services: $(params.services)"
|
||||
@@ -57,12 +66,21 @@ spec:
|
||||
cpu: 200m
|
||||
memory: 512Mi
|
||||
- name: run-integration-tests
|
||||
image: registry.bakewise.ai/bakery-admin/python:3.11-slim
|
||||
image: python:3.11-slim
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65532
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
workingDir: $(workspaces.source.path)
|
||||
script: |
|
||||
#!/bin/bash
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
|
||||
echo "============================================"
|
||||
echo "Running Integration Tests"
|
||||
echo "Services: $(params.services)"
|
||||
|
||||
@@ -35,8 +35,19 @@ spec:
|
||||
steps:
|
||||
- name: update-manifests
|
||||
image: alpine/git:2.43.0
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65532
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
workingDir: $(workspaces.source.path)
|
||||
env:
|
||||
- name: HOME
|
||||
value: /tekton/home
|
||||
- name: GIT_USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
@@ -48,7 +59,7 @@ spec:
|
||||
name: gitea-git-credentials
|
||||
key: password
|
||||
script: |
|
||||
#!/bin/bash
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
echo "============================================"
|
||||
@@ -63,67 +74,151 @@ spec:
|
||||
# Configure git
|
||||
git config --global user.email "ci@bakery-ia.local"
|
||||
git config --global user.name "bakery-ia-ci"
|
||||
# Mark directories as safe to avoid ownership issues
|
||||
git config --global --add safe.directory /tmp/gitops
|
||||
git config --global --add safe.directory "$(workspaces.source.path)"
|
||||
|
||||
# Clone the main repository (not a separate gitops repo)
|
||||
# Use internal cluster DNS which works in all environments
|
||||
REPO_URL="https://${GIT_USERNAME}:${GIT_PASSWORD}@gitea-http.gitea.svc.cluster.local:3000/bakery-admin/bakery-ia.git"
|
||||
# Use external HTTPS URL via ingress for reliable TLS connectivity
|
||||
REPO_URL="https://${GIT_USERNAME}:${GIT_PASSWORD}@gitea.bakewise.ai/bakery-admin/bakery-ia.git"
|
||||
git clone "$REPO_URL" /tmp/gitops
|
||||
|
||||
cd /tmp/gitops
|
||||
|
||||
# Switch to target branch
|
||||
git checkout "$(params.git-branch)" || git checkout -b "$(params.git-branch)"
|
||||
# The git-branch param may come as "refs/heads/main" from webhook, extract just the branch name
|
||||
BRANCH_NAME=$(echo "$(params.git-branch)" | sed 's|refs/heads/||')
|
||||
echo "Target branch: $BRANCH_NAME"
|
||||
git checkout "$BRANCH_NAME" || git checkout -b "$BRANCH_NAME"
|
||||
|
||||
# Compute short hash once for job name updates
|
||||
SHORT_HASH=$(echo "$(params.git-revision)" | cut -c 1-8)
|
||||
|
||||
# Handle special cases for service discovery
|
||||
# "all" = all services + gateway + frontend
|
||||
# "services-and-gateway" = all services + gateway (no frontend) - used when shared/ changes
|
||||
SERVICES_PARAM=$(echo "$(params.services)" | tr -d '\n' | tr -d ' ')
|
||||
WORKSPACE="$(workspaces.source.path)"
|
||||
|
||||
if [ "$SERVICES_PARAM" = "all" ] || [ "$SERVICES_PARAM" = "services-and-gateway" ]; then
|
||||
echo "Expanding '$SERVICES_PARAM' to actual service list..."
|
||||
SERVICES=""
|
||||
# Find all services with Dockerfiles
|
||||
if [ -d "$WORKSPACE/services" ]; then
|
||||
for svc_name in $(ls "$WORKSPACE/services/"); do
|
||||
if [ -f "$WORKSPACE/services/$svc_name/Dockerfile" ]; then
|
||||
if [ -z "$SERVICES" ]; then
|
||||
SERVICES="$svc_name"
|
||||
else
|
||||
SERVICES="$SERVICES,$svc_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
# Add gateway
|
||||
if [ -d "$WORKSPACE/gateway" ]; then
|
||||
if [ -z "$SERVICES" ]; then
|
||||
SERVICES="gateway"
|
||||
else
|
||||
SERVICES="$SERVICES,gateway"
|
||||
fi
|
||||
fi
|
||||
# Add frontend ONLY for "all" (not for "services-and-gateway")
|
||||
if [ "$SERVICES_PARAM" = "all" ] && [ -d "$WORKSPACE/frontend" ]; then
|
||||
if [ -z "$SERVICES" ]; then
|
||||
SERVICES="frontend"
|
||||
else
|
||||
SERVICES="$SERVICES,frontend"
|
||||
fi
|
||||
fi
|
||||
echo "Expanded services: $SERVICES"
|
||||
else
|
||||
SERVICES="$SERVICES_PARAM"
|
||||
fi
|
||||
|
||||
# Update image tags in Kubernetes manifests
|
||||
for service in $(echo "$(params.services)" | tr ',' '\n'); do
|
||||
# Service names come from detect-changes task as folder names: auth, tenant, ai_insights, etc.
|
||||
for service in $(echo "$SERVICES" | tr ',' '\n'); do
|
||||
service=$(echo "$service" | xargs) # Trim whitespace
|
||||
if [ -n "$service" ] && [ "$service" != "none" ] && [ "$service" != "infrastructure" ] && [ "$service" != "shared" ]; then
|
||||
echo ""
|
||||
echo "============================================"
|
||||
echo "Updating manifest for service: $service"
|
||||
echo "============================================"
|
||||
|
||||
# Format service name for directory (convert from kebab-case to snake_case if needed)
|
||||
# Handle special cases like demo-session -> demo_session, alert-processor -> alert_processor, etc.
|
||||
formatted_service=$(echo "$service" | sed 's/-/_/g')
|
||||
# IMAGE_NAME is the same as the service folder name (matching Kaniko output)
|
||||
# This ensures consistency: folder name = image name = manifest reference
|
||||
IMAGE_NAME="$service"
|
||||
|
||||
# Determine manifest paths based on service
|
||||
# Directory structure uses hyphens: ai-insights, alert-processor, demo-session
|
||||
# But image names use underscores: ai_insights, alert_processor, demo_session
|
||||
service_dir=$(echo "$service" | sed 's/_/-/g')
|
||||
|
||||
# For gateway and frontend, they have different directory structures
|
||||
if [ "$service" = "gateway" ]; then
|
||||
MANIFEST_PATH="infrastructure/platform/gateway/gateway-service.yaml"
|
||||
IMAGE_NAME="gateway" # gateway image name is just "gateway"
|
||||
elif [ "$service" = "frontend" ]; then
|
||||
MANIFEST_PATH="infrastructure/services/microservices/frontend/frontend-service.yaml"
|
||||
IMAGE_NAME="dashboard" # frontend service uses "dashboard" as image name
|
||||
elif [ "$service" = "alert_processor" ]; then
|
||||
MANIFEST_PATH="infrastructure/services/microservices/alert-processor/alert-processor.yaml"
|
||||
elif [ "$service" = "demo_session" ]; then
|
||||
# demo-session uses deployment.yaml instead of demo-session-service.yaml
|
||||
MANIFEST_PATH="infrastructure/services/microservices/demo-session/deployment.yaml"
|
||||
else
|
||||
# For microservices, look in the microservices directory
|
||||
# Convert service name to directory format (kebab-case)
|
||||
service_dir=$(echo "$service" | sed 's/_/-/g')
|
||||
|
||||
# Check for different possible manifest file names
|
||||
if [ -f "infrastructure/services/microservices/$service_dir/deployment.yaml" ]; then
|
||||
MANIFEST_PATH="infrastructure/services/microservices/$service_dir/deployment.yaml"
|
||||
elif [ -f "infrastructure/services/microservices/$service_dir/${formatted_service}-service.yaml" ]; then
|
||||
MANIFEST_PATH="infrastructure/services/microservices/$service_dir/${formatted_service}-service.yaml"
|
||||
elif [ -f "infrastructure/services/microservices/$service_dir/${service_dir}-service.yaml" ]; then
|
||||
MANIFEST_PATH="infrastructure/services/microservices/$service_dir/${service_dir}-service.yaml"
|
||||
else
|
||||
# Default to the standard naming pattern
|
||||
MANIFEST_PATH="infrastructure/services/microservices/$service_dir/${formatted_service}-service.yaml"
|
||||
fi
|
||||
|
||||
# For most services, the image name follows the pattern service-name-service
|
||||
IMAGE_NAME="${service_dir}-service"
|
||||
# Standard services: auth, tenant, orders, inventory, etc.
|
||||
# Also handles: ai_insights -> ai-insights, external -> external
|
||||
MANIFEST_PATH="infrastructure/services/microservices/${service_dir}/${service_dir}-service.yaml"
|
||||
fi
|
||||
|
||||
# Update the image tag in the deployment YAML
|
||||
if [ -f "$MANIFEST_PATH" ]; then
|
||||
# Update image reference from bakery/image_name:tag to registry/image_name:git_revision
|
||||
# Handle various image name formats that might exist in the manifests
|
||||
sed -i "s|image: bakery/${IMAGE_NAME}:.*|image: $(params.registry)/${IMAGE_NAME}:$(params.git-revision)|g" "$MANIFEST_PATH"
|
||||
# Also handle the case where the image name might be formatted differently
|
||||
sed -i "s|image: bakery/${service}:.*|image: $(params.registry)/${service}:$(params.git-revision)|g" "$MANIFEST_PATH"
|
||||
sed -i "s|image: bakery/${formatted_service}:.*|image: $(params.registry)/${formatted_service}:$(params.git-revision)|g" "$MANIFEST_PATH"
|
||||
|
||||
echo "Updated image in: $MANIFEST_PATH for image: bakery/${IMAGE_NAME}:* -> $(params.registry)/${IMAGE_NAME}:$(params.git-revision)"
|
||||
# Update image reference - match the exact image name pattern used in manifests
|
||||
sed -i "s|image: registry.bakewise.ai/bakery-admin/${IMAGE_NAME}:.*|image: $(params.registry)/${IMAGE_NAME}:$(params.git-revision)|g" "$MANIFEST_PATH"
|
||||
echo "Updated: $MANIFEST_PATH -> $(params.registry)/${IMAGE_NAME}:$(params.git-revision)"
|
||||
else
|
||||
echo "Warning: Manifest file not found: $MANIFEST_PATH"
|
||||
echo "Warning: Manifest not found: $MANIFEST_PATH"
|
||||
fi
|
||||
|
||||
# Update migration job if it exists
|
||||
# Migration jobs use the hyphenated directory name
|
||||
MIGRATION_JOB_PATH="infrastructure/services/microservices/${service_dir}/migrations/${service_dir}-migration-job.yaml"
|
||||
if [ -f "$MIGRATION_JOB_PATH" ]; then
|
||||
# Update migration job image reference
|
||||
sed -i "s|image: registry.bakewise.ai/bakery-admin/${IMAGE_NAME}:.*|image: $(params.registry)/${IMAGE_NAME}:$(params.git-revision)|g" "$MIGRATION_JOB_PATH"
|
||||
# Update job name to include short commit hash (makes it unique for K8s)
|
||||
sed -i "s|name: ${service_dir}-migration-[a-f0-9]*|name: ${service_dir}-migration-${SHORT_HASH}|g" "$MIGRATION_JOB_PATH"
|
||||
# Also update labels to match
|
||||
sed -i "s|app.kubernetes.io/name: ${service_dir}-migration-[a-f0-9]*|app.kubernetes.io/name: ${service_dir}-migration-${SHORT_HASH}|g" "$MIGRATION_JOB_PATH"
|
||||
echo "Updated migration: $MIGRATION_JOB_PATH"
|
||||
fi
|
||||
|
||||
# Special case: external service has additional jobs
|
||||
if [ "$service" = "external" ]; then
|
||||
# Update external-data-init job
|
||||
EXTERNAL_DATA_INIT_JOB="infrastructure/services/microservices/external/migrations/external-data-init-job.yaml"
|
||||
if [ -f "$EXTERNAL_DATA_INIT_JOB" ]; then
|
||||
sed -i "s|image: registry.bakewise.ai/bakery-admin/external:.*|image: $(params.registry)/external:$(params.git-revision)|g" "$EXTERNAL_DATA_INIT_JOB"
|
||||
sed -i "s|name: external-data-init-[a-f0-9]*|name: external-data-init-${SHORT_HASH}|g" "$EXTERNAL_DATA_INIT_JOB"
|
||||
echo "Updated external-data-init job: $EXTERNAL_DATA_INIT_JOB"
|
||||
fi
|
||||
|
||||
# Update external-data-rotation cronjob
|
||||
EXTERNAL_DATA_ROTATION_JOB="infrastructure/services/microservices/external/cronjobs/external-data-rotation-cronjob.yaml"
|
||||
if [ -f "$EXTERNAL_DATA_ROTATION_JOB" ]; then
|
||||
sed -i "s|image: registry.bakewise.ai/bakery-admin/external:.*|image: $(params.registry)/external:$(params.git-revision)|g" "$EXTERNAL_DATA_ROTATION_JOB"
|
||||
sed -i "s|name: external-data-rotation-[a-f0-9]*|name: external-data-rotation-${SHORT_HASH}|g" "$EXTERNAL_DATA_ROTATION_JOB"
|
||||
echo "Updated external-data-rotation cronjob: $EXTERNAL_DATA_ROTATION_JOB"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Special case: demo_session service has cleanup worker
|
||||
if [ "$service" = "demo_session" ]; then
|
||||
DEMO_CLEANUP_WORKER="infrastructure/services/microservices/demo-session/demo-cleanup-worker.yaml"
|
||||
if [ -f "$DEMO_CLEANUP_WORKER" ]; then
|
||||
sed -i "s|image: registry.bakewise.ai/bakery-admin/demo_session:.*|image: $(params.registry)/demo_session:$(params.git-revision)|g" "$DEMO_CLEANUP_WORKER"
|
||||
sed -i "s|name: demo-cleanup-worker-[a-f0-9]*|name: demo-cleanup-worker-${SHORT_HASH}|g" "$DEMO_CLEANUP_WORKER"
|
||||
echo "Updated demo-cleanup-worker: $DEMO_CLEANUP_WORKER"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
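# Illustrative sketch of what the loop above produces (paths, image names, and the
# revision are assumed for the example, not taken from this change): for the service
# "ai_insights", service_dir becomes "ai-insights" and IMAGE_NAME "ai-insights-service",
# so a manifest line such as
#     image: bakery/ai-insights-service:latest
# is rewritten by the sed commands to
#     image: registry.bakewise.ai/bakery-admin/ai-insights-service:6c6a9fc58cb98ad24729af84d87f25c7b50874c9
# and any migration Job matching "name: ai-insights-migration-<hash>" is renamed to
# "name: ai-insights-migration-${SHORT_HASH}", so Kubernetes runs it as a fresh Job.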
|
||||
@@ -133,8 +228,9 @@ spec:
|
||||
git add .
|
||||
git status
|
||||
if ! git diff --cached --quiet; then
|
||||
git commit -m "Update images for services: $(params.services) [skip ci]"
|
||||
git push origin "$(params.git-branch)"
|
||||
git commit -m "Update images for services: $SERVICES [skip ci]"
|
||||
echo "Pushing to branch: $BRANCH_NAME"
|
||||
git push origin "HEAD:$BRANCH_NAME"
|
||||
echo "GitOps manifests updated successfully"
|
||||
else
|
||||
echo "No changes to commit"
|
||||
|
||||
@@ -12,12 +12,12 @@ metadata:
|
||||
spec:
|
||||
params:
|
||||
- name: git-repo-url
|
||||
value: "{{"{{ .payload.repository.clone_url }}"}}"
|
||||
value: $(body.repository.clone_url)
|
||||
- name: git-revision
|
||||
value: "{{"{{ .payload.after }}"}}"
|
||||
value: $(body.after)
|
||||
- name: git-branch
|
||||
value: "{{"{{ .payload.ref }}" | replace "refs/heads/" "" | replace "refs/tags/" "" }}"
|
||||
value: $(body.ref)
|
||||
- name: git-repo-name
|
||||
value: "{{"{{ .payload.repository.name }}"}}"
|
||||
value: $(body.repository.name)
|
||||
- name: git-repo-full-name
|
||||
value: "{{"{{ .payload.repository.full_name }}"}}"
|
||||
value: $(body.repository.full_name)
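# Hedged illustration: the $(body.*) expressions above are the syntax Tekton Triggers
# bindings use to extract fields from the incoming webhook JSON body. For a typical
# Gitea push payload (the values below are examples, not taken from this repo) they
# would resolve to:
#   "ref":   "refs/heads/main"                               -> git-branch
#   "after": "6c6a9fc58cb98ad24729af84d87f25c7b50874c9"      -> git-revision
#   "repository": { "clone_url": "...", "name": "bakery-ia", "full_name": "bakery-admin/bakery-ia" }
# Note that $(body.ref) carries the full "refs/heads/<branch>" form, unlike the previous
# template which stripped the "refs/heads/" prefix (unless an interceptor rewrites it elsewhere).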
|
||||
@@ -41,6 +41,9 @@ spec:
|
||||
# Track the source commit
|
||||
bakery-ia.io/git-revision: $(tt.params.git-revision)
|
||||
bakery-ia.io/git-branch: $(tt.params.git-branch)
|
||||
# Automatic cleanup: delete PipelineRun after completion
|
||||
# Default: 1 hour (3600 seconds). Configure via values.yaml
|
||||
tekton.dev/pipelinerunDeletionPropagation: "foreground"
|
||||
spec:
|
||||
pipelineRef:
|
||||
name: bakery-ia-ci
|
||||
@@ -70,7 +73,7 @@ spec:
|
||||
- name: registry
|
||||
value: $(tt.params.registry-url)
|
||||
- name: skip-tests
|
||||
value: "false"
|
||||
value: "true"
|
||||
- name: dry-run
|
||||
value: "false"
|
||||
# Timeout for the entire pipeline run
|
||||
|
||||
@@ -93,6 +93,20 @@ serviceAccounts:
|
||||
pipeline:
|
||||
name: "tekton-pipeline-sa"
|
||||
|
||||
# Cleanup and retention configuration
|
||||
cleanup:
|
||||
# PipelineRun retention (requires Tekton Pipelines >= v0.52.0)
|
||||
# Set to 0 to disable automatic cleanup
|
||||
pipelineRuns:
|
||||
# Keep last N successful PipelineRuns per Pipeline
|
||||
keepSuccessful: 3
|
||||
# Keep last N failed PipelineRuns per Pipeline
|
||||
keepFailed: 5
|
||||
# TaskRun retention
|
||||
taskRuns:
|
||||
keepSuccessful: 3
|
||||
keepFailed: 5
|
||||
|
||||
# Labels to apply to resources
|
||||
labels:
|
||||
app:
|
||||
|
||||
@@ -387,6 +387,7 @@ data:
|
||||
VITE_PILOT_COUPON_CODE: "PILOT2025"
|
||||
VITE_PILOT_TRIAL_MONTHS: "3"
|
||||
VITE_STRIPE_PUBLISHABLE_KEY: "pk_test_51QuxKyIzCdnBmAVTGM8fvXYkItrBUILz6lHYwhAva6ZAH1HRi0e8zDRgZ4X3faN0zEABp5RHjCVBmMJL3aKXbaC200fFrSNnPl"
|
||||
VITE_STRIPE_ACCOUNT_ID: "acct_1QuxKsIucMC6K1cg"
|
||||
|
||||
# ================================================================
|
||||
# LOCATION SETTINGS (Nominatim Geocoding)
|
||||
@@ -488,4 +489,4 @@ data:
|
||||
EXTERNAL_ENABLED_CITIES: "madrid"
|
||||
EXTERNAL_RETENTION_MONTHS: "6" # Reduced from 24 to avoid memory issues during init
|
||||
EXTERNAL_CACHE_TTL_DAYS: "7"
|
||||
EXTERNAL_REDIS_URL: "rediss://redis-service:6379/0?ssl_cert_reqs=none"
|
||||
EXTERNAL_REDIS_URL: "rediss://redis-service:6379/0"
|
||||
|
||||
@@ -21,9 +21,9 @@ resources:
|
||||
- prod-certificate.yaml
|
||||
|
||||
|
||||
# SigNoz is managed via Helm deployment (see infrastructure/helm/deploy-signoz.sh)
|
||||
# Monitoring is handled by SigNoz (no separate monitoring components needed)
|
||||
# SigNoz paths are now included in the main ingress (ingress-https.yaml)
|
||||
# SigNoz is managed via Helm deployment (see infrastructure/monitoring/signoz/deploy-signoz.sh)
|
||||
# Monitoring is handled by SigNoz with its own dedicated ingress
|
||||
# SigNoz creates its own ingress resource for monitoring.bakewise.ai
|
||||
|
||||
labels:
|
||||
- includeSelectors: false
|
||||
@@ -107,6 +107,12 @@ patches:
|
||||
- op: add
|
||||
path: /data/VITE_ENVIRONMENT
|
||||
value: "production"
|
||||
- op: replace
|
||||
path: /data/VITE_STRIPE_PUBLISHABLE_KEY
|
||||
value: "pk_test_51QuxKyIzCdnBmAVTGM8fvXYkItrBUILz6lHYwhAva6ZAH1HRi0e8zDRgZ4X3faN0zEABp5RHjCVBmMJL3aKXbaC200fFrSNnPl"
|
||||
- op: add
|
||||
path: /data/VITE_STRIPE_ACCOUNT_ID
|
||||
value: "acct_1QuxKsIucMC6K1cg"
|
||||
# Add imagePullSecrets to all Deployments for gitea registry authentication
|
||||
- target:
|
||||
kind: Deployment
|
||||
@@ -204,72 +210,120 @@ patches:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
|
||||
# =============================================================================
|
||||
# CPU Request Optimization for Production
|
||||
# Reduce CPU requests to match actual usage (was 100m, actual ~5-10m)
|
||||
# This prevents scheduler rejections due to overcommitted requests
|
||||
# =============================================================================
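# (Hedged note: the "~5-10m" figure can be re-checked at any time with
#  "microk8s kubectl top pods -n bakery-ia" (requires the metrics-server addon),
#  or via the pod CPU metrics collected by the k8s-infra/SigNoz setup in this repo.)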
|
||||
|
||||
# Database deployments - reduce CPU request from 100m to 25m
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: ".*-db$"
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "25m"
|
||||
|
||||
# Microservice deployments - reduce CPU request from 100m to 25m
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: ".*-service$"
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "25m"
|
||||
|
||||
# Other core services
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: gateway
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "25m"
|
||||
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: alert-processor
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "25m"
|
||||
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: frontend
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "50m"
|
||||
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: redis
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "25m"
|
||||
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: rabbitmq
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "50m"
|
||||
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
kind: Deployment
|
||||
name: minio
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "50m"
|
||||
|
||||
# Migration jobs - reduce CPU request from 100m to 25m
|
||||
- target:
|
||||
group: batch
|
||||
version: v1
|
||||
kind: Job
|
||||
name: ".*-migration$"
|
||||
namespace: bakery-ia
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/resources/requests/cpu
|
||||
value: "25m"
|
||||
|
||||
images:
|
||||
# Application services
|
||||
- name: bakery/auth-service
|
||||
newName: registry.bakewise.ai/bakery-admin/auth-service
|
||||
newTag: latest
|
||||
- name: bakery/tenant-service
|
||||
newName: registry.bakewise.ai/bakery-admin/tenant-service
|
||||
newTag: latest
|
||||
- name: bakery/training-service
|
||||
newName: registry.bakewise.ai/bakery-admin/training-service
|
||||
newTag: latest
|
||||
- name: bakery/forecasting-service
|
||||
newName: registry.bakewise.ai/bakery-admin/forecasting-service
|
||||
newTag: latest
|
||||
- name: bakery/sales-service
|
||||
newName: registry.bakewise.ai/bakery-admin/sales-service
|
||||
newTag: latest
|
||||
- name: bakery/external-service
|
||||
newName: registry.bakewise.ai/bakery-admin/external-service
|
||||
newTag: latest
|
||||
- name: bakery/notification-service
|
||||
newName: registry.bakewise.ai/bakery-admin/notification-service
|
||||
newTag: latest
|
||||
- name: bakery/inventory-service
|
||||
newName: registry.bakewise.ai/bakery-admin/inventory-service
|
||||
newTag: latest
|
||||
- name: bakery/recipes-service
|
||||
newName: registry.bakewise.ai/bakery-admin/recipes-service
|
||||
newTag: latest
|
||||
- name: bakery/suppliers-service
|
||||
newName: registry.bakewise.ai/bakery-admin/suppliers-service
|
||||
newTag: latest
|
||||
- name: bakery/pos-service
|
||||
newName: registry.bakewise.ai/bakery-admin/pos-service
|
||||
newTag: latest
|
||||
- name: bakery/orders-service
|
||||
newName: registry.bakewise.ai/bakery-admin/orders-service
|
||||
newTag: latest
|
||||
- name: bakery/production-service
|
||||
newName: registry.bakewise.ai/bakery-admin/production-service
|
||||
newTag: latest
|
||||
- name: bakery/alert-processor
|
||||
newName: registry.bakewise.ai/bakery-admin/alert-processor
|
||||
newTag: latest
|
||||
- name: bakery/gateway
|
||||
newName: registry.bakewise.ai/bakery-admin/gateway
|
||||
newTag: latest
|
||||
- name: bakery/dashboard
|
||||
newName: registry.bakewise.ai/bakery-admin/dashboard
|
||||
newTag: latest
|
||||
# Missing services (added to fix ImagePullBackOff errors)
|
||||
- name: bakery/ai-insights-service
|
||||
newName: registry.bakewise.ai/bakery-admin/ai-insights-service
|
||||
newTag: latest
|
||||
- name: bakery/demo-session-service
|
||||
newName: registry.bakewise.ai/bakery-admin/demo-session-service
|
||||
newTag: latest
|
||||
- name: bakery/distribution-service
|
||||
newName: registry.bakewise.ai/bakery-admin/distribution-service
|
||||
newTag: latest
|
||||
- name: bakery/orchestrator-service
|
||||
newName: registry.bakewise.ai/bakery-admin/orchestrator-service
|
||||
newTag: latest
|
||||
- name: bakery/procurement-service
|
||||
newName: registry.bakewise.ai/bakery-admin/procurement-service
|
||||
newTag: latest
|
||||
# =============================================================================
|
||||
# NOTE: Application service images (bakery/*) are NOT overridden here.
|
||||
# CI/CD pipeline (Tekton) updates base manifests directly with versioned tags.
|
||||
# This ensures deployments use the exact git revision that was built.
|
||||
# =============================================================================
|
||||
# Database images (cached in gitea registry for consistency)
|
||||
- name: postgres
|
||||
|
||||
infrastructure/monitoring/k8s-infra/README.md (new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
# Kubernetes Infrastructure Monitoring
|
||||
|
||||
This directory contains configurations for deploying Kubernetes infrastructure monitoring components that integrate with SigNoz.
|
||||
|
||||
## Components
|
||||
|
||||
| Component | Purpose | Metrics Endpoint |
|
||||
|-----------|---------|------------------|
|
||||
| **kube-state-metrics** | Kubernetes object metrics (pods, deployments, nodes, etc.) | `:8080/metrics` |
|
||||
| **node-exporter** | Host-level metrics (CPU, memory, disk, network) | `:9100/metrics` |
|
||||
|
||||
## Quick Start (MicroK8s Production)
|
||||
|
||||
```bash
|
||||
# 1. Deploy infrastructure monitoring components
|
||||
./deploy-k8s-infra-monitoring.sh --microk8s install
|
||||
|
||||
# 2. Upgrade SigNoz to scrape the new metrics
|
||||
microk8s helm3 upgrade signoz signoz/signoz \
|
||||
-n bakery-ia \
|
||||
-f ../signoz/signoz-values-prod.yaml
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Install
|
||||
|
||||
```bash
|
||||
# Standard Kubernetes
|
||||
./deploy-k8s-infra-monitoring.sh install
|
||||
|
||||
# MicroK8s
|
||||
./deploy-k8s-infra-monitoring.sh --microk8s install
|
||||
```
|
||||
|
||||
### Upgrade
|
||||
|
||||
```bash
|
||||
./deploy-k8s-infra-monitoring.sh --microk8s upgrade
|
||||
```
|
||||
|
||||
### Uninstall
|
||||
|
||||
```bash
|
||||
./deploy-k8s-infra-monitoring.sh --microk8s uninstall
|
||||
```
|
||||
|
||||
### Check Status
|
||||
|
||||
```bash
|
||||
./deploy-k8s-infra-monitoring.sh --microk8s status
|
||||
```
|
||||
|
||||
### Dry Run
|
||||
|
||||
```bash
|
||||
./deploy-k8s-infra-monitoring.sh --microk8s --dry-run install
|
||||
```
|
||||
|
||||
## Files
|
||||
|
||||
- `kube-state-metrics-values.yaml` - Helm values for kube-state-metrics
|
||||
- `node-exporter-values.yaml` - Helm values for node-exporter
|
||||
- `deploy-k8s-infra-monitoring.sh` - Deployment automation script
|
||||
|
||||
## SigNoz Integration
|
||||
|
||||
The SigNoz OTel Collector is configured (in `signoz-values-prod.yaml`) to scrape metrics from:
|
||||
|
||||
- `kube-state-metrics.bakery-ia.svc.cluster.local:8080`
|
||||
- `node-exporter-prometheus-node-exporter.bakery-ia.svc.cluster.local:9100`
|
||||
|
||||
After deploying these components, metrics will appear in SigNoz under:
|
||||
- **Infrastructure** > **Kubernetes** (for K8s object metrics)
|
||||
- **Infrastructure** > **Hosts** (for node metrics)
|
||||
|
||||
## Metrics Available
|
||||
|
||||
### From kube-state-metrics
|
||||
|
||||
- Pod status, phase, restarts
|
||||
- Deployment replicas (desired vs available)
|
||||
- Node conditions and capacity
|
||||
- PVC status and capacity
|
||||
- Resource requests and limits
|
||||
- Job/CronJob status
|
||||
|
||||
### From node-exporter
|
||||
|
||||
- CPU usage per core
|
||||
- Memory usage (total, free, cached)
|
||||
- Disk I/O and space
|
||||
- Network traffic (bytes in/out)
|
||||
- System load average
|
||||
- Filesystem usage
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Check if metrics are being scraped
|
||||
|
||||
```bash
|
||||
# Port-forward to kube-state-metrics
|
||||
microk8s kubectl port-forward svc/kube-state-metrics 8080:8080 -n bakery-ia &
|
||||
curl localhost:8080/metrics | head -50
|
||||
|
||||
# Port-forward to node-exporter
|
||||
microk8s kubectl port-forward svc/node-exporter-prometheus-node-exporter 9100:9100 -n bakery-ia &
|
||||
curl localhost:9100/metrics | head -50
|
||||
```
|
||||
|
||||
### Check OTel Collector logs
|
||||
|
||||
```bash
|
||||
microk8s kubectl logs -l app.kubernetes.io/name=signoz-otel-collector -n bakery-ia --tail=100
|
||||
```
|
||||
|
||||
### Verify pods are running
|
||||
|
||||
```bash
|
||||
microk8s kubectl get pods -n bakery-ia | grep -E "(kube-state|node-exporter)"
|
||||
```
|
||||
infrastructure/monitoring/k8s-infra/deploy-k8s-infra-monitoring.sh (new executable file, 347 lines)
@@ -0,0 +1,347 @@
|
||||
#!/bin/bash
|
||||
|
||||
# ============================================================================
|
||||
# Kubernetes Infrastructure Monitoring Deployment Script
|
||||
# ============================================================================
|
||||
# Deploys kube-state-metrics and node-exporter for Kubernetes infrastructure
|
||||
# monitoring in SigNoz
|
||||
# ============================================================================
|
||||
|
||||
set -e
|
||||
|
||||
# Color codes for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Configuration
|
||||
NAMESPACE="bakery-ia"
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Function to display help
|
||||
show_help() {
|
||||
echo "Usage: $0 [OPTIONS] [COMMAND]"
|
||||
echo ""
|
||||
echo "Deploy Kubernetes infrastructure monitoring components"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " install Install kube-state-metrics and node-exporter (default)"
|
||||
echo " upgrade Upgrade existing deployments"
|
||||
echo " uninstall Remove all infrastructure monitoring components"
|
||||
echo " status Show deployment status"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " -h, --help Show this help message"
|
||||
echo " -d, --dry-run Show what would be done without executing"
|
||||
echo " -n, --namespace NS Specify namespace (default: bakery-ia)"
|
||||
echo " --microk8s Use microk8s helm3 command (for MicroK8s clusters)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 install # Install on standard k8s"
|
||||
echo " $0 --microk8s install # Install on MicroK8s"
|
||||
echo " $0 --microk8s upgrade # Upgrade on MicroK8s"
|
||||
echo " $0 --microk8s uninstall # Remove from MicroK8s"
|
||||
echo " $0 status # Check deployment status"
|
||||
}
|
||||
|
||||
# Parse arguments
|
||||
DRY_RUN=false
|
||||
USE_MICROK8S=false
|
||||
COMMAND="install"
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-h|--help)
|
||||
show_help
|
||||
exit 0
|
||||
;;
|
||||
-d|--dry-run)
|
||||
DRY_RUN=true
|
||||
shift
|
||||
;;
|
||||
-n|--namespace)
|
||||
NAMESPACE="$2"
|
||||
shift 2
|
||||
;;
|
||||
--microk8s)
|
||||
USE_MICROK8S=true
|
||||
shift
|
||||
;;
|
||||
install|upgrade|uninstall|status)
|
||||
COMMAND="$1"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Unknown argument: $1${NC}"
|
||||
show_help
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Set helm and kubectl commands based on environment
|
||||
if [[ "$USE_MICROK8S" == true ]]; then
|
||||
HELM_CMD="microk8s helm3"
|
||||
KUBECTL_CMD="microk8s kubectl"
|
||||
else
|
||||
HELM_CMD="helm"
|
||||
KUBECTL_CMD="kubectl"
|
||||
fi
|
||||
|
||||
# Function to check prerequisites
|
||||
check_prerequisites() {
|
||||
echo -e "${BLUE}Checking prerequisites...${NC}"
|
||||
|
||||
# Check helm
|
||||
if [[ "$USE_MICROK8S" == true ]]; then
|
||||
# Test if microk8s helm3 command works directly
|
||||
if ! microk8s helm3 version &> /dev/null; then
|
||||
echo -e "${RED}Error: MicroK8s helm3 addon is not working.${NC}"
|
||||
echo "Enable it with: microk8s enable helm3"
|
||||
exit 1
|
||||
fi
|
||||
echo -e "${GREEN}MicroK8s helm3 is available.${NC}"
|
||||
else
|
||||
if ! command -v helm &> /dev/null; then
|
||||
echo -e "${RED}Error: Helm is not installed.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check kubectl connectivity
|
||||
if ! $KUBECTL_CMD cluster-info &> /dev/null; then
|
||||
echo -e "${RED}Error: Cannot connect to Kubernetes cluster.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Prerequisites check passed.${NC}"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to setup Helm repository
|
||||
setup_helm_repo() {
|
||||
echo -e "${BLUE}Setting up Prometheus Community Helm repository...${NC}"
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " (dry-run) Would add prometheus-community Helm repository"
|
||||
return
|
||||
fi
|
||||
|
||||
if $HELM_CMD repo list 2>/dev/null | grep -q "prometheus-community"; then
|
||||
echo -e "${BLUE}Repository already added, updating...${NC}"
|
||||
$HELM_CMD repo update prometheus-community
|
||||
else
|
||||
$HELM_CMD repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
$HELM_CMD repo update
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Helm repository ready.${NC}"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to ensure namespace exists
|
||||
ensure_namespace() {
|
||||
echo -e "${BLUE}Ensuring namespace $NAMESPACE exists...${NC}"
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " (dry-run) Would create namespace if needed"
|
||||
return
|
||||
fi
|
||||
|
||||
if ! $KUBECTL_CMD get namespace "$NAMESPACE" &> /dev/null; then
|
||||
$KUBECTL_CMD create namespace "$NAMESPACE"
|
||||
echo -e "${GREEN}Namespace $NAMESPACE created.${NC}"
|
||||
else
|
||||
echo -e "${BLUE}Namespace $NAMESPACE already exists.${NC}"
|
||||
fi
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to install kube-state-metrics
|
||||
install_kube_state_metrics() {
|
||||
echo -e "${BLUE}Installing kube-state-metrics...${NC}"
|
||||
|
||||
local values_file="$SCRIPT_DIR/kube-state-metrics-values.yaml"
|
||||
|
||||
if [[ ! -f "$values_file" ]]; then
|
||||
echo -e "${RED}Error: Values file not found: $values_file${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " (dry-run) Would install kube-state-metrics"
|
||||
echo " Command: $HELM_CMD upgrade --install kube-state-metrics prometheus-community/kube-state-metrics -n $NAMESPACE -f $values_file"
|
||||
return
|
||||
fi
|
||||
|
||||
$HELM_CMD upgrade --install kube-state-metrics \
|
||||
prometheus-community/kube-state-metrics \
|
||||
-n "$NAMESPACE" \
|
||||
-f "$values_file" \
|
||||
--wait \
|
||||
--timeout 5m
|
||||
|
||||
echo -e "${GREEN}kube-state-metrics installed successfully.${NC}"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to install node-exporter
|
||||
install_node_exporter() {
|
||||
echo -e "${BLUE}Installing node-exporter...${NC}"
|
||||
|
||||
local values_file="$SCRIPT_DIR/node-exporter-values.yaml"
|
||||
|
||||
if [[ ! -f "$values_file" ]]; then
|
||||
echo -e "${RED}Error: Values file not found: $values_file${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " (dry-run) Would install node-exporter"
|
||||
echo " Command: $HELM_CMD upgrade --install node-exporter prometheus-community/prometheus-node-exporter -n $NAMESPACE -f $values_file"
|
||||
return
|
||||
fi
|
||||
|
||||
$HELM_CMD upgrade --install node-exporter \
|
||||
prometheus-community/prometheus-node-exporter \
|
||||
-n "$NAMESPACE" \
|
||||
-f "$values_file" \
|
||||
--wait \
|
||||
--timeout 5m
|
||||
|
||||
echo -e "${GREEN}node-exporter installed successfully.${NC}"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to uninstall components
|
||||
uninstall_components() {
|
||||
echo -e "${BLUE}Uninstalling Kubernetes infrastructure monitoring components...${NC}"
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " (dry-run) Would uninstall kube-state-metrics and node-exporter"
|
||||
return
|
||||
fi
|
||||
|
||||
# Uninstall kube-state-metrics
|
||||
if $HELM_CMD list -n "$NAMESPACE" | grep -q "kube-state-metrics"; then
|
||||
echo -e "${BLUE}Removing kube-state-metrics...${NC}"
|
||||
$HELM_CMD uninstall kube-state-metrics -n "$NAMESPACE" --wait
|
||||
echo -e "${GREEN}kube-state-metrics removed.${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}kube-state-metrics not found.${NC}"
|
||||
fi
|
||||
|
||||
# Uninstall node-exporter
|
||||
if $HELM_CMD list -n "$NAMESPACE" | grep -q "node-exporter"; then
|
||||
echo -e "${BLUE}Removing node-exporter...${NC}"
|
||||
$HELM_CMD uninstall node-exporter -n "$NAMESPACE" --wait
|
||||
echo -e "${GREEN}node-exporter removed.${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}node-exporter not found.${NC}"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to show deployment status
|
||||
show_status() {
|
||||
echo -e "${BLUE}=== Kubernetes Infrastructure Monitoring Status ===${NC}"
|
||||
echo ""
|
||||
|
||||
echo -e "${BLUE}Helm Releases:${NC}"
|
||||
$HELM_CMD list -n "$NAMESPACE" | grep -E "(kube-state-metrics|node-exporter)" || echo " No releases found"
|
||||
echo ""
|
||||
|
||||
echo -e "${BLUE}Pods:${NC}"
|
||||
$KUBECTL_CMD get pods -n "$NAMESPACE" -l 'app.kubernetes.io/name in (kube-state-metrics, prometheus-node-exporter)' 2>/dev/null || echo " No pods found"
|
||||
echo ""
|
||||
|
||||
echo -e "${BLUE}Services:${NC}"
|
||||
$KUBECTL_CMD get svc -n "$NAMESPACE" | grep -E "(kube-state-metrics|node-exporter)" || echo " No services found"
|
||||
echo ""
|
||||
|
||||
echo -e "${BLUE}Endpoints (for SigNoz scraping):${NC}"
|
||||
echo " kube-state-metrics: kube-state-metrics.$NAMESPACE.svc.cluster.local:8080"
|
||||
echo " node-exporter: node-exporter-prometheus-node-exporter.$NAMESPACE.svc.cluster.local:9100"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to show post-install instructions
|
||||
show_post_install_instructions() {
|
||||
echo -e "${BLUE}=== Post-Installation Instructions ===${NC}"
|
||||
echo ""
|
||||
echo "To enable SigNoz to scrape these metrics, update your SigNoz OTel Collector config."
|
||||
echo ""
|
||||
echo "Add the following to your signoz-values-prod.yaml under otelCollector.config:"
|
||||
echo ""
|
||||
cat << 'EOF'
|
||||
otelCollector:
|
||||
config:
|
||||
receivers:
|
||||
prometheus:
|
||||
config:
|
||||
scrape_configs:
|
||||
- job_name: 'kube-state-metrics'
|
||||
static_configs:
|
||||
- targets: ['kube-state-metrics.bakery-ia.svc.cluster.local:8080']
|
||||
scrape_interval: 30s
|
||||
- job_name: 'node-exporter'
|
||||
static_configs:
|
||||
- targets: ['node-exporter-prometheus-node-exporter.bakery-ia.svc.cluster.local:9100']
|
||||
scrape_interval: 30s
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp, prometheus]
|
||||
EOF
|
||||
echo ""
|
||||
echo "Then upgrade SigNoz:"
|
||||
if [[ "$USE_MICROK8S" == true ]]; then
|
||||
echo " microk8s helm3 upgrade signoz signoz/signoz -n $NAMESPACE -f infrastructure/monitoring/signoz/signoz-values-prod.yaml"
|
||||
else
|
||||
echo " helm upgrade signoz signoz/signoz -n $NAMESPACE -f infrastructure/monitoring/signoz/signoz-values-prod.yaml"
|
||||
fi
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
echo -e "${BLUE}"
|
||||
echo "=========================================="
|
||||
echo "Kubernetes Infrastructure Monitoring"
|
||||
echo "=========================================="
|
||||
echo -e "${NC}"
|
||||
|
||||
check_prerequisites
|
||||
|
||||
case $COMMAND in
|
||||
install)
|
||||
setup_helm_repo
|
||||
ensure_namespace
|
||||
install_kube_state_metrics
|
||||
install_node_exporter
|
||||
show_status
|
||||
show_post_install_instructions
|
||||
echo -e "${GREEN}Installation completed successfully!${NC}"
|
||||
;;
|
||||
upgrade)
|
||||
setup_helm_repo
|
||||
install_kube_state_metrics
|
||||
install_node_exporter
|
||||
show_status
|
||||
echo -e "${GREEN}Upgrade completed successfully!${NC}"
|
||||
;;
|
||||
uninstall)
|
||||
uninstall_components
|
||||
echo -e "${GREEN}Uninstallation completed.${NC}"
|
||||
;;
|
||||
status)
|
||||
show_status
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main
|
||||
@@ -0,0 +1,109 @@
|
||||
# Kube-State-Metrics Helm Values for Bakery IA
|
||||
# Chart: prometheus-community/kube-state-metrics
|
||||
# Documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics
|
||||
#
|
||||
# Install Command:
|
||||
# helm install kube-state-metrics prometheus-community/kube-state-metrics \
|
||||
# -n bakery-ia -f kube-state-metrics-values.yaml
|
||||
|
||||
# Image configuration
|
||||
image:
|
||||
registry: registry.k8s.io
|
||||
repository: kube-state-metrics/kube-state-metrics
|
||||
tag: "" # Uses chart default (latest stable)
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# Replicas - single instance is sufficient for most clusters
|
||||
replicas: 1
|
||||
|
||||
# Resource limits optimized for MicroK8s VPS
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 32Mi
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
# Service configuration
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 8080
|
||||
annotations: {}
|
||||
|
||||
# Prometheus scrape annotations
|
||||
prometheusScrape: true
|
||||
|
||||
# Which Kubernetes resources to collect metrics for
|
||||
# Full list available, but we focus on most useful ones
|
||||
collectors:
|
||||
- certificatesigningrequests
|
||||
- configmaps
|
||||
- cronjobs
|
||||
- daemonsets
|
||||
- deployments
|
||||
- endpoints
|
||||
- horizontalpodautoscalers
|
||||
- ingresses
|
||||
- jobs
|
||||
- leases
|
||||
- limitranges
|
||||
- namespaces
|
||||
- networkpolicies
|
||||
- nodes
|
||||
- persistentvolumeclaims
|
||||
- persistentvolumes
|
||||
- poddisruptionbudgets
|
||||
- pods
|
||||
- replicasets
|
||||
- replicationcontrollers
|
||||
- resourcequotas
|
||||
- secrets
|
||||
- services
|
||||
- statefulsets
|
||||
- storageclasses
|
||||
|
||||
# Namespace to watch (empty = all namespaces)
|
||||
namespaces: ""
|
||||
|
||||
# Node selector for scheduling
|
||||
nodeSelector: {}
|
||||
|
||||
# Tolerations
|
||||
tolerations: []
|
||||
|
||||
# Affinity rules
|
||||
affinity: {}
|
||||
|
||||
# Pod security context
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
fsGroup: 65534
|
||||
|
||||
# Container security context
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
readOnlyRootFilesystem: true
|
||||
|
||||
# Self-monitoring metrics
|
||||
selfMonitor:
|
||||
enabled: true
|
||||
|
||||
# Kubernetes API access
|
||||
kubeconfig:
|
||||
enabled: false
|
||||
|
||||
# RBAC configuration
|
||||
rbac:
|
||||
create: true
|
||||
useClusterRole: true
|
||||
|
||||
# Service account
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: ""
|
||||
annotations: {}
|
||||
@@ -0,0 +1,97 @@
|
||||
# Prometheus Node Exporter Helm Values for Bakery IA
|
||||
# Chart: prometheus-community/prometheus-node-exporter
|
||||
# Documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter
|
||||
#
|
||||
# Install Command:
|
||||
# helm install node-exporter prometheus-community/prometheus-node-exporter \
|
||||
# -n bakery-ia -f node-exporter-values.yaml
|
||||
|
||||
# Image configuration
|
||||
image:
|
||||
registry: quay.io
|
||||
repository: prometheus/node-exporter
|
||||
tag: "" # Uses chart default (latest stable)
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# Resource limits optimized for MicroK8s VPS
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 32Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 64Mi
|
||||
|
||||
# Service configuration
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 9100
|
||||
targetPort: 9100
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
|
||||
# DaemonSet update strategy
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
|
||||
# Host network - required for accurate network metrics
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
hostRootFsMount:
|
||||
enabled: true
|
||||
mountPropagation: HostToContainer
|
||||
|
||||
# Node selector
|
||||
nodeSelector: {}
|
||||
|
||||
# Tolerations - allow scheduling on all nodes including control plane
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
|
||||
# Affinity rules
|
||||
affinity: {}
|
||||
|
||||
# Pod security context
|
||||
podSecurityContext:
|
||||
fsGroup: 65534
|
||||
runAsGroup: 65534
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
|
||||
# Container security context
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
|
||||
# RBAC configuration
|
||||
rbac:
|
||||
create: true
|
||||
pspEnabled: false
|
||||
|
||||
# Service account
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: ""
|
||||
annotations: {}
|
||||
|
||||
# Prometheus scrape annotations
|
||||
prometheus:
|
||||
monitor:
|
||||
enabled: false # We use SigNoz OTel collector scraping instead
|
||||
|
||||
# Extra arguments for node-exporter
|
||||
extraArgs:
|
||||
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
|
||||
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
|
||||
|
||||
# Collectors to enable (default set)
|
||||
# Disable collectors that are not useful or cause issues
|
||||
extraHostVolumeMounts: []
|
||||
|
||||
# Sidecar containers
|
||||
sidecars: []
|
||||
|
||||
# Init containers
|
||||
initContainers: []
|
||||
infrastructure/monitoring/signoz/k8s-infra-values-dev.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
# SigNoz k8s-infra Helm Chart Values - Development Environment
|
||||
# Collects Kubernetes infrastructure metrics and sends to SigNoz
|
||||
#
|
||||
# Official Chart: https://github.com/SigNoz/charts/tree/main/charts/k8s-infra
|
||||
# Install Command: helm upgrade --install k8s-infra signoz/k8s-infra -n bakery-ia -f k8s-infra-values-dev.yaml
|
||||
|
||||
# ============================================================================
|
||||
# OTEL COLLECTOR ENDPOINT
|
||||
# ============================================================================
|
||||
otelCollectorEndpoint: "signoz-otel-collector.bakery-ia.svc.cluster.local:4317"
|
||||
otelInsecure: true
|
||||
clusterName: "bakery-ia-dev"
|
||||
|
||||
# ============================================================================
|
||||
# PRESETS - Minimal configuration for development
|
||||
# ============================================================================
|
||||
presets:
|
||||
hostMetrics:
|
||||
enabled: true
|
||||
collectionInterval: 60s # Less frequent in dev
|
||||
|
||||
kubeletMetrics:
|
||||
enabled: true
|
||||
collectionInterval: 60s
|
||||
|
||||
kubernetesAttributes:
|
||||
enabled: true
|
||||
|
||||
kubernetesEvents:
|
||||
enabled: false # Disabled in dev to reduce noise
|
||||
|
||||
logsCollection:
|
||||
enabled: false
|
||||
|
||||
# ============================================================================
|
||||
# OTEL AGENT - Minimal resources for dev
|
||||
# ============================================================================
|
||||
otelAgent:
|
||||
enabled: true
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "250m"
|
||||
|
||||
otelDeployment:
|
||||
enabled: false
|
||||
|
||||
commonLabels:
|
||||
app.kubernetes.io/part-of: "signoz"
|
||||
environment: "development"
|
||||
infrastructure/monitoring/signoz/k8s-infra-values-prod.yaml (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
# SigNoz k8s-infra Helm Chart Values - Production Environment
|
||||
# Collects ALL Kubernetes infrastructure metrics and sends to SigNoz
|
||||
#
|
||||
# This chart REPLACES the need for:
|
||||
# - kube-state-metrics (delete after deploying this)
|
||||
# - node-exporter (delete after deploying this)
|
||||
#
|
||||
# Official Chart: https://github.com/SigNoz/charts/tree/main/charts/k8s-infra
|
||||
#
|
||||
# Install Command:
|
||||
# helm upgrade --install k8s-infra signoz/k8s-infra -n bakery-ia -f k8s-infra-values-prod.yaml
|
||||
#
|
||||
# After install, remove redundant exporters:
|
||||
# helm uninstall kube-state-metrics -n bakery-ia
|
||||
# helm uninstall node-exporter-prometheus-node-exporter -n bakery-ia
|
||||
# (or: helm uninstall prometheus -n bakery-ia if installed via prometheus stack)
|
||||
|
||||
# ============================================================================
|
||||
# CONNECTION TO SIGNOZ
|
||||
# ============================================================================
|
||||
otelCollectorEndpoint: "signoz-otel-collector.bakery-ia.svc.cluster.local:4317"
|
||||
otelInsecure: true
|
||||
clusterName: "bakery-ia-prod"
|
||||
|
||||
# ============================================================================
|
||||
# PRESETS - What metrics to collect
|
||||
# ============================================================================
|
||||
presets:
|
||||
# Host metrics: CPU, memory, disk, filesystem, network, load
|
||||
# Replaces node-exporter
|
||||
hostMetrics:
|
||||
enabled: true
|
||||
collectionInterval: 30s
|
||||
|
||||
# Kubelet metrics: Pod/container CPU, memory usage
|
||||
# Essential for seeing resource usage per pod in SigNoz
|
||||
kubeletMetrics:
|
||||
enabled: true
|
||||
collectionInterval: 30s
|
||||
|
||||
# Kubernetes cluster metrics: deployments, pods, nodes status
|
||||
# Replaces kube-state-metrics
|
||||
clusterMetrics:
|
||||
enabled: true
|
||||
collectionInterval: 30s
|
||||
|
||||
# Enriches all telemetry with k8s metadata (pod name, namespace, etc.)
|
||||
kubernetesAttributes:
|
||||
enabled: true
|
||||
|
||||
# Kubernetes events (pod scheduled, failed, etc.)
|
||||
kubernetesEvents:
|
||||
enabled: true
|
||||
|
||||
# Container logs - disabled (apps send logs via OTLP directly)
|
||||
logsCollection:
|
||||
enabled: false
|
||||
|
||||
# ============================================================================
|
||||
# OTEL AGENT (DaemonSet) - Runs on each node
|
||||
# ============================================================================
|
||||
otelAgent:
|
||||
enabled: true
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
|
||||
# ============================================================================
|
||||
# OTEL DEPLOYMENT - Disabled (using DaemonSet only)
|
||||
# ============================================================================
|
||||
otelDeployment:
|
||||
enabled: false
|
||||
@@ -1,6 +1,6 @@
|
||||
# SigNoz Helm Chart Values - Development Environment
|
||||
# Optimized for local development with minimal resource usage
|
||||
# DEPLOYED IN bakery-ia NAMESPACE - Ingress managed by bakery-ingress
|
||||
# DEPLOYED IN bakery-ia NAMESPACE - Ingress managed by SigNoz Helm chart
|
||||
#
|
||||
# Official Chart: https://github.com/SigNoz/charts
|
||||
# Install Command: helm install signoz signoz/signoz -n bakery-ia -f signoz-values-dev.yaml
|
||||
@@ -10,3 +10,60 @@ global:
|
||||
clusterName: "bakery-ia-dev"
|
||||
domain: "monitoring.bakery-ia.local"
|
||||
# Docker Hub credentials - applied to all sub-charts (including Zookeeper, ClickHouse, etc)
|
||||
|
||||
# Ingress configuration for SigNoz development
|
||||
frontend:
|
||||
ingress:
|
||||
enabled: true
|
||||
className: nginx
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable for local development
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
|
||||
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
|
||||
hosts:
|
||||
- host: monitoring.bakery-ia.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
tls: [] # No TLS for local development
|
||||
|
||||
# Resource configuration for development
|
||||
# Minimal resources for local testing
|
||||
clickhouse:
|
||||
persistence:
|
||||
size: 5Gi
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
|
||||
otelCollector:
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
|
||||
queryService:
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
|
||||
alertmanager:
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
|
||||
@@ -1,12 +1,145 @@
|
||||
# SigNoz Helm Chart Values - Production Environment
|
||||
# High-availability configuration with resource optimization
|
||||
# DEPLOYED IN bakery-ia NAMESPACE - Ingress managed by bakery-ingress-prod
|
||||
# DEPLOYED IN bakery-ia NAMESPACE - Ingress managed by SigNoz Helm chart
|
||||
#
|
||||
# Official Chart: https://github.com/SigNoz/charts
|
||||
# Install Command: helm install signoz signoz/signoz -n bakery-ia -f signoz-values-prod.yaml
|
||||
# Install Command: helm upgrade --install signoz signoz/signoz -n bakery-ia -f signoz-values-prod.yaml
|
||||
#
|
||||
# IMPORTANT: This chart works together with k8s-infra chart for infrastructure monitoring
|
||||
# Deploy k8s-infra after this: helm upgrade --install k8s-infra signoz/k8s-infra -n bakery-ia -f k8s-infra-values-prod.yaml
|
||||
#
|
||||
# MEMORY OPTIMIZATION NOTES:
|
||||
# - ClickHouse memory increased to 8Gi to prevent OOM errors
|
||||
# - Retention reduced to 3 days for traces, 7 days for metrics/logs
|
||||
|
||||
global:
|
||||
storageClass: "microk8s-hostpath" # For MicroK8s, use "microk8s-hostpath" or custom storage class
|
||||
storageClass: "microk8s-hostpath"
|
||||
clusterName: "bakery-ia-prod"
|
||||
domain: "monitoring.bakewise.ai"
|
||||
# Docker Hub credentials - applied to all sub-charts (including Zookeeper, ClickHouse, etc)
|
||||
|
||||
# Ingress configuration for SigNoz Frontend
|
||||
signoz:
|
||||
ingress:
|
||||
enabled: true
|
||||
className: nginx
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
|
||||
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
|
||||
cert-manager.io/cluster-issuer: "letsencrypt-production"
|
||||
nginx.ingress.kubernetes.io/limit-rps: "50"
|
||||
nginx.ingress.kubernetes.io/limit-connections: "25"
|
||||
hosts:
|
||||
- host: monitoring.bakewise.ai
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
port: 8080
|
||||
tls:
|
||||
- hosts:
|
||||
- monitoring.bakewise.ai
|
||||
secretName: bakery-ia-prod-tls-cert
|
||||
|
||||
# ============================================================================
|
||||
# CLICKHOUSE CONFIGURATION
|
||||
# Increased memory to 8Gi to prevent OOM errors (was 4Gi, causing code 241 errors)
|
||||
# ============================================================================
|
||||
clickhouse:
|
||||
persistence:
|
||||
size: 20Gi
|
||||
resources:
|
||||
requests:
|
||||
memory: "4Gi"
|
||||
cpu: "1000m"
|
||||
limits:
|
||||
memory: "8Gi"
|
||||
cpu: "2000m"
|
||||
|
||||
# Server-level settings only (NOT user-level settings like max_threads)
|
||||
# User-level settings must go in profiles section
|
||||
settings:
|
||||
# Max server memory usage: 80% of container limit (6.4GB of 8GB)
|
||||
max_server_memory_usage: "6400000000"
|
||||
# Mark cache size (256MB)
|
||||
mark_cache_size: "268435456"
|
||||
# Uncompressed cache (256MB)
|
||||
uncompressed_cache_size: "268435456"
|
||||
# Max concurrent queries
|
||||
max_concurrent_queries: "100"
|
||||
|
||||
# User-level settings go in profiles
|
||||
profiles:
|
||||
default:
|
||||
# Max memory per query: 2GB
|
||||
max_memory_usage: "2000000000"
|
||||
# Max threads per query
|
||||
max_threads: "4"
|
||||
# Background merges memory limit
|
||||
max_bytes_to_merge_at_max_space_in_pool: "1073741824"
|
||||
|
||||
coldStorage:
|
||||
enabled: false
|
||||
|
||||
# ============================================================================
|
||||
# DATA RETENTION CONFIGURATION
|
||||
# Reduced retention to minimize storage and memory pressure
|
||||
# ============================================================================
|
||||
queryService:
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
# Retention configuration via environment variables
|
||||
configVars:
|
||||
# Trace retention: 3 days (72 hours)
|
||||
SIGNOZ_TRACE_TTL_DURATION_HOURS: "72"
|
||||
# Logs retention: 7 days (168 hours)
|
||||
SIGNOZ_LOGS_TTL_DURATION_HOURS: "168"
|
||||
# Metrics retention: 7 days (168 hours)
|
||||
SIGNOZ_METRICS_TTL_DURATION_HOURS: "168"
|
||||
|
||||
# ============================================================================
|
||||
# OTEL COLLECTOR CONFIGURATION
|
||||
# This collector receives data from:
|
||||
# - Application services (traces, logs, metrics via OTLP)
|
||||
# - k8s-infra chart (infrastructure metrics)
|
||||
# ============================================================================
|
||||
otelCollector:
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
|
||||
# ============================================================================
|
||||
# ALERTMANAGER CONFIGURATION
|
||||
# ============================================================================
|
||||
alertmanager:
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
|
||||
# ============================================================================
|
||||
# ZOOKEEPER CONFIGURATION
|
||||
# ============================================================================
|
||||
zookeeper:
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
persistence:
|
||||
size: 5Gi
|
||||
|
||||
@@ -21,7 +21,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: gateway
|
||||
image: bakery/gateway:latest
|
||||
image: registry.bakewise.ai/bakery-admin/gateway:6c6a9fc58cb98ad24729af84d87f25c7b50874c9
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
name: http
|
||||
|
||||
@@ -23,7 +23,7 @@ mailu-helm/
|
||||
The following critical configurations from the original Kustomize setup have been preserved:
|
||||
|
||||
- **Domain settings**: Domain and hostnames for both dev and prod
|
||||
- **External relay**: Mailgun SMTP relay configuration
|
||||
- **External relay**: MailerSend SMTP relay configuration
|
||||
- **Redis integration**: Connection to shared Redis cluster (database 15)
|
||||
- **Database settings**: PostgreSQL connection details
|
||||
- **Resource limits**: CPU and memory requests/limits matching original setup
|
||||
@@ -39,7 +39,7 @@ The following critical configurations from the original Kustomize setup have been
|
||||
2. Kubernetes cluster with storage provisioner
|
||||
3. Ingress controller (NGINX) - already deployed in your cluster
|
||||
4. Cert-manager for TLS certificates (optional, depends on your ingress setup)
|
||||
5. External SMTP relay account (Mailgun)
|
||||
5. External SMTP relay account (MailerSend - https://mailersend.com)
|
||||
|
||||
### Deployment Commands
|
||||
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
# CoreDNS ConfigMap patch to forward external DNS queries to Unbound for DNSSEC validation
|
||||
# This is required for Mailu Admin which requires DNSSEC-validating DNS resolver
|
||||
#
|
||||
# Apply with: kubectl apply -f coredns-unbound-patch.yaml
|
||||
# Then restart CoreDNS: kubectl rollout restart deployment coredns -n kube-system
|
||||
#
|
||||
# Note: The Unbound service IP (10.104.127.213) may change when the cluster is recreated.
|
||||
# The setup script will automatically update this based on the actual Unbound service IP.
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
data:
|
||||
Corefile: |
|
||||
.:53 {
|
||||
errors
|
||||
health {
|
||||
lameduck 5s
|
||||
}
|
||||
ready
|
||||
kubernetes cluster.local in-addr.arpa ip6.arpa {
|
||||
pods insecure
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
ttl 30
|
||||
}
|
||||
prometheus :9153
|
||||
forward . UNBOUND_SERVICE_IP {
|
||||
max_concurrent 1000
|
||||
}
|
||||
cache 30 {
|
||||
disable success cluster.local
|
||||
disable denial cluster.local
|
||||
}
|
||||
loop
|
||||
reload
|
||||
loadbalance
|
||||
}
|
||||
@@ -0,0 +1,108 @@
|
||||
# MailerSend SMTP Credentials Secret for Mailu
|
||||
#
|
||||
# This secret stores MailerSend credentials for outbound email relay.
|
||||
# Mailu uses MailerSend as an external SMTP relay to send all outbound emails.
|
||||
#
|
||||
# ============================================================================
|
||||
# HOW TO CONFIGURE:
|
||||
# ============================================================================
|
||||
#
|
||||
# 1. Go to https://accounts.mailersend.com/signup and create an account
|
||||
#
|
||||
# 2. Add and verify your domain:
|
||||
# - For dev: bakery-ia.dev
|
||||
# - For prod: bakewise.ai
|
||||
# - Go to Email -> Domains -> Add domain
|
||||
# - Follow the DNS verification steps (add TXT records)
|
||||
#
|
||||
# 3. Generate SMTP credentials:
|
||||
# - Go to Email -> Domains -> Click on your domain
|
||||
# - Go to SMTP section
|
||||
# - Click "Generate new user"
|
||||
# - Save the generated username and password
|
||||
#
|
||||
# 4. Note your SMTP credentials:
|
||||
# - SMTP hostname: smtp.mailersend.net
|
||||
# - Port: 587 (TLS/STARTTLS)
|
||||
# - Username: generated by MailerSend (e.g., MS_xxxxxx@trial-xxxxx.mlsender.net)
|
||||
# - Password: generated SMTP password
|
||||
#
|
||||
# 5. Replace the placeholder values below with your credentials
|
||||
#
|
||||
# 6. Apply this secret:
|
||||
# kubectl apply -f mailersend-credentials-secret.yaml -n bakery-ia
|
||||
#
|
||||
# ============================================================================
|
||||
# IMPORTANT NOTES:
|
||||
# ============================================================================
|
||||
#
|
||||
# - MailerSend requires TLS 1.2 or higher (supported by default)
|
||||
# - SMTP credentials are account-wide (work for any verified domain)
|
||||
# - Free tier: 3,000 emails/month (12,000 with verified domain)
|
||||
# - Rate limit: 120 requests/minute
|
||||
#
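# A quick way to sanity-check the credentials before wiring them into Mailu is a
# manual SMTP test (hedged example; requires swaks installed locally, and the
# addresses below are placeholders):
#   swaks --to you@example.com --from admin@bakewise.ai \
#         --server smtp.mailersend.net:587 --tls \
#         --auth LOGIN --auth-user "MS_xxxxxx@yourdomain" --auth-password "<smtp-password>"
#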
|
||||
# ============================================================================
|
||||
# CRITICAL: AFTER UPDATING THIS SECRET
|
||||
# ============================================================================
|
||||
#
|
||||
# Mailu's Postfix reads SASL credentials ONLY at pod startup. It does NOT
|
||||
# automatically reload when this secret changes. You MUST do one of:
|
||||
#
|
||||
# Option 1: Update the credentials-version annotation in values.yaml and run helm upgrade
|
||||
# - Edit prod/values.yaml: postfix.podAnnotations.credentials-version
|
||||
# - Set to current timestamp: date +%s
|
||||
# - Run: helm upgrade mailu mailu/mailu -f values.yaml -f prod/values.yaml -n bakery-ia
|
||||
#
|
||||
# Option 2: Manually restart Postfix pod
|
||||
# kubectl rollout restart deployment/mailu-postfix -n bakery-ia
|
||||
#
|
||||
# Option 3: Delete the Postfix pod (it will be recreated)
|
||||
# kubectl delete pod -l app.kubernetes.io/component=postfix -n bakery-ia
|
||||
#
|
||||
# ============================================================================
|
||||
# DNS RECORDS REQUIRED FOR MAILERSEND:
|
||||
# ============================================================================
|
||||
#
|
||||
# Add these DNS records to your domain (Cloudflare) for proper email delivery:
|
||||
#
|
||||
# 1. SPF Record (TXT):
|
||||
# Name: @
|
||||
# Value: v=spf1 include:mailersend.net ~all
|
||||
#
|
||||
# 2. DKIM Records (TXT):
|
||||
# MailerSend will provide DKIM keys after domain verification
|
||||
# Typically: mlsend._domainkey and mlsend2._domainkey
|
||||
# (check your MailerSend domain settings for exact values)
|
||||
#
|
||||
# 3. DMARC Record (TXT):
|
||||
# Name: _dmarc
|
||||
# Value: v=DMARC1; p=quarantine; rua=mailto:admin@bakewise.ai
|
||||
#
|
||||
# 4. MX Records (for receiving mail via Mailu):
|
||||
# Priority 10: mail.bakewise.ai
|
||||
#
|
||||
# 5. A Record:
|
||||
# Name: mail
|
||||
# Value: <your-server-public-IP>
|
||||
#
|
||||
# ============================================================================
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mailu-mailersend-credentials
|
||||
namespace: bakery-ia
|
||||
labels:
|
||||
app: mailu
|
||||
component: external-relay
|
||||
annotations:
|
||||
description: "MailerSend SMTP credentials for Mailu external relay"
|
||||
type: Opaque
|
||||
stringData:
|
||||
# ============================================================================
|
||||
# REPLACE THESE VALUES WITH YOUR MAILERSEND CREDENTIALS
|
||||
# ============================================================================
|
||||
# Key names match Mailu Helm chart defaults (relay-username, relay-password)
|
||||
#
|
||||
relay-username: "MS_d34ZtW@bakewise.ai"
|
||||
relay-password: "mssp.Z6GRHQ8.zr6ke4nvq6egon12.IDyvEi7"
|
||||
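The configuration and restart steps described in the comments above boil down to two commands. A minimal sketch, using the file, release, and namespace names from this repo:

```bash
# Apply (or update) the MailerSend relay credentials secret
kubectl apply -f mailersend-credentials-secret.yaml -n bakery-ia

# Bump the credentials-version annotation so Postfix rolls and re-reads SASL credentials
helm upgrade mailu mailu/mailu \
  -f values.yaml -f prod/values.yaml -n bakery-ia \
  --set-string postfix.podAnnotations.credentials-version="$(date +%s)"
```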
@@ -1,94 +0,0 @@
|
||||
# Mailgun SMTP Credentials Secret for Mailu
|
||||
#
|
||||
# This secret stores Mailgun credentials for outbound email relay.
|
||||
# Mailu uses Mailgun as an external SMTP relay to send all outbound emails.
|
||||
#
|
||||
# ============================================================================
|
||||
# HOW TO CONFIGURE:
|
||||
# ============================================================================
|
||||
#
|
||||
# 1. Go to https://www.mailgun.com and create an account
|
||||
#
|
||||
# 2. Add and verify your domain:
|
||||
# - For dev: bakery-ia.dev
|
||||
# - For prod: bakewise.ai
|
||||
#
|
||||
# 3. Go to Domain Settings > SMTP credentials in Mailgun dashboard
|
||||
#
|
||||
# 4. Note your SMTP credentials:
|
||||
# - SMTP hostname: smtp.mailgun.org
|
||||
# - Port: 587 (TLS/STARTTLS)
|
||||
# - Username: typically postmaster@yourdomain.com
|
||||
# - Password: your Mailgun SMTP password (NOT the API key)
|
||||
#
|
||||
# 5. Base64 encode your credentials:
|
||||
# echo -n 'postmaster@bakewise.ai' | base64
|
||||
# echo -n 'your-mailgun-smtp-password' | base64
|
||||
#
|
||||
# 6. Replace the placeholder values below with your encoded credentials
|
||||
#
|
||||
# 7. Apply this secret:
|
||||
# kubectl apply -f mailgun-credentials-secret.yaml -n bakery-ia
|
||||
#
|
||||
# ============================================================================
|
||||
# IMPORTANT NOTES:
|
||||
# ============================================================================
|
||||
#
|
||||
# - Use the SMTP password from Mailgun, NOT the API key
|
||||
# - The username format is: postmaster@yourdomain.com
|
||||
# - For sandbox domains, Mailgun requires adding authorized recipients
|
||||
# - Production domains need DNS verification (SPF, DKIM records)
|
||||
#
|
||||
# ============================================================================
|
||||
# DNS RECORDS REQUIRED FOR MAILGUN:
|
||||
# ============================================================================
|
||||
#
|
||||
# Add these DNS records to your domain for proper email delivery:
|
||||
#
|
||||
# 1. SPF Record (TXT):
|
||||
# Name: @
|
||||
# Value: v=spf1 include:mailgun.org ~all
|
||||
#
|
||||
# 2. DKIM Records (TXT):
|
||||
# Mailgun will provide two DKIM keys to add as TXT records
|
||||
# (check your Mailgun domain settings for exact values)
|
||||
#
|
||||
# 3. MX Records (optional, only if receiving via Mailgun):
|
||||
# Priority 10: mxa.mailgun.org
|
||||
# Priority 10: mxb.mailgun.org
|
||||
#
|
||||
# ============================================================================
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mailu-mailgun-credentials
|
||||
namespace: bakery-ia
|
||||
labels:
|
||||
app: mailu
|
||||
component: external-relay
|
||||
annotations:
|
||||
description: "Mailgun SMTP credentials for Mailu external relay"
|
||||
type: Opaque
|
||||
stringData:
|
||||
# ============================================================================
|
||||
# REPLACE THESE VALUES WITH YOUR MAILGUN CREDENTIALS
|
||||
# ============================================================================
|
||||
#
|
||||
# Option 1: Use stringData (plain text - Kubernetes will encode automatically)
|
||||
# This is easier for initial setup but shows credentials in the file
|
||||
#
|
||||
RELAY_USERNAME: "postmaster@sandboxc1bff891532b4f0c83056a68ae080b4c.mailgun.org"
|
||||
RELAY_PASSWORD: "2e47104abadad8eb820d00042ea6d5eb-77c6c375-89c7ea55"
|
||||
#
|
||||
# ============================================================================
|
||||
# ALTERNATIVE: Use pre-encoded values (more secure for version control)
|
||||
# ============================================================================
|
||||
# Comment out stringData above and uncomment data below:
|
||||
#
|
||||
# data:
|
||||
# # Base64 encoded values
|
||||
# # echo -n 'postmaster@bakewise.ai' | base64
|
||||
# RELAY_USERNAME: cG9zdG1hc3RlckBiYWtld2lzZS5haQ==
|
||||
# # echo -n 'your-password' | base64
|
||||
# RELAY_PASSWORD: WU9VUl9NQUlMR1VOX1NNVFBfUEFTU1dPUkQ=
|
||||
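To confirm the switch from the Mailgun secret above to the MailerSend one, a quick check could look like the following (secret and key names taken from the manifests in this change):

```bash
# The MailerSend secret should exist with the Helm chart's default key names
kubectl get secret mailu-mailersend-credentials -n bakery-ia \
  -o jsonpath='{.data.relay-username}' | base64 -d; echo

# The old Mailgun secret can be removed once Postfix no longer references it
kubectl delete secret mailu-mailgun-credentials -n bakery-ia --ignore-not-found
```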
@@ -6,6 +6,7 @@ metadata:
  labels:
    app.kubernetes.io/name: mailu
    app.kubernetes.io/component: ingress
    environment: development
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "100m"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
@@ -26,6 +27,6 @@ spec:
          pathType: Prefix
          backend:
            service:
              name: mailu-front # Helm release name 'mailu' + component 'front'
              name: mailu-front
            port:
              number: 80
              number: 80
@@ -1,31 +1,19 @@
# Development-tuned Mailu configuration
global:
  # Using Unbound DNS for DNSSEC validation (required by Mailu admin)
  # Unbound service is available at unbound-dns.bakery-ia.svc.cluster.local
  # Static ClusterIP configured in unbound-helm/values.yaml
  custom_dns_servers: "10.96.53.53" # Unbound DNS static ClusterIP
  # Using Kubernetes CoreDNS for DNS resolution
  # CoreDNS is configured with DNS-over-TLS (Cloudflare) for DNSSEC validation
  # Default to Kubernetes DNS IP (will be overridden dynamically if needed)
  custom_dns_servers: "10.96.0.10" # Kubernetes DNS IP

# Redis configuration - use built-in Mailu Redis (no authentication needed)
externalRedis:
  enabled: false

# Component-specific DNS configuration
# Admin requires DNSSEC validation - use Unbound DNS (forwards cluster.local to kube-dns)
# DNS configuration - use Kubernetes DNS (ClusterFirst)
# CoreDNS provides DNSSEC validation via DNS-over-TLS to Cloudflare
admin:
  dnsPolicy: "None"
  dnsConfig:
    nameservers:
      - "10.96.53.53" # Unbound DNS static ClusterIP (forwards cluster.local to kube-dns)
    searches:
      - "bakery-ia.svc.cluster.local"
      - "svc.cluster.local"
      - "cluster.local"
    options:
      - name: ndots
        value: "5"
  dnsPolicy: "ClusterFirst"

# RSPAMD needs Unbound for DNSSEC validation (DKIM/SPF/DMARC checks)
# Using ClusterFirst with search domains + Kubernetes DNS which can forward to Unbound
rspamd:
  dnsPolicy: "ClusterFirst"

@@ -56,9 +44,18 @@ initialAccount:
externalRelay:
  host: "[smtp.mailgun.org]:587"
  # Credentials loaded from Kubernetes secret
  secretName: "mailu-mailgun-credentials"
  usernameKey: "RELAY_USERNAME"
  passwordKey: "RELAY_PASSWORD"
  # Key names use Helm chart defaults: relay-username, relay-password
  existingSecret: "mailu-mailgun-credentials"

# Postfix configuration
# CRITICAL: podAnnotations ensures Postfix restarts when credentials change
# Without this, Mailu reads SASL credentials only at pod startup and won't pick up secret updates
postfix:
  podAnnotations:
    # UPDATE THIS VALUE when changing mailu-mailgun-credentials secret
    # This triggers a rolling restart of Postfix to reload SASL credentials
    # Generate new value: date +%s or use the secret's resourceVersion
    credentials-version: "1706054400"

# Environment-specific configurations
persistence:

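With dnsPolicy back to ClusterFirst in the dev values, the admin pod should resolve through the cluster DNS service rather than a pinned Unbound IP. A hedged way to confirm this, assuming the chart's usual deployment name:

```bash
# resolv.conf should point at the cluster DNS ClusterIP (10.96.0.10 on this dev cluster)
kubectl exec -n bakery-ia deployment/mailu-admin -- cat /etc/resolv.conf
```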
@@ -0,0 +1,68 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mailu-ingress
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: mailu
    app.kubernetes.io/component: ingress
    environment: production
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-production
    # Proxy settings for large attachments and long connections
    nginx.ingress.kubernetes.io/proxy-body-size: "100m"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"
    # SSL redirect
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    # CRITICAL: Disable proxy buffering for webmail streaming/long-polling
    # This prevents the "stuck loading" issue with Roundcube webmail
    # Reference: https://github.com/Mailu/Mailu/issues/2850
    nginx.ingress.kubernetes.io/proxy-buffering: "off"
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    # WebSocket support for webmail real-time features
    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
    nginx.ingress.kubernetes.io/upstream-hash-by: "$remote_addr"
    # CRITICAL: Configuration snippet for Mailu header handling
    # Fixes redirect loops by ensuring Mailu sees the correct protocol
    # Reference: https://mailu.io/2.0/reverse.html
    nginx.ingress.kubernetes.io/configuration-snippet: |
      # CRITICAL: Force X-Forwarded-Proto to https to prevent redirect loops
      # Mailu's internal nginx checks this header to decide if redirect is needed
      proxy_set_header X-Forwarded-Proto https;
      proxy_set_header X-Forwarded-Port 443;
      proxy_set_header X-Forwarded-Ssl on;

      # Real IP headers for Mailu logging and rate limiting
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

      # Fix redirects from Mailu internal services (admin, webmail)
      # Don't add trailing slash to prevent 404 on redirects
      proxy_redirect http://localhost https://$host;
      proxy_redirect https://localhost https://$host;
      proxy_redirect http://$host https://$host;

      # Disable buffering for streaming responses (webmail)
      proxy_buffering off;
      proxy_cache off;
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - mail.bakewise.ai
      secretName: bakery-ia-prod-tls-cert
  rules:
    - host: mail.bakewise.ai
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: mailu-front
                port:
                  number: 80
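Once this Ingress is applied, a quick external check helps confirm that the forced X-Forwarded-Proto header actually stops the redirect loop. A sketch, assuming the admin UI is exposed at Mailu's default /admin path:

```bash
# A healthy setup answers with a single 200/302, not an http<->https bounce
curl -sS -o /dev/null -D - https://mail.bakewise.ai/admin | grep -iE '^(HTTP|location)'
```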
@@ -1,15 +1,15 @@
# Production-tuned Mailu configuration
global:
  # Using Kubernetes cluster DNS for name resolution
  custom_dns_servers: "10.96.0.10" # Kubernetes cluster DNS IP
  # Using Kubernetes CoreDNS for DNS resolution
  # CoreDNS is configured with DNS-over-TLS (Cloudflare) for DNSSEC validation
  custom_dns_servers: "10.152.183.10" # MicroK8s CoreDNS IP

# Redis configuration - use built-in Mailu Redis (no authentication needed for internal)
externalRedis:
  enabled: false

# DNS configuration for production
# Use Kubernetes DNS (ClusterFirst) which forwards to Unbound via CoreDNS
# This is configured automatically by the mailu-helm Tilt resource
# Use Kubernetes DNS (ClusterFirst) - CoreDNS provides DNSSEC via DNS-over-TLS
admin:
  dnsPolicy: "ClusterFirst"

@@ -21,6 +21,11 @@ domain: "bakewise.ai"
|
||||
hostnames:
|
||||
- "mail.bakewise.ai"
|
||||
|
||||
# Network configuration for MicroK8s
|
||||
# This must match your cluster's pod CIDR
|
||||
# MicroK8s default is 10.1.0.0/16, but check with: kubectl cluster-info dump | grep -m 1 cluster-cidr
|
||||
subnet: "10.1.0.0/16"
|
||||
|
||||
# Initial admin account for production environment
|
||||
# Password is stored in mailu-admin-credentials secret
|
||||
initialAccount:
|
||||
@@ -31,25 +36,30 @@ initialAccount:
|
||||
existingSecretPasswordKey: "password"
|
||||
mode: "ifmissing"
|
||||
|
||||
# External relay configuration for production (Mailgun)
|
||||
# All outbound emails will be relayed through Mailgun SMTP
|
||||
# To configure:
|
||||
# 1. Register at mailgun.com and verify your domain (bakewise.ai)
|
||||
# 2. Get your SMTP credentials from Mailgun dashboard
|
||||
# 3. Update the secret in configs/mailgun-credentials-secret.yaml
|
||||
# 4. Apply the secret: kubectl apply -f configs/mailgun-credentials-secret.yaml -n bakery-ia
|
||||
# External relay configuration for production (MailerSend)
|
||||
# All outbound emails will be relayed through MailerSend SMTP
|
||||
# Secret already exists: mailu-mailersend-credentials
|
||||
externalRelay:
|
||||
host: "[smtp.mailgun.org]:587"
|
||||
# Credentials loaded from Kubernetes secret
|
||||
secretName: "mailu-mailgun-credentials"
|
||||
usernameKey: "RELAY_USERNAME"
|
||||
passwordKey: "RELAY_PASSWORD"
|
||||
host: "[smtp.mailersend.net]:2525"
|
||||
# Credentials loaded from existing Kubernetes secret
|
||||
# Key names use Helm chart defaults (relay-username, relay-password)
|
||||
existingSecret: "mailu-mailersend-credentials"
|
||||
|
||||
# Postfix configuration
|
||||
# CRITICAL: podAnnotations ensures Postfix restarts when credentials change
|
||||
# Without this, Mailu reads SASL credentials only at pod startup and won't pick up secret updates
|
||||
postfix:
|
||||
podAnnotations:
|
||||
# UPDATE THIS VALUE when changing mailu-mailersend-credentials secret
|
||||
# This triggers a rolling restart of Postfix to reload SASL credentials
|
||||
# Generate new value: date +%s or use the secret's resourceVersion
|
||||
credentials-version: "1706054400"
|
||||
|
||||
# Environment-specific configurations
|
||||
persistence:
|
||||
enabled: true
|
||||
# Production: use microk8s-hostpath or longhorn
|
||||
storageClass: "longhorn" # Assuming Longhorn is available in production
|
||||
# Production: use microk8s-hostpath (default storage class)
|
||||
storageClass: "" # Use cluster default storage class
|
||||
size: "20Gi" # Larger storage for production email volume
|
||||
|
||||
# Resource allocations for production
|
||||
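After a deploy it is worth confirming that Postfix actually picked up the MailerSend relay settings defined above. A minimal check, assuming the chart's usual deployment name for the postfix component:

```bash
# relayhost should show [smtp.mailersend.net]:2525 and the SASL map should be populated
kubectl exec -n bakery-ia deployment/mailu-postfix -- postconf relayhost smtp_sasl_password_maps
```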
@@ -103,18 +113,24 @@ replicaCount: 1 # Can be increased in production as needed
|
||||
secretKey: "generate-strong-key-here-for-production"
|
||||
|
||||
# Ingress configuration for production - disabled to use with existing ingress
|
||||
# External nginx-ingress handles TLS termination and proxies to Mailu front
|
||||
ingress:
|
||||
enabled: false # Disable chart's Ingress; use existing one
|
||||
enabled: false # Disable chart's Ingress; use existing mailu-ingress.yaml
|
||||
tls: false # Disable TLS in chart since ingress handles it
|
||||
tlsFlavorOverride: notls # No TLS on internal NGINX; expect external proxy to handle TLS
|
||||
realIpHeader: X-Forwarded-For # Header for client IP from your Ingress
|
||||
realIpFrom: 0.0.0.0/0 # Trust all proxies (restrict to your Ingress pod CIDR for security)
|
||||
tlsFlavorOverride: notls # No TLS on internal NGINX; external ingress handles TLS
|
||||
# CRITICAL: Real IP header configuration for proper client IP detection
|
||||
# This must match the header set by nginx-ingress (X-Real-IP)
|
||||
# Reference: https://mailu.io/2.0/reverse.html
|
||||
realIpHeader: X-Real-IP
|
||||
realIpFrom: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" # Trust cluster pod CIDRs
|
||||
path: /
|
||||
pathType: ImplementationSpecific
|
||||
|
||||
# TLS flavor for production (uses Let's Encrypt)
|
||||
# TLS flavor for production
|
||||
# "notls" because external ingress handles TLS termination
|
||||
# The ingress sends X-Forwarded-Proto: https to tell Mailu the original protocol
|
||||
tls:
|
||||
flavor: "cert"
|
||||
flavor: "notls"
|
||||
|
||||
# Welcome message (enabled in production)
|
||||
welcomeMessage:
|
||||
@@ -130,12 +146,31 @@ antivirus:
|
||||
enabled: true
|
||||
flavor: "clamav"
|
||||
|
||||
# Production-specific settings
|
||||
# Production-specific environment settings
|
||||
# CRITICAL: These must be consistent with the ingress/proxy setup
|
||||
env:
|
||||
DEBUG: "false"
|
||||
LOG_LEVEL: "WARNING"
|
||||
TLS_FLAVOR: "cert"
|
||||
REDIS_PASSWORD: "secure-redis-password"
|
||||
LOG_LEVEL: "INFO" # Temporarily set to INFO for debugging
|
||||
# TLS_FLAVOR must be "notls" when using external reverse proxy for TLS termination
|
||||
# The ingress handles TLS and sends X-Forwarded-Proto: https
|
||||
TLS_FLAVOR: "notls"
|
||||
# Session cookie settings for reverse proxy setup
|
||||
# SESSION_COOKIE_SECURE must be True since we're serving over HTTPS (via ingress)
|
||||
SESSION_COOKIE_SECURE: "true"
|
||||
# Increase session timeout to prevent premature logouts
|
||||
SESSION_TIMEOUT: "3600"
|
||||
PERMANENT_SESSION_LIFETIME: "108000"
|
||||
# CRITICAL: Tell Mailu it's behind a reverse proxy
|
||||
# This ensures proper URL generation for redirects
|
||||
PROXY_PROTOCOL: "false"
|
||||
# Trust the ingress controller's IP for real IP headers
|
||||
REAL_IP_HEADER: "X-Real-IP"
|
||||
REAL_IP_FROM: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
|
||||
# CRITICAL: Disable rate limiting temporarily to debug the sso.php redirect loop
|
||||
# Reference: https://github.com/Mailu/Mailu/issues/3094
|
||||
# The webmail can get rate-limited causing infinite redirect loops
|
||||
AUTH_RATELIMIT_IP: "10000/minute"
|
||||
AUTH_RATELIMIT_USER: "10000/day"
|
||||
|
||||
# Enable monitoring in production
|
||||
monitoring:
|
||||
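These env values only take effect after the pods restart; a quick way to see what the admin container actually runs with (deployment name assumed from the chart's naming) is:

```bash
kubectl exec -n bakery-ia deployment/mailu-admin -- \
  sh -c 'env | grep -E "TLS_FLAVOR|SESSION_COOKIE_SECURE|REAL_IP" | sort'
```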
@@ -148,15 +183,14 @@ securityContext:
|
||||
fsGroup: 1000
|
||||
|
||||
# Network policies for production
|
||||
# Note: MicroK8s uses 'ingress' namespace, not 'ingress-nginx'
|
||||
networkPolicy:
|
||||
enabled: true
|
||||
ingressController:
|
||||
namespace: ingress-nginx
|
||||
namespace: ingress
|
||||
podSelector: |
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: nginx-ingress-microk8s
|
||||
monitoring:
|
||||
namespace: monitoring
|
||||
podSelector: |
|
||||
|
||||
@@ -4,11 +4,10 @@
|
||||
# =============================================================================
|
||||
# This script automates the deployment of Mailu mail server for production.
|
||||
# It handles:
|
||||
# 1. Unbound DNS deployment (for DNSSEC validation)
|
||||
# 2. CoreDNS configuration (forward to Unbound)
|
||||
# 3. TLS certificate secret creation
|
||||
# 4. Admin credentials secret creation
|
||||
# 5. Mailu Helm deployment (admin user created automatically via initialAccount)
|
||||
# 1. CoreDNS configuration with DNS-over-TLS for DNSSEC validation
|
||||
# 2. TLS certificate secret creation
|
||||
# 3. Admin credentials secret creation
|
||||
# 4. Mailu Helm deployment (admin user created automatically via initialAccount)
|
||||
#
|
||||
# Usage:
|
||||
# ./deploy-mailu-prod.sh [--domain DOMAIN] [--admin-password PASSWORD]
|
||||
@@ -99,52 +98,15 @@ fi
|
||||
print_success "Prerequisites check passed"
|
||||
|
||||
# =============================================================================
|
||||
# Step 1: Deploy Unbound DNS Resolver
|
||||
# Step 1: Configure CoreDNS with DNS-over-TLS for DNSSEC
|
||||
# =============================================================================
|
||||
print_step "Step 1: Deploying Unbound DNS resolver..."
|
||||
print_step "Step 1: Configuring CoreDNS with DNS-over-TLS for DNSSEC validation..."
|
||||
|
||||
if kubectl get deployment unbound -n "$NAMESPACE" &>/dev/null; then
|
||||
print_success "Unbound already deployed"
|
||||
else
|
||||
helm upgrade --install unbound "$MAILU_HELM_DIR/../../networking/dns/unbound-helm" \
|
||||
-n "$NAMESPACE" \
|
||||
-f "$MAILU_HELM_DIR/../../networking/dns/unbound-helm/values.yaml" \
|
||||
-f "$MAILU_HELM_DIR/../../networking/dns/unbound-helm/prod/values.yaml" \
|
||||
--timeout 5m \
|
||||
--wait
|
||||
# Check if CoreDNS is already configured with DNS-over-TLS
|
||||
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' 2>/dev/null | grep -o 'tls://1.1.1.1' || echo "")
|
||||
|
||||
print_success "Unbound deployed"
|
||||
fi
|
||||
|
||||
# Wait for Unbound to be ready
|
||||
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=unbound -n "$NAMESPACE" --timeout=120s
|
||||
|
||||
# Get Unbound service IP (dynamic resolution)
|
||||
echo "Waiting for Unbound service to get assigned IP..."
|
||||
for i in {1..30}; do
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n "$NAMESPACE" -o jsonpath='{.spec.clusterIP}' 2>/dev/null || echo "")
|
||||
if [ -n "$UNBOUND_IP" ] && [ "$UNBOUND_IP" != "<none>" ]; then
|
||||
echo "Unbound DNS service IP: $UNBOUND_IP"
|
||||
break
|
||||
fi
|
||||
if [ $i -eq 30 ]; then
|
||||
print_error "Failed to get Unbound service IP"
|
||||
exit 1
|
||||
fi
|
||||
sleep 2
|
||||
echo "Waiting for Unbound service IP... (attempt $i/30)"
|
||||
done
|
||||
|
||||
# =============================================================================
|
||||
# Step 2: Configure CoreDNS to Forward to Unbound
|
||||
# =============================================================================
|
||||
print_step "Step 2: Configuring CoreDNS for DNSSEC validation..."
|
||||
|
||||
# Check current CoreDNS forward configuration
|
||||
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'forward \. [0-9.]*' | awk '{print $3}' || echo "")
|
||||
|
||||
if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
|
||||
echo "Updating CoreDNS to forward to Unbound ($UNBOUND_IP)..."
|
||||
if [ -z "$CURRENT_FORWARD" ]; then
|
||||
echo "Updating CoreDNS to use DNS-over-TLS with Cloudflare for DNSSEC validation..."
|
||||
|
||||
# Create a temporary file with the CoreDNS configuration
|
||||
TEMP_COREFILE=$(mktemp)
|
||||
@@ -161,8 +123,9 @@ if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
        ttl 30
    }
    prometheus :9153
    forward . $UNBOUND_IP {
        max_concurrent 1000
    forward . tls://1.1.1.1 tls://1.0.0.1 {
        tls_servername cloudflare-dns.com
        health_check 5s
    }
    cache 30 {
        disable success cluster.local
@@ -187,15 +150,19 @@ EOF
|
||||
kubectl rollout restart deployment coredns -n kube-system
|
||||
kubectl rollout status deployment coredns -n kube-system --timeout=60s
|
||||
|
||||
print_success "CoreDNS configured to forward to Unbound"
|
||||
print_success "CoreDNS configured with DNS-over-TLS for DNSSEC validation"
|
||||
else
|
||||
print_success "CoreDNS already configured for Unbound"
|
||||
print_success "CoreDNS already configured with DNS-over-TLS"
|
||||
fi
|
||||
|
||||
# Get CoreDNS service IP for Mailu configuration
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
echo "CoreDNS service IP: $COREDNS_IP"
|
||||
|
||||
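A verification step that could follow here: query through CoreDNS from a throwaway pod and look for the "ad" (authenticated data) flag, which indicates the answer was DNSSEC-validated upstream. This is a sketch; the image and tag are borrowed from the upstream Kubernetes DNS-debugging docs and may need adjusting for this cluster:

```bash
kubectl run dnssec-check --rm -it --restart=Never \
  --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 -- \
  dig +dnssec cloudflare.com SOA | grep -E 'flags:|RRSIG'
# Expect "ad" among the flags when validation succeeds
```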
# =============================================================================
|
||||
# Step 3: Create TLS Certificate Secret
|
||||
# Step 2: Create TLS Certificate Secret
|
||||
# =============================================================================
|
||||
print_step "Step 3: Creating TLS certificate secret..."
|
||||
print_step "Step 2: Creating TLS certificate secret..."
|
||||
|
||||
if kubectl get secret mailu-certificates -n "$NAMESPACE" &>/dev/null; then
|
||||
print_success "TLS certificate secret already exists"
|
||||
@@ -217,9 +184,9 @@ else
|
||||
fi
|
||||
|
||||
# =============================================================================
|
||||
# Step 4: Create Admin Credentials Secret
|
||||
# Step 3: Create Admin Credentials Secret
|
||||
# =============================================================================
|
||||
print_step "Step 4: Creating admin credentials secret..."
|
||||
print_step "Step 3: Creating admin credentials secret..."
|
||||
|
||||
if kubectl get secret mailu-admin-credentials -n "$NAMESPACE" &>/dev/null; then
|
||||
print_success "Admin credentials secret already exists"
|
||||
@@ -243,33 +210,28 @@ else
|
||||
fi
|
||||
|
||||
# =============================================================================
|
||||
# Step 5: Deploy Mailu via Helm
|
||||
# Step 4: Deploy Mailu via Helm
|
||||
# =============================================================================
|
||||
print_step "Step 5: Deploying Mailu via Helm..."
|
||||
print_step "Step 4: Deploying Mailu via Helm..."
|
||||
|
||||
# Add Mailu Helm repository
|
||||
helm repo add mailu https://mailu.github.io/helm-charts 2>/dev/null || true
|
||||
helm repo update mailu
|
||||
|
||||
# Create temporary values file with dynamic DNS server
|
||||
TEMP_VALUES=$(mktemp)
|
||||
cat "$MAILU_HELM_DIR/values.yaml" | sed "s/# custom_dns_servers: \"\" # Will be set dynamically by deployment script/custom_dns_servers: \"$UNBOUND_IP\"/" > "$TEMP_VALUES"
|
||||
|
||||
# Deploy Mailu with dynamic DNS configuration
|
||||
# Deploy Mailu with CoreDNS configuration
|
||||
helm upgrade --install mailu mailu/mailu \
|
||||
-n "$NAMESPACE" \
|
||||
-f "$TEMP_VALUES" \
|
||||
-f "$MAILU_HELM_DIR/values.yaml" \
|
||||
-f "$MAILU_HELM_DIR/prod/values.yaml" \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
|
||||
rm -f "$TEMP_VALUES"
|
||||
|
||||
print_success "Mailu Helm release deployed (admin user will be created automatically)"
|
||||
|
||||
# =============================================================================
|
||||
# Step 6: Wait for Pods to be Ready
|
||||
# Step 5: Wait for Pods to be Ready
|
||||
# =============================================================================
|
||||
print_step "Step 6: Waiting for Mailu pods to be ready..."
|
||||
print_step "Step 5: Waiting for Mailu pods to be ready..."
|
||||
|
||||
echo "This may take 5-10 minutes (ClamAV takes time to initialize)..."
|
||||
|
||||
@@ -307,10 +269,27 @@ echo " Webmail: https://mail.$DOMAIN/webmail"
|
||||
echo " SMTP: mail.$DOMAIN:587 (STARTTLS)"
|
||||
echo " IMAP: mail.$DOMAIN:993 (SSL)"
|
||||
echo ""
|
||||
echo "DNS Configuration:"
|
||||
echo " CoreDNS is configured with DNS-over-TLS (Cloudflare) for DNSSEC validation"
|
||||
echo " CoreDNS IP: $COREDNS_IP"
|
||||
echo ""
|
||||
echo "Next Steps:"
|
||||
echo " 1. Configure DNS records (A, MX, SPF, DMARC)"
|
||||
echo " 2. Get DKIM key: kubectl exec -n $NAMESPACE deployment/mailu-admin -- cat /dkim/$DOMAIN.dkim.pub"
|
||||
echo " 3. Add DKIM TXT record to DNS"
|
||||
echo " 1. Configure MailerSend:"
|
||||
echo " - Sign up at https://accounts.mailersend.com/signup"
|
||||
echo " - Add domain '$DOMAIN' and verify DNS records"
|
||||
echo " - Generate SMTP credentials (Email -> Domains -> SMTP)"
|
||||
echo " - Update secret: kubectl edit secret mailu-mailersend-credentials -n $NAMESPACE"
|
||||
echo ""
|
||||
echo " 2. Configure DNS records in Cloudflare for '$DOMAIN':"
|
||||
echo " - A record: mail -> <your-server-IP>"
|
||||
echo " - MX record: @ -> mail.$DOMAIN (priority 10)"
|
||||
echo " - TXT (SPF): @ -> v=spf1 include:mailersend.net ~all"
|
||||
echo " - TXT (DKIM): mlsend._domainkey -> <from MailerSend dashboard>"
|
||||
echo " - TXT (DMARC): _dmarc -> v=DMARC1; p=quarantine; rua=mailto:admin@$DOMAIN"
|
||||
echo ""
|
||||
echo " 3. Get Mailu DKIM key (for direct sending):"
|
||||
echo " kubectl exec -n $NAMESPACE deployment/mailu-admin -- cat /dkim/$DOMAIN.dkim.pub"
|
||||
echo ""
|
||||
echo " 4. Configure Ingress for mail.$DOMAIN"
|
||||
echo ""
|
||||
echo "To check pod status:"
|
||||
|
||||
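For the DNS step listed above, the published records can be checked from any machine once Cloudflare has propagated them; a sketch using dig with the record names from this change:

```bash
dig +short MX bakewise.ai
dig +short TXT bakewise.ai | grep spf1
dig +short TXT mlsend._domainkey.bakewise.ai
dig +short TXT _dmarc.bakewise.ai
```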
@@ -3,16 +3,13 @@
|
||||
# Phase 7: Deploy Optional Services - Fixed Version
|
||||
# =============================================================================
|
||||
# This script deploys the optional services for production:
|
||||
# 1. Unbound DNS (with dynamic IP resolution)
|
||||
# 2. CoreDNS configuration for DNSSEC
|
||||
# 3. Mailu Email Server
|
||||
# 4. SigNoz Monitoring
|
||||
# 1. CoreDNS configuration with DNS-over-TLS for DNSSEC validation
|
||||
# 2. Mailu Email Server
|
||||
# 3. SigNoz Monitoring
|
||||
#
|
||||
# Fixed issues:
|
||||
# - Removed static ClusterIP that caused CIDR range conflicts
|
||||
# - Implemented dynamic IP resolution for Unbound DNS
|
||||
# - Updated CoreDNS patching to use dynamic IP
|
||||
# - Updated Mailu configuration to use dynamic DNS server
|
||||
# DNS Architecture:
|
||||
# - CoreDNS uses DNS-over-TLS with Cloudflare (1.1.1.1) for DNSSEC validation
|
||||
# - Mailu uses CoreDNS for DNS resolution (internal K8s + external DNSSEC)
|
||||
# =============================================================================
|
||||
|
||||
set -e
|
||||
@@ -40,49 +37,15 @@ print_success() {
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# Step 7.1: Deploy Unbound DNS (with dynamic IP)
|
||||
# Step 7.1: Configure CoreDNS with DNS-over-TLS for DNSSEC
|
||||
# =============================================================================
|
||||
print_step "Step 7.1: Deploying Unbound DNS resolver (dynamic IP)..."
|
||||
print_step "Step 7.1: Configuring CoreDNS with DNS-over-TLS for DNSSEC validation..."
|
||||
|
||||
if kubectl get deployment unbound -n "$NAMESPACE" &>/dev/null; then
|
||||
print_success "Unbound already deployed"
|
||||
else
|
||||
helm upgrade --install unbound infrastructure/platform/networking/dns/unbound-helm \
|
||||
-n "$NAMESPACE" \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/values.yaml \
|
||||
-f infrastructure/platform/networking/dns/unbound-helm/prod/values.yaml \
|
||||
--timeout 5m \
|
||||
--wait
|
||||
|
||||
print_success "Unbound deployed"
|
||||
fi
|
||||
# Check if CoreDNS is already configured with DNS-over-TLS
|
||||
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' 2>/dev/null | grep -o 'tls://1.1.1.1' || echo "")
|
||||
|
||||
# Wait for Unbound service to get assigned IP
|
||||
print_step "Waiting for Unbound service to get assigned IP..."
|
||||
for i in {1..30}; do
|
||||
UNBOUND_IP=$(kubectl get svc unbound-dns -n "$NAMESPACE" -o jsonpath='{.spec.clusterIP}' 2>/dev/null || echo "")
|
||||
if [ -n "$UNBOUND_IP" ] && [ "$UNBOUND_IP" != "<none>" ]; then
|
||||
echo "Unbound DNS service IP: $UNBOUND_IP"
|
||||
break
|
||||
fi
|
||||
if [ $i -eq 30 ]; then
|
||||
print_error "Failed to get Unbound service IP"
|
||||
exit 1
|
||||
fi
|
||||
sleep 2
|
||||
echo "Waiting for Unbound service IP... (attempt $i/30)"
|
||||
done
|
||||
|
||||
# =============================================================================
|
||||
# Step 7.2: Configure CoreDNS for DNSSEC (dynamic IP)
|
||||
# =============================================================================
|
||||
print_step "Step 7.2: Configuring CoreDNS for DNSSEC validation..."
|
||||
|
||||
# Check current CoreDNS forward configuration
|
||||
CURRENT_FORWARD=$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep -o 'forward \. [0-9.]*' | awk '{print $3}' || echo "")
|
||||
|
||||
if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
|
||||
echo "Updating CoreDNS to forward to Unbound ($UNBOUND_IP)..."
|
||||
if [ -z "$CURRENT_FORWARD" ]; then
|
||||
echo "Updating CoreDNS to use DNS-over-TLS with Cloudflare..."
|
||||
|
||||
# Create a temporary file with the CoreDNS configuration
|
||||
TEMP_COREFILE=$(mktemp)
|
||||
@@ -99,8 +62,9 @@ if [ "$CURRENT_FORWARD" != "$UNBOUND_IP" ]; then
|
||||
ttl 30
|
||||
}
|
||||
prometheus :9153
|
||||
forward . $UNBOUND_IP {
|
||||
max_concurrent 1000
|
||||
forward . tls://1.1.1.1 tls://1.0.0.1 {
|
||||
tls_servername cloudflare-dns.com
|
||||
health_check 5s
|
||||
}
|
||||
cache 30 {
|
||||
disable success cluster.local
|
||||
@@ -125,33 +89,32 @@ EOF
|
||||
kubectl rollout restart deployment coredns -n kube-system
|
||||
kubectl rollout status deployment coredns -n kube-system --timeout=60s
|
||||
|
||||
print_success "CoreDNS configured to forward to Unbound"
|
||||
print_success "CoreDNS configured with DNS-over-TLS"
|
||||
else
|
||||
print_success "CoreDNS already configured for Unbound"
|
||||
print_success "CoreDNS already configured with DNS-over-TLS"
|
||||
fi
|
||||
|
||||
# Get CoreDNS service IP
|
||||
COREDNS_IP=$(kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}')
|
||||
echo "CoreDNS service IP: $COREDNS_IP"
|
||||
|
||||
# =============================================================================
|
||||
# Step 7.3: Deploy Mailu Email Server (dynamic DNS)
|
||||
# Step 7.2: Deploy Mailu Email Server
|
||||
# =============================================================================
|
||||
print_step "Step 7.3: Deploying Mailu Email Server..."
|
||||
print_step "Step 7.2: Deploying Mailu Email Server..."
|
||||
|
||||
# Add Mailu Helm repository
|
||||
helm repo add mailu https://mailu.github.io/helm-charts 2>/dev/null || true
|
||||
helm repo update mailu
|
||||
|
||||
# Create temporary values file with dynamic DNS server
|
||||
TEMP_VALUES=$(mktemp)
|
||||
cat infrastructure/platform/mail/mailu-helm/values.yaml | sed "s/# custom_dns_servers: \"\" # Will be set dynamically by deployment script/custom_dns_servers: \"$UNBOUND_IP\"/" > "$TEMP_VALUES"
|
||||
|
||||
# Deploy Mailu with dynamic DNS configuration
|
||||
# Deploy Mailu with CoreDNS configuration
|
||||
helm upgrade --install mailu mailu/mailu \
|
||||
-n "$NAMESPACE" \
|
||||
-f "$TEMP_VALUES" \
|
||||
-f infrastructure/platform/mail/mailu-helm/values.yaml \
|
||||
-f infrastructure/platform/mail/mailu-helm/prod/values.yaml \
|
||||
--set global.custom_dns_servers="$COREDNS_IP" \
|
||||
--timeout 10m
|
||||
|
||||
rm -f "$TEMP_VALUES"
|
||||
|
||||
print_success "Mailu Helm release deployed"
|
||||
|
||||
# Wait for Mailu pods to be ready
|
||||
@@ -165,9 +128,9 @@ kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=admin -n "
|
||||
print_success "Mailu deployment completed"
|
||||
|
||||
# =============================================================================
|
||||
# Step 7.4: Deploy SigNoz Monitoring
|
||||
# Step 7.3: Deploy SigNoz Monitoring
|
||||
# =============================================================================
|
||||
print_step "Step 7.4: Deploying SigNoz Monitoring..."
|
||||
print_step "Step 7.3: Deploying SigNoz Monitoring..."
|
||||
|
||||
# Add SigNoz Helm repository
|
||||
helm repo add signoz https://charts.signoz.io 2>/dev/null || true
|
||||
@@ -196,9 +159,8 @@ echo -e "${GREEN}Phase 7 Deployment Complete!${NC}"
|
||||
echo "=============================================="
|
||||
echo ""
|
||||
echo "Deployed Services:"
|
||||
echo " ✓ Unbound DNS (IP: $UNBOUND_IP)"
|
||||
echo " ✓ CoreDNS (configured for DNSSEC)"
|
||||
echo " ✓ Mailu Email Server"
|
||||
echo " ✓ CoreDNS (configured with DNS-over-TLS for DNSSEC)"
|
||||
echo " ✓ Mailu Email Server (using CoreDNS IP: $COREDNS_IP)"
|
||||
echo " ✓ SigNoz Monitoring"
|
||||
echo ""
|
||||
echo "Next Steps:"
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
|
||||
# Global DNS configuration for DNSSEC validation
|
||||
global:
|
||||
# Using Unbound DNS resolver directly for DNSSEC validation
|
||||
# Unbound service is available at unbound-dns.bakery-ia.svc.cluster.local
|
||||
# Using Kubernetes CoreDNS with DNS-over-TLS for DNSSEC validation
|
||||
# CoreDNS is configured to forward external queries to Cloudflare (tls://1.1.1.1)
|
||||
# DNS server IP will be dynamically resolved during deployment
|
||||
# custom_dns_servers: "" # Will be set dynamically by deployment script
|
||||
|
||||
@@ -50,15 +50,15 @@ limits:
|
||||
messageRatelimit:
|
||||
value: "200/day"
|
||||
|
||||
# External relay configuration (Mailgun)
|
||||
# Mailu will relay all outbound emails through Mailgun SMTP
|
||||
# External relay configuration (MailerSend)
|
||||
# Mailu will relay all outbound emails through MailerSend SMTP
|
||||
# Credentials are loaded from Kubernetes secret for security
|
||||
# MailerSend requires TLS 1.2+ (supported by default on port 587)
|
||||
externalRelay:
|
||||
host: "[smtp.mailgun.org]:587"
|
||||
host: "[smtp.mailersend.net]:587"
|
||||
# Use existing secret for credentials (recommended for security)
|
||||
secretName: "mailu-mailgun-credentials"
|
||||
usernameKey: "RELAY_USERNAME"
|
||||
passwordKey: "RELAY_PASSWORD"
|
||||
# Key names use Helm chart defaults: relay-username, relay-password
|
||||
existingSecret: "mailu-mailersend-credentials"
|
||||
|
||||
# Webmail configuration
|
||||
webmail:
|
||||
@@ -218,18 +218,17 @@ rspamd:
|
||||
memory: 1Gi
|
||||
|
||||
# Network Policy
|
||||
# Note: MicroK8s uses 'ingress' namespace with different labels
|
||||
# For standard nginx-ingress, use namespace: ingress-nginx
|
||||
networkPolicy:
|
||||
enabled: true
|
||||
ingressController:
|
||||
namespace: ingress-nginx
|
||||
namespace: ingress
|
||||
podSelector: |
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: nginx-ingress-microk8s
|
||||
|
||||
# DNS Policy Configuration
|
||||
# Use Kubernetes DNS (ClusterFirst) for internal service resolution
|
||||
# DNSSEC validation for email is handled by rspamd component
|
||||
# Note: For production with DNSSEC needs, configure CoreDNS to forward to Unbound
|
||||
# DNSSEC validation is provided by CoreDNS with DNS-over-TLS (Cloudflare)
|
||||
dnsPolicy: "ClusterFirst"
|
||||
@@ -1,18 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: unbound
|
||||
description: A Helm chart for deploying Unbound DNS resolver for Bakery-IA
|
||||
type: application
|
||||
version: 0.1.0
|
||||
appVersion: "1.19.1"
|
||||
maintainers:
|
||||
- name: Bakery-IA Team
|
||||
email: devops@bakery-ia.com
|
||||
keywords:
|
||||
- dns
|
||||
- resolver
|
||||
- caching
|
||||
- unbound
|
||||
home: https://www.nlnetlabs.nl/projects/unbound/
|
||||
sources:
|
||||
- https://github.com/NLnetLabs/unbound
|
||||
- https://hub.docker.com/r/mvance/unbound
|
||||
@@ -1,64 +0,0 @@
|
||||
# Development values for unbound DNS resolver
|
||||
# Using same configuration as production for consistency
|
||||
|
||||
# Use official image for development (same as production)
|
||||
image:
|
||||
repository: "mvance/unbound"
|
||||
tag: "latest"
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Resource settings (slightly lower than production for dev)
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "128Mi"
|
||||
limits:
|
||||
cpu: "300m"
|
||||
memory: "384Mi"
|
||||
|
||||
# Single replica for development (can be scaled if needed)
|
||||
replicaCount: 1
|
||||
|
||||
# Development annotations
|
||||
podAnnotations:
|
||||
environment: "development"
|
||||
managed-by: "helm"
|
||||
|
||||
# Probe settings (same as production but slightly faster)
|
||||
probes:
|
||||
readiness:
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 30
|
||||
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"
|
||||
liveness:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 60
|
||||
command: "drill @127.0.0.1 -p 53 example.org || echo 'DNS query test'"
|
||||
|
||||
# Custom Unbound forward records for Kubernetes DNS
|
||||
config:
|
||||
enabled: true
|
||||
# The mvance/unbound image includes forward-records.conf
|
||||
# We need to add Kubernetes-specific forwarding zones
|
||||
forwardRecords: |
|
||||
# Forward all queries to Cloudflare with DNSSEC (catch-all)
|
||||
forward-zone:
|
||||
name: "."
|
||||
forward-tls-upstream: yes
|
||||
forward-addr: 1.1.1.1@853#cloudflare-dns.com
|
||||
forward-addr: 1.0.0.1@853#cloudflare-dns.com
|
||||
|
||||
# Additional server config to mark cluster.local as insecure (no DNSSEC)
|
||||
# and use stub zones for Kubernetes internal DNS (more reliable than forward)
|
||||
serverConfig: |
|
||||
domain-insecure: "cluster.local."
|
||||
private-domain: "cluster.local."
|
||||
local-zone: "10.in-addr.arpa." nodefault
|
||||
|
||||
stub-zone:
|
||||
name: "cluster.local."
|
||||
stub-addr: 10.96.0.10
|
||||
|
||||
stub-zone:
|
||||
name: "10.in-addr.arpa."
|
||||
stub-addr: 10.96.0.10
|
||||
@@ -1,50 +0,0 @@
|
||||
# Production-specific values for unbound DNS resolver
|
||||
# Overrides for the production environment
|
||||
|
||||
# Use official image for production
|
||||
image:
|
||||
repository: "mvance/unbound"
|
||||
tag: "latest"
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Production resource settings (higher limits for reliability)
|
||||
resources:
|
||||
requests:
|
||||
cpu: "200m"
|
||||
memory: "256Mi"
|
||||
limits:
|
||||
cpu: "500m"
|
||||
memory: "512Mi"
|
||||
|
||||
# Production-specific settings
|
||||
replicaCount: 2
|
||||
|
||||
# Production annotations
|
||||
podAnnotations:
|
||||
environment: "production"
|
||||
critical: "true"
|
||||
|
||||
# Anti-affinity for high availability in production
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/name
|
||||
operator: In
|
||||
values:
|
||||
- unbound
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
|
||||
# Production probe settings (more conservative)
|
||||
probes:
|
||||
readiness:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 30
|
||||
command: "sh -c 'echo \"\" | nc -w 3 127.0.0.1 53 || exit 1'"
|
||||
liveness:
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 60
|
||||
command: "sh -c 'echo \"\" | nc -w 3 127.0.0.1 53 || exit 1'"
|
||||
@@ -1,63 +0,0 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "unbound.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "unbound.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "unbound.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "unbound.labels" -}}
|
||||
helm.sh/chart: {{ include "unbound.chart" . }}
|
||||
{{ include "unbound.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "unbound.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "unbound.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/component: dns
|
||||
app.kubernetes.io/part-of: bakery-ia
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "unbound.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "unbound.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@@ -1,22 +0,0 @@
|
||||
{{- if .Values.config.enabled }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "unbound.fullname" . }}-config
|
||||
namespace: {{ .Values.global.namespace }}
|
||||
labels:
|
||||
{{- include "unbound.labels" . | nindent 4 }}
|
||||
data:
|
||||
{{- if .Values.config.forwardRecords }}
|
||||
forward-records.conf: |
|
||||
{{ .Values.config.forwardRecords | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.config.serverConfig }}
|
||||
a-records.conf: |
|
||||
{{ .Values.config.serverConfig | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.config.content }}
|
||||
unbound.conf: |
|
||||
{{ .Values.config.content | indent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,117 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "unbound.fullname" . }}
|
||||
namespace: {{ .Values.global.namespace }}
|
||||
labels:
|
||||
{{- include "unbound.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "unbound.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "unbound.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "unbound.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: dns-udp
|
||||
containerPort: {{ .Values.service.ports.dnsUdp }}
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
containerPort: {{ .Values.service.ports.dnsTcp }}
|
||||
protocol: TCP
|
||||
{{- if .Values.probes.readiness.enabled }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- {{ .Values.probes.readiness.command | quote }}
|
||||
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.probes.liveness.enabled }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- {{ .Values.probes.liveness.command | quote }}
|
||||
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
{{- if .Values.config.enabled }}
|
||||
{{- if .Values.config.forwardRecords }}
|
||||
- name: unbound-config
|
||||
mountPath: /opt/unbound/etc/unbound/forward-records.conf
|
||||
subPath: forward-records.conf
|
||||
{{- end }}
|
||||
{{- if .Values.config.serverConfig }}
|
||||
- name: unbound-config
|
||||
mountPath: /opt/unbound/etc/unbound/a-records.conf
|
||||
subPath: a-records.conf
|
||||
{{- end }}
|
||||
{{- if .Values.config.content }}
|
||||
- name: unbound-config
|
||||
mountPath: /opt/unbound/etc/unbound/unbound.conf
|
||||
subPath: unbound.conf
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.volumeMounts }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.env }}
|
||||
env:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
{{- if .Values.config.enabled }}
|
||||
- name: unbound-config
|
||||
configMap:
|
||||
name: {{ include "unbound.fullname" . }}-config
|
||||
{{- end }}
|
||||
{{- with .Values.volumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.extraInitContainers }}
|
||||
initContainers:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.extraContainers }}
|
||||
containers:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,27 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Values.global.dnsServiceName }}
|
||||
namespace: {{ .Values.global.namespace }}
|
||||
labels:
|
||||
{{- include "unbound.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
{{- if .Values.service.clusterIP }}
|
||||
clusterIP: {{ .Values.service.clusterIP }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: dns-udp
|
||||
port: {{ .Values.service.ports.dnsUdp }}
|
||||
targetPort: {{ .Values.service.ports.dnsUdp }}
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: {{ .Values.service.ports.dnsTcp }}
|
||||
targetPort: {{ .Values.service.ports.dnsTcp }}
|
||||
protocol: TCP
|
||||
selector:
|
||||
{{- include "unbound.selectorLabels" . | nindent 4 }}
|
||||
@@ -1,13 +0,0 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "unbound.serviceAccountName" . }}
|
||||
namespace: {{ .Values.global.namespace }}
|
||||
labels:
|
||||
{{- include "unbound.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,105 +0,0 @@
|
||||
# Default values for unbound DNS resolver
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# Global settings
|
||||
global:
|
||||
# DNS service name for other services to reference
|
||||
dnsServiceName: "unbound-dns"
|
||||
namespace: "bakery-ia"
|
||||
|
||||
# Unbound image configuration
|
||||
image:
|
||||
repository: "mvance/unbound"
|
||||
tag: "latest"
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Deployment configuration
|
||||
replicaCount: 1
|
||||
|
||||
# Resource limits and requests
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "128Mi"
|
||||
limits:
|
||||
cpu: "300m"
|
||||
memory: "384Mi"
|
||||
|
||||
# Security context
|
||||
securityContext:
|
||||
capabilities:
|
||||
add: ["NET_BIND_SERVICE"]
|
||||
|
||||
# Service configuration
|
||||
service:
|
||||
type: "ClusterIP"
|
||||
# Dynamic ClusterIP - Kubernetes will assign automatically
|
||||
# clusterIP: "" # Leave empty for automatic assignment
|
||||
ports:
|
||||
dnsUdp: 53
|
||||
dnsTcp: 53
|
||||
|
||||
# Health probes configuration
|
||||
probes:
|
||||
readiness:
|
||||
enabled: true
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 30
|
||||
# Simple TCP connectivity check - more reliable than DNS queries
|
||||
# Tests if the DNS port is listening and responding
|
||||
command: "sh -c 'echo \"\" | nc -w 2 127.0.0.1 53 || exit 1'"
|
||||
# Alternative: use curl if available
|
||||
# command: "curl -s --max-time 2 http://127.0.0.1:53 || exit 1"
|
||||
liveness:
|
||||
enabled: true
|
||||
initialDelaySeconds: 45
|
||||
periodSeconds: 60
|
||||
# Simple TCP connectivity check - more reliable than DNS queries
|
||||
# Tests if the DNS port is listening and responding
|
||||
command: "sh -c 'echo \"\" | nc -w 2 127.0.0.1 53 || exit 1'"
|
||||
# Alternative: use curl if available
|
||||
# command: "curl -s --max-time 2 http://127.0.0.1:53 || exit 1"
|
||||
|
||||
# Additional environment variables
|
||||
env: {}
|
||||
|
||||
# Additional volume mounts
|
||||
volumeMounts: []
|
||||
|
||||
# Additional volumes
|
||||
volumes: []
|
||||
|
||||
# Node selector
|
||||
nodeSelector: {}
|
||||
|
||||
# Tolerations
|
||||
tolerations: []
|
||||
|
||||
# Affinity
|
||||
affinity: {}
|
||||
|
||||
# Pod annotations
|
||||
podAnnotations: {}
|
||||
|
||||
# Service annotations
|
||||
serviceAnnotations: {}
|
||||
|
||||
# Custom unbound configuration
|
||||
config:
|
||||
enabled: false
|
||||
|
||||
# Additional containers (sidecars)
|
||||
extraContainers: []
|
||||
|
||||
# Additional init containers
|
||||
extraInitContainers: []
|
||||
|
||||
# Service account configuration
|
||||
serviceAccount:
|
||||
create: false
|
||||
annotations: {}
|
||||
name: ""
|
||||
|
||||
# Pod security context
|
||||
podSecurityContext: {}
|
||||
@@ -56,3 +56,5 @@ spec:
|
||||
# See infrastructure/cicd/gitea/values.yaml for ingress configuration
|
||||
# NOTE: Mail ingress is deployed separately via mailu-helm resource
|
||||
# to avoid 503 errors when Mailu is not running
|
||||
# NOTE: Monitoring ingress is deployed separately via SigNoz Helm chart
|
||||
# See infrastructure/monitoring/signoz/signoz-values-prod.yaml for monitoring configuration
|
||||
|
||||
@@ -25,3 +25,5 @@ patches:
|
||||
value: "https://localhost,https://localhost:3000,https://localhost:3001,https://127.0.0.1,https://127.0.0.1:3000,https://127.0.0.1:3001,https://bakery-ia.local,https://registry.bakery-ia.local,https://gitea.bakery-ia.local,http://localhost,http://localhost:3000,http://localhost:3001,http://127.0.0.1,http://127.0.0.1:3000"
|
||||
# NOTE: Gitea and Registry ingresses are managed by Gitea Helm chart (infrastructure/cicd/gitea/values.yaml)
|
||||
# NOTE: Mail ingress (mail.bakery-ia.dev) is deployed separately via mailu-helm Tilt resource
|
||||
# NOTE: Monitoring ingress (monitoring.bakery-ia.local) is deployed separately via SigNoz Helm chart
|
||||
# See infrastructure/monitoring/signoz/signoz-values-dev.yaml for monitoring configuration
|
||||
|
||||
@@ -37,4 +37,6 @@ patches:
|
||||
value: "http01"
|
||||
# NOTE: Gitea and Registry ingresses are managed by Gitea Helm chart
|
||||
# See infrastructure/cicd/gitea/values-prod.yaml for production ingress configuration
|
||||
# NOTE: mail.bakewise.ai is handled by separate mailu ingress
|
||||
# NOTE: mail.bakewise.ai is handled by separate mailu ingress
|
||||
# NOTE: monitoring.bakewise.ai is handled by separate SigNoz ingress
|
||||
# See infrastructure/monitoring/signoz/signoz-values-prod.yaml for monitoring ingress configuration
|
||||
@@ -12,5 +12,4 @@ spec:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
storageClassName: standard
|
||||
storage: 100Gi
|
||||
@@ -45,6 +45,7 @@ spec:
containers:
- name: postgres
  image: postgres:17-alpine
  command: ["docker-entrypoint.sh", "-c", "config_file=/etc/postgresql/postgresql.conf"]
  ports:
  - containerPort: 5432
    name: postgres
@@ -66,11 +67,23 @@
value: demo_session_db
- name: PGDATA
  value: /var/lib/postgresql/data/pgdata
- name: POSTGRES_HOST_SSL
  value: "on"
- name: PGSSLCERT
  value: /tls/server-cert.pem
- name: PGSSLKEY
  value: /tls/server-key.pem
- name: PGSSLROOTCERT
  value: /tls/ca-cert.pem
volumeMounts:
- name: demo-session-db-data
  mountPath: /var/lib/postgresql/data
- name: init-scripts
  mountPath: /docker-entrypoint-initdb.d
- name: tls-certs-writable
  mountPath: /tls
- name: postgres-config
  mountPath: /etc/postgresql
  readOnly: true
resources:
  requests:
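
The hunk above wires server-side TLS material into the demo-session Postgres container (certificate, key, and CA under /tls) and points it at a custom postgresql.conf. A minimal client-side check of that setup might look like the sketch below; the host, credentials, and library choice (psycopg2) are assumptions for illustration, since the real values come from Kubernetes secrets.

```python
# Hedged sketch: verify a TLS connection against the demo-session Postgres.
# Host, user, and password are placeholders; only the CA path mirrors the
# PGSSLROOTCERT value shown in the hunk above.
import psycopg2

conn = psycopg2.connect(
    host="demo-session-db",          # assumed Service name
    dbname="demo_session_db",
    user="postgres",                 # placeholder; real user comes from a Secret
    password="change-me",            # placeholder
    sslmode="verify-full",           # require certificate verification
    sslrootcert="/tls/ca-cert.pem",  # CA certificate mounted from postgres-tls
)
with conn.cursor() as cur:
    cur.execute("SELECT ssl FROM pg_stat_ssl WHERE pid = pg_backend_pid()")
    print(cur.fetchone())  # (True,) when the session is encrypted
conn.close()
```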
@@ -103,6 +116,9 @@ spec:
- name: demo-session-db-data
  persistentVolumeClaim:
    claimName: demo-session-db-pvc
- name: init-scripts
  configMap:
    name: postgres-init-config
- name: tls-certs-source
  secret:
    secretName: postgres-tls
@@ -115,6 +131,9 @@ spec:
path: ca-cert.pem
- name: tls-certs-writable
  emptyDir: {}
- name: postgres-config
  configMap:
    name: postgres-logging-config

---
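
The volume pair above (tls-certs-source, a read-only Secret, and tls-certs-writable, an emptyDir mounted at /tls) suggests the certificates are staged into a writable location before Postgres starts, since Postgres refuses to load a private key with permissive file modes. The copy step itself is not part of this hunk; the sketch below only illustrates that assumed idea.

```python
# Assumed illustration of the cert-staging step (not shown in the diff):
# copy TLS material from the read-only Secret mount into the writable
# emptyDir and tighten the key's permissions for Postgres.
import os
import shutil

SOURCE = "/tls-source"   # assumed mount point of the tls-certs-source Secret
TARGET = "/tls"          # emptyDir mounted into the postgres container

for name in ("server-cert.pem", "server-key.pem", "ca-cert.pem"):
    shutil.copy(os.path.join(SOURCE, name), os.path.join(TARGET, name))

# Postgres requires the server key to be private to its own user.
os.chmod(os.path.join(TARGET, "server-key.pem"), 0o600)
```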
@@ -154,4 +173,3 @@ spec:
resources:
  requests:
    storage: 1Gi
storageClassName: standard
@@ -88,7 +88,7 @@ spec:
key: AI_INSIGHTS_DB_USER
containers:
- name: ai-insights-service
  image: bakery/ai-insights-service:dev
  image: registry.bakewise.ai/bakery-admin/ai_insights:c8dc021e138c3ff834a50326a775ed4079f06c0c
  ports:
  - containerPort: 8000
    name: http
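
Throughout the remaining hunks, mutable tags such as :dev and :latest are replaced with fully qualified references pinned to the commit SHA. A small helper illustrating the apparent naming convention (registry, organisation, and repository names are taken from the hunks; the helper itself is not part of the repo):

```python
# Illustration of the image-pinning pattern seen in these hunks:
# <registry>/<org>/<service>:<full commit SHA>
REGISTRY = "registry.bakewise.ai"
ORG = "bakery-admin"

def image_ref(service: str, commit_sha: str) -> str:
    """Build a fully pinned image reference for a service."""
    return f"{REGISTRY}/{ORG}/{service}:{commit_sha}"

print(image_ref("ai_insights", "c8dc021e138c3ff834a50326a775ed4079f06c0c"))
# registry.bakewise.ai/bakery-admin/ai_insights:c8dc021e138c3ff834a50326a775ed4079f06c0c
```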
@@ -2,10 +2,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: ai-insights-migration
  name: ai-insights-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: ai-insights-migration
    app.kubernetes.io/name: ai-insights-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -13,7 +13,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: ai-insights-migration
      app.kubernetes.io/name: ai-insights-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -29,7 +29,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/ai-insights-service
  image: registry.bakewise.ai/bakery-admin/ai_insights:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "/app/shared/scripts/run_migrations.py", "ai_insights"]
  env:
  - name: AI_INSIGHTS_DATABASE_URL
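
The migration Job (and its pod labels) gains a -c8dc021e suffix, matching the first eight characters of the pinned image SHA. Because Job specs are immutable once created, suffixing the name per commit lets each deploy create a fresh migration Job instead of trying to patch the old one. A sketch of the assumed naming rule:

```python
# Assumed naming rule behind 'ai-insights-migration-c8dc021e':
# append the first 8 characters of the commit SHA used for the image tag.
def migration_job_name(service: str, commit_sha: str, short_len: int = 8) -> str:
    return f"{service}-migration-{commit_sha[:short_len]}"

print(migration_job_name("ai-insights", "c8dc021e138c3ff834a50326a775ed4079f06c0c"))
# ai-insights-migration-c8dc021e
```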
@@ -82,7 +82,7 @@ spec:
key: ALERT_PROCESSOR_DB_USER
containers:
- name: alert-processor
  image: bakery/alert-processor:latest
  image: registry.bakewise.ai/bakery-admin/alert_processor:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
  ports:
  - containerPort: 8000
@@ -121,6 +121,15 @@ spec:
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
volumeMounts:
- name: redis-tls
  mountPath: /tls
  readOnly: true
volumes:
- name: redis-tls
  secret:
    secretName: redis-tls-secret
    defaultMode: 0400
---
apiVersion: v1
kind: Service
@@ -2,10 +2,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: alert-processor-migration
  name: alert-processor-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: alert-processor-migration
    app.kubernetes.io/name: alert-processor-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -13,7 +13,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: alert-processor-migration
      app.kubernetes.io/name: alert-processor-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -29,7 +29,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/alert-processor
  image: registry.bakewise.ai/bakery-admin/alert_processor:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "/app/shared/scripts/run_migrations.py", "alert_processor"]
  env:
  - name: ALERT_PROCESSOR_DATABASE_URL
@@ -110,7 +110,7 @@ spec:
value: "auth_db"
containers:
- name: auth-service
  image: bakery/auth-service:latest
  image: registry.bakewise.ai/bakery-admin/auth:c8dc021e138c3ff834a50326a775ed4079f06c0c
  ports:
  - containerPort: 8000
    name: http
@@ -178,6 +178,10 @@ spec:
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 5
volumeMounts:
- name: redis-tls
  mountPath: /tls
  readOnly: true
volumes:
- name: redis-tls
  secret:
@@ -2,10 +2,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: auth-migration
  name: auth-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: auth-migration
    app.kubernetes.io/name: auth-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -13,7 +13,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: auth-migration
      app.kubernetes.io/name: auth-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -29,7 +29,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/auth-service
  image: registry.bakewise.ai/bakery-admin/auth:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "/app/shared/scripts/run_migrations.py", "auth"]
  env:
  - name: AUTH_DATABASE_URL
@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-cleanup-worker
  name: demo-cleanup-worker-c8dc021e
  namespace: bakery-ia
  labels:
    app: demo-cleanup-worker
@@ -53,7 +53,7 @@ spec:
cpu: "100m"
containers:
- name: worker
  image: bakery/demo-session-service
  image: registry.bakewise.ai/bakery-admin/demo_session:c8dc021e138c3ff834a50326a775ed4079f06c0c
  imagePullPolicy: IfNotPresent
  command:
  - python
@@ -71,7 +71,7 @@ spec:
name: redis-secrets
key: REDIS_PASSWORD
- name: REDIS_URL
  value: "rediss://:$(REDIS_PASSWORD)@redis-service:6379/0?ssl_cert_reqs=none"
  value: "rediss://:$(REDIS_PASSWORD)@redis-service:6379/0"
- name: LOG_LEVEL
  value: "INFO"
- name: INVENTORY_SERVICE_URL
@@ -120,4 +120,13 @@ spec:
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 5
volumeMounts:
- name: redis-tls
  mountPath: /tls
  readOnly: true
restartPolicy: Always
volumes:
- name: redis-tls
  secret:
    secretName: redis-tls-secret
    defaultMode: 0400
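
The worker's REDIS_URL drops the ?ssl_cert_reqs=none escape hatch while a redis-tls secret is mounted at /tls, i.e. the client is now expected to verify the Redis server certificate instead of skipping verification. A hedged client-side sketch (the CA file name under /tls is an assumption; it depends on the keys stored in redis-tls-secret):

```python
# Hedged sketch: connect to Redis over TLS with certificate verification,
# matching the rediss:// URL and the /tls mount added in the hunks above.
import os
import redis  # redis-py

r = redis.Redis.from_url(
    os.environ["REDIS_URL"],       # rediss://:<password>@redis-service:6379/0
    ssl_ca_certs="/tls/ca.crt",    # assumed CA file name inside redis-tls-secret
    socket_timeout=5,
)
r.ping()  # raises on TLS handshake or certificate-verification failure
```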
@@ -20,7 +20,7 @@ spec:
serviceAccountName: demo-session-sa
containers:
- name: demo-session-service
  image: bakery/demo-session-service:latest
  image: registry.bakewise.ai/bakery-admin/demo_session:c8dc021e138c3ff834a50326a775ed4079f06c0c
  ports:
  - containerPort: 8000
    name: http
@@ -43,7 +43,7 @@ spec:
name: redis-secrets
key: REDIS_PASSWORD
- name: REDIS_URL
  value: "rediss://:$(REDIS_PASSWORD)@redis-service:6379/0?ssl_cert_reqs=none"
  value: "rediss://:$(REDIS_PASSWORD)@redis-service:6379/0"
- name: AUTH_SERVICE_URL
  value: "http://auth-service:8000"
- name: TENANT_SERVICE_URL
@@ -77,6 +77,10 @@ spec:
httpGet:
  path: /health
  port: 8000
volumeMounts:
- name: redis-tls
  mountPath: /tls
  readOnly: true
initialDelaySeconds: 30
periodSeconds: 30
readinessProbe:
@@ -133,3 +137,8 @@ spec:
limits:
  memory: "128Mi"
  cpu: "100m"
volumes:
- name: redis-tls
  secret:
    secretName: redis-tls-secret
    defaultMode: 0400
@@ -1,10 +1,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: demo-session-migration
  name: demo-session-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: demo-session-migration
    app.kubernetes.io/name: demo-session-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -12,7 +12,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: demo-session-migration
      app.kubernetes.io/name: demo-session-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -28,7 +28,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/demo-session-service
  image: registry.bakewise.ai/bakery-admin/demo_session:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "/app/shared/scripts/run_migrations.py", "demo_session"]
  env:
  - name: DEMO_SESSION_DATABASE_URL
@@ -109,7 +109,7 @@ spec:
value: "distribution_db"
containers:
- name: distribution-service
  image: bakery/distribution-service:latest
  image: registry.bakewise.ai/bakery-admin/distribution:c8dc021e138c3ff834a50326a775ed4079f06c0c
  imagePullPolicy: IfNotPresent
  ports:
  - containerPort: 8000
@@ -2,10 +2,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: distribution-migration
  name: distribution-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: distribution-migration
    app.kubernetes.io/name: distribution-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -13,7 +13,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: distribution-migration
      app.kubernetes.io/name: distribution-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -29,7 +29,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/distribution-service
  image: registry.bakewise.ai/bakery-admin/distribution:c8dc021e138c3ff834a50326a775ed4079f06c0c
  imagePullPolicy: IfNotPresent
  command: ["python", "/app/shared/scripts/run_migrations.py", "distribution"]
  env:
@@ -3,7 +3,7 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: external-data-rotation
  name: external-data-rotation-c8dc021e
  namespace: bakery-ia
  labels:
    app: external-service
@@ -35,7 +35,7 @@ spec:

containers:
- name: data-rotator
  image: bakery/external-service:latest
  image: registry.bakewise.ai/bakery-admin/external:c8dc021e138c3ff834a50326a775ed4079f06c0c
  imagePullPolicy: Always

  command:
@@ -114,7 +114,7 @@ spec:

containers:
- name: external-service
  image: bakery/external-service:latest
  image: registry.bakewise.ai/bakery-admin/external:c8dc021e138c3ff834a50326a775ed4079f06c0c
  ports:
  - containerPort: 8000
    name: http
@@ -187,6 +187,10 @@ spec:
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 5
volumeMounts:
- name: redis-tls
  mountPath: /tls
  readOnly: true
volumes:
- name: redis-tls
  secret:
@@ -3,7 +3,7 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: external-data-init
  name: external-data-init-c8dc021e
  namespace: bakery-ia
  labels:
    app: external-service
@@ -51,7 +51,7 @@ spec:

containers:
- name: data-loader
  image: bakery/external-service:latest
  image: registry.bakewise.ai/bakery-admin/external:c8dc021e138c3ff834a50326a775ed4079f06c0c
  imagePullPolicy: Always

  command:
@@ -2,10 +2,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: external-migration
  name: external-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: external-migration
    app.kubernetes.io/name: external-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -13,7 +13,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: external-migration
      app.kubernetes.io/name: external-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -29,7 +29,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/external-service
  image: registry.bakewise.ai/bakery-admin/external:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "/app/shared/scripts/run_migrations.py", "external"]
  env:
  - name: EXTERNAL_DATABASE_URL
@@ -88,7 +88,7 @@ spec:
key: FORECASTING_DB_USER
containers:
- name: forecasting-service
  image: bakery/forecasting-service:latest
  image: registry.bakewise.ai/bakery-admin/forecasting:c8dc021e138c3ff834a50326a775ed4079f06c0c
  ports:
  - containerPort: 8000
    name: http
@@ -2,10 +2,10 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: forecasting-migration
  name: forecasting-migration-c8dc021e
  namespace: bakery-ia
  labels:
    app.kubernetes.io/name: forecasting-migration
    app.kubernetes.io/name: forecasting-migration-c8dc021e
    app.kubernetes.io/component: migration
    app.kubernetes.io/part-of: bakery-ia
spec:
@@ -13,7 +13,7 @@ spec:
template:
  metadata:
    labels:
      app.kubernetes.io/name: forecasting-migration
      app.kubernetes.io/name: forecasting-migration-c8dc021e
      app.kubernetes.io/component: migration
  spec:
    initContainers:
@@ -29,7 +29,7 @@ spec:
cpu: "100m"
containers:
- name: migrate
  image: bakery/forecasting-service
  image: registry.bakewise.ai/bakery-admin/forecasting:c8dc021e138c3ff834a50326a775ed4079f06c0c
  command: ["python", "/app/shared/scripts/run_migrations.py", "forecasting"]
  env:
  - name: FORECASTING_DATABASE_URL
Some files were not shown because too many files have changed in this diff.