New enterprise feature2
Tiltfile | 4 ++++
@@ -532,6 +532,10 @@ k8s_resource('demo-session-service',
              resource_deps=['demo-session-migration', 'redis'],
              labels=['services'])
 
+k8s_resource('demo-cleanup-worker',
+             resource_deps=['demo-session-service', 'redis'],
+             labels=['services', 'workers'])
+
 k8s_resource('distribution-service',
              resource_deps=['distribution-migration', 'redis', 'rabbitmq'],
              labels=['services'])
@@ -481,6 +481,19 @@ class AuthMiddleware(BaseHTTPMiddleware):
                     b"x-is-demo", b"true"
                 ))
 
+                # Add demo session context headers for backend services
+                demo_session_id = user_context.get("demo_session_id", "")
+                if demo_session_id:
+                    request.headers.__dict__["_list"].append((
+                        b"x-demo-session-id", demo_session_id.encode()
+                    ))
+
+                demo_account_type = user_context.get("demo_account_type", "")
+                if demo_account_type:
+                    request.headers.__dict__["_list"].append((
+                        b"x-demo-account-type", demo_account_type.encode()
+                    ))
+
         # Add hierarchical access headers if tenant context exists
         if tenant_id:
             tenant_access_type = getattr(request.state, 'tenant_access_type', 'direct')
@@ -23,34 +23,31 @@ spec:
             app: demo-cleanup
         spec:
           containers:
-          - name: cleanup
-            image: bakery/demo-session-service:latest
+          - name: cleanup-trigger
+            image: curlimages/curl:latest
             command:
-            - python
+            - sh
             - -c
             - |
-              import asyncio
-              import httpx
-              async def cleanup():
-                  async with httpx.AsyncClient() as client:
-                      response = await client.post("http://demo-session-service:8000/api/v1/demo/operations/cleanup")
-                      print(response.json())
-              asyncio.run(cleanup())
-            env:
-            - name: DEMO_SESSION_DATABASE_URL
-              valueFrom:
-                secretKeyRef:
-                  name: database-secrets
-                  key: DEMO_SESSION_DATABASE_URL
-            - name: REDIS_URL
-              value: "redis://redis-service:6379/0"
-            - name: LOG_LEVEL
-              value: "INFO"
+              echo "Triggering demo session cleanup..."
+              response=$(curl -s -w "\n%{http_code}" -X POST http://demo-session-service:8000/api/v1/demo/operations/cleanup)
+              http_code=$(echo "$response" | tail -n 1)
+              body=$(echo "$response" | sed '$d')
+              echo "Response: $body"
+              echo "HTTP Status: $http_code"
+              if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ]; then
+                echo "Cleanup job enqueued successfully"
+                exit 0
+              else
+                echo "Failed to enqueue cleanup job"
+                exit 1
+              fi
             resources:
               requests:
-                memory: "128Mi"
-                cpu: "50m"
+                memory: "32Mi"
+                cpu: "10m"
               limits:
-                memory: "256Mi"
-                cpu: "200m"
+                memory: "64Mi"
+                cpu: "50m"
           restartPolicy: OnFailure
+          activeDeadlineSeconds: 30
@@ -0,0 +1,96 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: demo-cleanup-worker
+  namespace: bakery-ia
+  labels:
+    app: demo-cleanup-worker
+    component: background-jobs
+    service: demo-session
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: demo-cleanup-worker
+  template:
+    metadata:
+      labels:
+        app: demo-cleanup-worker
+        component: background-jobs
+        service: demo-session
+    spec:
+      containers:
+      - name: worker
+        image: bakery/demo-session-service:latest
+        imagePullPolicy: IfNotPresent
+        command:
+        - python
+        - -m
+        - app.jobs.cleanup_worker
+        env:
+        - name: DEMO_SESSION_DATABASE_URL
+          valueFrom:
+            secretKeyRef:
+              name: database-secrets
+              key: DEMO_SESSION_DATABASE_URL
+        - name: REDIS_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: redis-secrets
+              key: REDIS_PASSWORD
+        - name: REDIS_URL
+          value: "rediss://:$(REDIS_PASSWORD)@redis-service:6379/0?ssl_cert_reqs=none"
+        - name: LOG_LEVEL
+          value: "INFO"
+        - name: INTERNAL_API_KEY
+          valueFrom:
+            secretKeyRef:
+              name: demo-internal-api-key
+              key: INTERNAL_API_KEY
+        - name: INVENTORY_SERVICE_URL
+          value: "http://inventory-service:8000"
+        - name: RECIPES_SERVICE_URL
+          value: "http://recipes-service:8000"
+        - name: SALES_SERVICE_URL
+          value: "http://sales-service:8000"
+        - name: ORDERS_SERVICE_URL
+          value: "http://orders-service:8000"
+        - name: PRODUCTION_SERVICE_URL
+          value: "http://production-service:8000"
+        - name: SUPPLIERS_SERVICE_URL
+          value: "http://suppliers-service:8000"
+        - name: POS_SERVICE_URL
+          value: "http://pos-service:8000"
+        - name: PROCUREMENT_SERVICE_URL
+          value: "http://procurement-service:8000"
+        - name: DISTRIBUTION_SERVICE_URL
+          value: "http://distribution-service:8000"
+        - name: FORECASTING_SERVICE_URL
+          value: "http://forecasting-service:8000"
+        resources:
+          requests:
+            memory: "256Mi"
+            cpu: "100m"
+          limits:
+            memory: "512Mi"
+            cpu: "500m"
+        livenessProbe:
+          exec:
+            command:
+            - python
+            - -c
+            - "import sys; sys.exit(0)"
+          initialDelaySeconds: 30
+          periodSeconds: 60
+          timeoutSeconds: 5
+          failureThreshold: 3
+        readinessProbe:
+          exec:
+            command:
+            - python
+            - -c
+            - "import sys; sys.exit(0)"
+          initialDelaySeconds: 10
+          periodSeconds: 30
+          timeoutSeconds: 5
+      restartPolicy: Always
@@ -15,6 +15,7 @@ resources:
   - configmaps/postgres-logging-config.yaml
   - secrets/postgres-tls-secret.yaml
   - secrets/redis-tls-secret.yaml
+  - secrets/demo-internal-api-key-secret.yaml
 
   # Additional configs
   - configs/postgres-init-config.yaml
@@ -127,6 +128,9 @@ resources:
   - components/demo-session/service.yaml
   - components/demo-session/deployment.yaml
 
+  # Demo cleanup worker (background job processor)
+  - deployments/demo-cleanup-worker.yaml
+
   # Microservices
   - components/auth/auth-service.yaml
   - components/tenant/tenant-service.yaml
@@ -5,6 +5,7 @@ Demo Operations API - Business operations for demo session management
 from fastapi import APIRouter, Depends, HTTPException, Path
 import structlog
 import jwt
+from datetime import datetime, timezone
 
 from app.api.schemas import DemoSessionResponse, DemoSessionStats
 from app.services import DemoSessionManager, DemoCleanupService
@@ -83,10 +84,111 @@ async def run_cleanup(
     db: AsyncSession = Depends(get_db),
     redis: DemoRedisWrapper = Depends(get_redis)
 ):
-    """Manually trigger session cleanup (BUSINESS OPERATION - Internal endpoint for CronJob)"""
-    cleanup_service = DemoCleanupService(db, redis)
-    stats = await cleanup_service.cleanup_expired_sessions()
-    return stats
+    """
+    Trigger session cleanup via background worker (async via Redis queue)
+
+    Returns immediately after enqueuing work - does not block
+    """
+    from datetime import timedelta
+    from sqlalchemy import select
+    from app.models.demo_session import DemoSession, DemoSessionStatus
+    import uuid
+    import json
+
+    logger.info("Starting demo session cleanup enqueue")
+
+    now = datetime.now(timezone.utc)
+    stuck_threshold = now - timedelta(minutes=5)
+
+    # Find expired sessions
+    result = await db.execute(
+        select(DemoSession).where(
+            DemoSession.status.in_([
+                DemoSessionStatus.PENDING,
+                DemoSessionStatus.READY,
+                DemoSessionStatus.PARTIAL,
+                DemoSessionStatus.FAILED,
+                DemoSessionStatus.ACTIVE
+            ]),
+            DemoSession.expires_at < now
+        )
+    )
+    expired_sessions = result.scalars().all()
+
+    # Find stuck sessions
+    stuck_result = await db.execute(
+        select(DemoSession).where(
+            DemoSession.status == DemoSessionStatus.PENDING,
+            DemoSession.created_at < stuck_threshold
+        )
+    )
+    stuck_sessions = stuck_result.scalars().all()
+
+    all_sessions = list(expired_sessions) + list(stuck_sessions)
+
+    if not all_sessions:
+        return {
+            "status": "no_sessions",
+            "message": "No sessions to cleanup",
+            "total_expired": 0,
+            "total_stuck": 0
+        }
+
+    # Create cleanup job
+    job_id = str(uuid.uuid4())
+    session_ids = [s.session_id for s in all_sessions]
+
+    job_data = {
+        "job_id": job_id,
+        "session_ids": session_ids,
+        "created_at": now.isoformat(),
+        "retry_count": 0
+    }
+
+    # Enqueue job
+    client = await redis.get_client()
+    await client.lpush("cleanup:queue", json.dumps(job_data))
+
+    logger.info(
+        "Cleanup job enqueued",
+        job_id=job_id,
+        session_count=len(session_ids),
+        expired_count=len(expired_sessions),
+        stuck_count=len(stuck_sessions)
+    )
+
+    return {
+        "status": "enqueued",
+        "job_id": job_id,
+        "session_count": len(session_ids),
+        "total_expired": len(expired_sessions),
+        "total_stuck": len(stuck_sessions),
+        "message": f"Cleanup job enqueued for {len(session_ids)} sessions"
+    }
+
+
+@router.get(
+    route_builder.build_operations_route("cleanup/{job_id}", include_tenant_prefix=False),
+    response_model=dict
+)
+async def get_cleanup_status(
+    job_id: str,
+    redis: DemoRedisWrapper = Depends(get_redis)
+):
+    """Get status of cleanup job"""
+    import json
+
+    client = await redis.get_client()
+    status_key = f"cleanup:job:{job_id}:status"
+
+    status_data = await client.get(status_key)
+    if not status_data:
+        return {
+            "status": "not_found",
+            "message": "Job not found or expired (jobs expire after 1 hour)"
+        }
+
+    return json.loads(status_data)
 
 
 @router.post(
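For context, the endpoint pair above can be exercised end to end from any in-cluster pod. A minimal sketch, assuming the service DNS name and port already used by the CronJob and an available httpx dependency (the script itself is not part of this commit):

# Sketch: trigger cleanup and poll the job status written by the worker
import asyncio
import httpx

BASE = "http://demo-session-service:8000/api/v1/demo/operations"

async def trigger_and_wait() -> dict:
    async with httpx.AsyncClient(timeout=10.0) as client:
        # Enqueue the cleanup job; the endpoint returns immediately with a job_id
        resp = await client.post(f"{BASE}/cleanup")
        resp.raise_for_status()
        job = resp.json()
        if job.get("status") != "enqueued":
            return job  # e.g. "no_sessions"

        # Poll the status key (kept in Redis for 1 hour)
        for _ in range(30):
            status = (await client.get(f"{BASE}/cleanup/{job['job_id']}")).json()
            if status.get("status") in ("completed", "failed"):
                return status
            await asyncio.sleep(2)
        return {"status": "timeout", "job_id": job["job_id"]}

if __name__ == "__main__":
    print(asyncio.run(trigger_and_wait()))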
services/demo_session/app/jobs/__init__.py | 7 (new file)
@@ -0,0 +1,7 @@
+"""
+Background Jobs Package
+"""
+
+from .cleanup_worker import CleanupWorker, run_cleanup_worker
+
+__all__ = ["CleanupWorker", "run_cleanup_worker"]
services/demo_session/app/jobs/cleanup_worker.py | 272 (new file)
@@ -0,0 +1,272 @@
+"""
+Background Cleanup Worker
+Processes demo session cleanup jobs from Redis queue
+"""
+
+import asyncio
+import structlog
+from datetime import datetime, timezone, timedelta
+from typing import Dict, Any
+import json
+import uuid
+from contextlib import asynccontextmanager
+
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.core.database import DatabaseManager
+from app.core.redis_wrapper import DemoRedisWrapper
+from app.services.data_cloner import DemoDataCloner
+from app.models.demo_session import DemoSession, DemoSessionStatus
+
+logger = structlog.get_logger()
+
+
+@asynccontextmanager
+async def get_db_session():
+    """Get database session context manager"""
+    db_manager = DatabaseManager()
+    db_manager.initialize()
+    async with db_manager.session_factory() as session:
+        try:
+            yield session
+            await session.commit()
+        except Exception:
+            await session.rollback()
+            raise
+        finally:
+            await session.close()
+
+
+class CleanupWorker:
+    """Background worker for processing cleanup jobs"""
+
+    def __init__(self, redis: DemoRedisWrapper):
+        self.redis = redis
+        self.queue_key = "cleanup:queue"
+        self.processing_key = "cleanup:processing"
+        self.running = False
+
+    async def start(self):
+        """Start the worker (runs indefinitely)"""
+        self.running = True
+        logger.info("Cleanup worker started")
+
+        while self.running:
+            try:
+                await self._process_next_job()
+            except Exception as e:
+                logger.error("Worker error", error=str(e), exc_info=True)
+                await asyncio.sleep(5)  # Back off on error
+
+    async def stop(self):
+        """Stop the worker gracefully"""
+        self.running = False
+        logger.info("Cleanup worker stopped")
+
+    async def _process_next_job(self):
+        """Process next job from queue"""
+        client = await self.redis.get_client()
+
+        # Blocking pop from queue (5 second timeout)
+        result = await client.brpoplpush(
+            self.queue_key,
+            self.processing_key,
+            timeout=5
+        )
+
+        if not result:
+            return  # No job available
+
+        job_data = json.loads(result)
+        job_id = job_data["job_id"]
+        session_ids = job_data["session_ids"]
+
+        logger.info(
+            "Processing cleanup job",
+            job_id=job_id,
+            session_count=len(session_ids)
+        )
+
+        try:
+            # Process cleanup
+            stats = await self._cleanup_sessions(session_ids)
+
+            # Mark job as complete
+            await self._mark_job_complete(job_id, stats)
+
+            # Remove from processing queue
+            await client.lrem(self.processing_key, 1, result)
+
+            logger.info("Job completed", job_id=job_id, stats=stats)
+
+        except Exception as e:
+            logger.error("Job failed", job_id=job_id, error=str(e), exc_info=True)
+
+            # Check retry count
+            retry_count = job_data.get("retry_count", 0)
+            if retry_count < 3:
+                # Retry - put back in queue
+                job_data["retry_count"] = retry_count + 1
+                await client.lpush(self.queue_key, json.dumps(job_data))
+                logger.info("Job requeued for retry", job_id=job_id, retry_count=retry_count + 1)
+            else:
+                # Max retries reached - mark as failed
+                await self._mark_job_failed(job_id, str(e))
+                logger.error("Job failed after max retries", job_id=job_id)
+
+            # Remove from processing queue
+            await client.lrem(self.processing_key, 1, result)
+
+    async def _cleanup_sessions(self, session_ids: list) -> Dict[str, Any]:
+        """Execute cleanup for list of sessions with parallelization"""
+        async with get_db_session() as db:
+            redis = DemoRedisWrapper()
+            data_cloner = DemoDataCloner(db, redis)
+
+            try:
+                # Get sessions to cleanup
+                result = await db.execute(
+                    select(DemoSession).where(
+                        DemoSession.session_id.in_(session_ids)
+                    )
+                )
+                sessions = result.scalars().all()
+
+                stats = {
+                    "cleaned_up": 0,
+                    "failed": 0,
+                    "errors": []
+                }
+
+                # Process each session
+                for session in sessions:
+                    try:
+                        # Mark session as expired
+                        session.status = DemoSessionStatus.EXPIRED
+                        await db.commit()
+
+                        # Check if this is an enterprise demo with children
+                        child_tenant_ids = []
+                        if session.demo_account_type == "enterprise" and session.session_metadata:
+                            child_tenant_ids = session.session_metadata.get("child_tenant_ids", [])
+
+                        # Delete child tenants in parallel (for enterprise demos)
+                        if child_tenant_ids:
+                            logger.info(
+                                "Cleaning up enterprise demo children",
+                                session_id=session.session_id,
+                                child_count=len(child_tenant_ids)
+                            )
+                            child_tasks = [
+                                data_cloner.delete_session_data(
+                                    str(child_id),
+                                    session.session_id
+                                )
+                                for child_id in child_tenant_ids
+                            ]
+                            child_results = await asyncio.gather(*child_tasks, return_exceptions=True)
+
+                            # Log any child deletion failures
+                            for child_id, result in zip(child_tenant_ids, child_results):
+                                if isinstance(result, Exception):
+                                    logger.error(
+                                        "Failed to delete child tenant",
+                                        child_id=child_id,
+                                        error=str(result)
+                                    )
+
+                        # Delete parent/main session data
+                        await data_cloner.delete_session_data(
+                            str(session.virtual_tenant_id),
+                            session.session_id
+                        )
+
+                        stats["cleaned_up"] += 1
+
+                        logger.info(
+                            "Session cleaned up",
+                            session_id=session.session_id,
+                            is_enterprise=(session.demo_account_type == "enterprise"),
+                            children_deleted=len(child_tenant_ids)
+                        )
+
+                    except Exception as e:
+                        stats["failed"] += 1
+                        stats["errors"].append({
+                            "session_id": session.session_id,
+                            "error": str(e)
+                        })
+                        logger.error(
+                            "Failed to cleanup session",
+                            session_id=session.session_id,
+                            error=str(e),
+                            exc_info=True
+                        )
+
+                return stats
+
+            finally:
+                # Always close HTTP client
+                await data_cloner.close()
+
+    async def _mark_job_complete(self, job_id: str, stats: Dict[str, Any]):
+        """Mark job as complete in Redis"""
+        client = await self.redis.get_client()
+        status_key = f"cleanup:job:{job_id}:status"
+        await client.setex(
+            status_key,
+            3600,  # Keep status for 1 hour
+            json.dumps({
+                "status": "completed",
+                "stats": stats,
+                "completed_at": datetime.now(timezone.utc).isoformat()
+            })
+        )
+
+    async def _mark_job_failed(self, job_id: str, error: str):
+        """Mark job as failed in Redis"""
+        client = await self.redis.get_client()
+        status_key = f"cleanup:job:{job_id}:status"
+        await client.setex(
+            status_key,
+            3600,
+            json.dumps({
+                "status": "failed",
+                "error": error,
+                "failed_at": datetime.now(timezone.utc).isoformat()
+            })
+        )


+async def run_cleanup_worker():
+    """Entry point for worker process"""
+    # Initialize Redis client
+    import os
+    from shared.redis_utils import initialize_redis
+
+    redis_url = os.getenv("REDIS_URL", "redis://redis-service:6379/0")
+
+    try:
+        # Initialize Redis with connection pool settings
+        await initialize_redis(redis_url, db=0, max_connections=10)
+        logger.info("Redis initialized successfully", redis_url=redis_url.split('@')[-1])
+    except Exception as e:
+        logger.error("Failed to initialize Redis", error=str(e))
+        raise
+
+    redis = DemoRedisWrapper()
+    worker = CleanupWorker(redis)
+
+    try:
+        await worker.start()
+    except KeyboardInterrupt:
+        logger.info("Received interrupt signal")
+        await worker.stop()
+    except Exception as e:
+        logger.error("Worker crashed", error=str(e), exc_info=True)
+        raise
+
+
+if __name__ == "__main__":
+    asyncio.run(run_cleanup_worker())
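The worker above follows a reliable-queue pattern: BRPOPLPUSH moves each job from cleanup:queue into cleanup:processing so a crash mid-job leaves the payload recoverable (newer Redis versions express the same idea with BLMOVE). A minimal sketch of enqueuing a compatible job by hand, assuming the redis-py asyncio client and the same key names and payload fields used by the endpoint above; the connection URL is illustrative:

# Sketch: push a cleanup job matching the worker's expected payload
import asyncio
import json
import uuid
from datetime import datetime, timezone

import redis.asyncio as redis  # assumption: redis-py asyncio client is available

async def enqueue(session_ids: list, url: str = "redis://localhost:6379/0") -> str:
    client = redis.from_url(url)
    job = {
        "job_id": str(uuid.uuid4()),
        "session_ids": session_ids,
        "created_at": datetime.now(timezone.utc).isoformat(),
        "retry_count": 0,
    }
    # LPUSH pairs with the worker's BRPOPLPUSH, giving FIFO processing
    await client.lpush("cleanup:queue", json.dumps(job))
    await client.aclose()
    return job["job_id"]

if __name__ == "__main__":
    print(asyncio.run(enqueue(["demo-session-123"])))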
@@ -4,11 +4,12 @@ Clones base demo data to session-specific virtual tenants
 """
 
 from sqlalchemy.ext.asyncio import AsyncSession
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
 import httpx
 import structlog
 import uuid
 import os
+import asyncio
 
 from app.core.redis_wrapper import DemoRedisWrapper
 from app.core import settings
@@ -22,6 +23,26 @@ class DemoDataCloner:
     def __init__(self, db: AsyncSession, redis: DemoRedisWrapper):
         self.db = db
         self.redis = redis
+        self._http_client: Optional[httpx.AsyncClient] = None
+
+    async def get_http_client(self) -> httpx.AsyncClient:
+        """Get or create shared HTTP client with connection pooling"""
+        if self._http_client is None:
+            self._http_client = httpx.AsyncClient(
+                timeout=httpx.Timeout(30.0, connect_timeout=10.0),
+                limits=httpx.Limits(
+                    max_connections=20,
+                    max_keepalive_connections=10,
+                    keepalive_expiry=30.0
+                )
+            )
+        return self._http_client
+
+    async def close(self):
+        """Close HTTP client on cleanup"""
+        if self._http_client:
+            await self._http_client.aclose()
+            self._http_client = None
 
     async def clone_tenant_data(
         self,
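The change above replaces a new AsyncClient per request with a single pooled client owned by the cloner and closed explicitly by the caller (the worker's finally block does this). A reduced sketch of that lifecycle follows; everything except the pattern itself is illustrative, and note that recent httpx releases spell the connect timeout connect= rather than connect_timeout=, so the exact keyword depends on the pinned httpx version:

# Sketch: pooled HTTP client lifecycle (illustrative names)
import asyncio
import httpx

class PooledClientHolder:
    """Same get-or-create/close pattern as DemoDataCloner, reduced to essentials."""

    def __init__(self) -> None:
        self._http_client = None

    async def get_http_client(self) -> httpx.AsyncClient:
        if self._http_client is None:
            self._http_client = httpx.AsyncClient(
                timeout=httpx.Timeout(30.0, connect=10.0),  # 'connect=' in current httpx
                limits=httpx.Limits(max_connections=20, max_keepalive_connections=10),
            )
        return self._http_client

    async def close(self) -> None:
        if self._http_client is not None:
            await self._http_client.aclose()
            self._http_client = None

async def main() -> None:
    holder = PooledClientHolder()
    try:
        client = await holder.get_http_client()
        # All calls share one connection pool instead of opening a client per call
        resp = await client.get("https://example.org")
        print(resp.status_code)
    finally:
        await holder.close()

asyncio.run(main())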
@@ -214,7 +235,7 @@ class DemoDataCloner:
 
     async def _fetch_inventory_data(self, tenant_id: str) -> Dict[str, Any]:
         """Fetch inventory data for caching"""
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(timeout=httpx.Timeout(15.0, connect_timeout=5.0)) as client:
             response = await client.get(
                 f"{settings.INVENTORY_SERVICE_URL}/api/inventory/summary",
                 headers={"X-Tenant-Id": tenant_id}
@@ -223,7 +244,7 @@ class DemoDataCloner:
 
     async def _fetch_pos_data(self, tenant_id: str) -> Dict[str, Any]:
         """Fetch POS data for caching"""
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(timeout=httpx.Timeout(15.0, connect_timeout=5.0)) as client:
             response = await client.get(
                 f"{settings.POS_SERVICE_URL}/api/pos/current-session",
                 headers={"X-Tenant-Id": tenant_id}
@@ -261,7 +282,7 @@ class DemoDataCloner:
         session_id: str
     ):
         """
-        Delete all data for a session
+        Delete all data for a session using parallel deletion for performance
 
         Args:
             virtual_tenant_id: Virtual tenant ID to delete
@@ -273,29 +294,40 @@
             session_id=session_id
         )
 
-        # Delete from each service
-        # Note: Services are deleted in reverse dependency order to avoid foreign key issues
+        # Get shared HTTP client for all deletions
+        client = await self.get_http_client()
+
+        # Services list - all can be deleted in parallel as deletion endpoints
+        # handle their own internal ordering if needed
         services = [
-            "forecasting",  # No dependencies
-            "sales",  # Depends on inventory, recipes
-            "orders",  # Depends on customers (within same service)
-            "production",  # Depends on recipes, equipment
-            "inventory",  # Core data (ingredients, products)
-            "recipes",  # Core data
-            "suppliers",  # Core data
-            "pos",  # Point of sale data
-            "distribution",  # Distribution routes
-            "procurement"  # Procurement and purchase orders
+            "forecasting",
+            "sales",
+            "orders",
+            "production",
+            "inventory",
+            "recipes",
+            "suppliers",
+            "pos",
+            "distribution",
+            "procurement"
         ]
 
-        for service_name in services:
-            try:
-                await self._delete_service_data(service_name, virtual_tenant_id)
-            except Exception as e:
-                logger.error(
-                    "Failed to delete service data",
-                    service=service_name,
-                    error=str(e)
-                )
+        # Create deletion tasks for all services
+        deletion_tasks = [
+            self._delete_service_data(service_name, virtual_tenant_id, client)
+            for service_name in services
+        ]
+
+        # Execute all deletions in parallel with exception handling
+        results = await asyncio.gather(*deletion_tasks, return_exceptions=True)
+
+        # Log any failures
+        for service_name, result in zip(services, results):
+            if isinstance(result, Exception):
+                logger.error(
+                    "Failed to delete service data",
+                    service=service_name,
+                    error=str(result)
+                )
 
         # Delete from Redis
@@ -303,16 +335,20 @@
 
         logger.info("Session data deleted", virtual_tenant_id=virtual_tenant_id)
 
-    async def _delete_service_data(self, service_name: str, virtual_tenant_id: str):
-        """Delete data from a specific service"""
+    async def _delete_service_data(
+        self,
+        service_name: str,
+        virtual_tenant_id: str,
+        client: httpx.AsyncClient
+    ):
+        """Delete data from a specific service using provided HTTP client"""
         service_url = self._get_service_url(service_name)
 
         # Get internal API key from settings
         from app.core.config import settings
         internal_api_key = settings.INTERNAL_API_KEY
 
-        async with httpx.AsyncClient(timeout=30.0) as client:
-            await client.delete(
-                f"{service_url}/internal/demo/tenant/{virtual_tenant_id}",
-                headers={"X-Internal-API-Key": internal_api_key}
-            )
+        await client.delete(
+            f"{service_url}/internal/demo/tenant/{virtual_tenant_id}",
+            headers={"X-Internal-API-Key": internal_api_key}
+        )
@@ -28,10 +28,9 @@ from shared.clients import (
     get_production_client,
     get_sales_client,
     get_inventory_client,
-    get_procurement_client
+    get_procurement_client,
+    get_distribution_client
 )
-# TODO: Add distribution client when available
-# from shared.clients import get_distribution_client
 
 def get_enterprise_dashboard_service() -> EnterpriseDashboardService:
     from app.core.config import settings
@@ -40,7 +39,7 @@ def get_enterprise_dashboard_service() -> EnterpriseDashboardService:
     production_client = get_production_client(settings)
     sales_client = get_sales_client(settings)
     inventory_client = get_inventory_client(settings)
-    distribution_client = None  # TODO: Add when distribution service is ready
+    distribution_client = get_distribution_client(settings)
     procurement_client = get_procurement_client(settings)
 
     return EnterpriseDashboardService(
@@ -110,7 +110,8 @@ from shared.clients import (
     get_production_client,
     get_sales_client,
     get_inventory_client,
-    get_procurement_client
+    get_procurement_client,
+    get_distribution_client
 )
 
 def get_enterprise_dashboard_service() -> EnterpriseDashboardService:
@@ -119,7 +120,7 @@ def get_enterprise_dashboard_service() -> EnterpriseDashboardService:
     production_client = get_production_client(settings)
     sales_client = get_sales_client(settings)
    inventory_client = get_inventory_client(settings)
-    distribution_client = None  # TODO: Add when distribution service is ready
+    distribution_client = get_distribution_client(settings)
     procurement_client = get_procurement_client(settings)
 
     return EnterpriseDashboardService(
@@ -138,13 +138,8 @@ class EnterpriseDashboardService:
     async def _get_production_volume(self, parent_tenant_id: str) -> float:
         """Get total production volume for the parent tenant (central production)"""
         try:
-            start_date = date.today() - timedelta(days=30)
-            end_date = date.today()
-
-            production_summary = await self.production_client.get_production_summary(
-                tenant_id=parent_tenant_id,
-                start_date=start_date,
-                end_date=end_date
+            production_summary = await self.production_client.get_dashboard_summary(
+                tenant_id=parent_tenant_id
             )
 
             # Return total production value
@@ -382,6 +377,16 @@ class EnterpriseDashboardService:
         total_demand = 0
         daily_summary = {}
 
+        if not forecast_data:
+            logger.warning("No forecast data returned", parent_tenant_id=parent_tenant_id)
+            return {
+                'parent_tenant_id': parent_tenant_id,
+                'days_forecast': days_ahead,
+                'total_predicted_demand': 0,
+                'daily_summary': {},
+                'last_updated': datetime.utcnow().isoformat()
+            }
+
         for forecast_date_str, products in forecast_data.get('aggregated_forecasts', {}).items():
             day_total = sum(item.get('predicted_demand', 0) for item in products.values())
             total_demand += day_total
@@ -500,10 +505,8 @@ class EnterpriseDashboardService:
     async def _get_tenant_production(self, tenant_id: str, start_date: date, end_date: date) -> float:
         """Helper to get production for a specific tenant"""
         try:
-            production_data = await self.production_client.get_production_summary(
-                tenant_id=tenant_id,
-                start_date=start_date,
-                end_date=end_date
+            production_data = await self.production_client.get_dashboard_summary(
+                tenant_id=tenant_id
             )
             return float(production_data.get('total_value', 0))
         except Exception as e:
@@ -171,7 +171,8 @@ async def clone_demo_data(
         business_model=demo_account_type,
         is_active=True,
         timezone="Europe/Madrid",
-        owner_id=demo_owner_uuid  # Required field - matches seed_demo_users.py
+        owner_id=demo_owner_uuid,  # Required field - matches seed_demo_users.py
+        tenant_type="parent" if demo_account_type in ["enterprise", "enterprise_parent"] else "standalone"
     )
 
     db.add(tenant)
@@ -354,7 +354,9 @@ def extract_user_from_headers(request: Request) -> Optional[Dict[str, Any]]:
         "permissions": request.headers.get("X-User-Permissions", "").split(",") if request.headers.get("X-User-Permissions") else [],
         "full_name": request.headers.get("x-user-full-name", ""),
         "subscription_tier": request.headers.get("x-subscription-tier", ""),
-        "is_demo": request.headers.get("x-is-demo", "").lower() == "true"
+        "is_demo": request.headers.get("x-is-demo", "").lower() == "true",
+        "demo_session_id": request.headers.get("x-demo-session-id", ""),
+        "demo_account_type": request.headers.get("x-demo-account-type", "")
     }
 
     # ✅ ADD THIS: Handle service tokens properly
@@ -19,6 +19,7 @@ from .tenant_client import TenantServiceClient
 from .ai_insights_client import AIInsightsClient
 from .alerts_client import AlertsServiceClient
 from .procurement_client import ProcurementServiceClient
+from .distribution_client import DistributionServiceClient
 
 # Import config
 from shared.config.base import BaseServiceSettings
@@ -146,6 +147,16 @@ def get_procurement_client(config: BaseServiceSettings = None, service_name: str
     _client_cache[cache_key] = ProcurementServiceClient(config, service_name)
     return _client_cache[cache_key]
 
+
+def get_distribution_client(config: BaseServiceSettings = None, service_name: str = "unknown") -> DistributionServiceClient:
+    """Get or create a distribution service client"""
+    if config is None:
+        from app.core.config import settings as config
+
+    cache_key = f"distribution_{service_name}"
+    if cache_key not in _client_cache:
+        _client_cache[cache_key] = DistributionServiceClient(config, service_name)
+    return _client_cache[cache_key]
 
 class ServiceClients:
     """Convenient wrapper for all service clients"""
@@ -257,6 +268,7 @@ __all__ = [
     'SuppliersServiceClient',
     'AlertsServiceClient',
     'TenantServiceClient',
+    'DistributionServiceClient',
     'ServiceClients',
     'get_training_client',
     'get_sales_client',
@@ -270,6 +282,7 @@ __all__ = [
     'get_alerts_client',
     'get_tenant_client',
     'get_procurement_client',
+    'get_distribution_client',
     'get_service_clients',
     'create_forecast_client'
 ]
@@ -386,6 +386,46 @@ class ForecastServiceClient(BaseServiceClient):
             forecast_days=forecast_days
         )
 
+    async def get_aggregated_forecast(
+        self,
+        parent_tenant_id: str,
+        start_date: date,
+        end_date: date,
+        product_id: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Get aggregated forecast for enterprise tenant and all children.
+
+        This method calls the enterprise forecasting aggregation endpoint which
+        combines demand forecasts across the parent tenant and all child tenants
+        in the network. Used for centralized production planning.
+
+        Args:
+            parent_tenant_id: The parent tenant (central bakery) UUID
+            start_date: Start date for forecast range
+            end_date: End date for forecast range
+            product_id: Optional product ID to filter forecasts
+
+        Returns:
+            Aggregated forecast data including:
+            - total_demand: Sum of all child demands
+            - child_contributions: Per-child demand breakdown
+            - forecast_date_range: Date range for the forecast
+            - cached: Whether data was served from Redis cache
+        """
+        params = {
+            "start_date": start_date.isoformat(),
+            "end_date": end_date.isoformat()
+        }
+        if product_id:
+            params["product_id"] = product_id
+
+        return await self.get(
+            "forecasting/enterprise/aggregated",
+            tenant_id=parent_tenant_id,
+            params=params
+        )
+
     async def create_forecast(
         self,
         tenant_id: str,
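An illustrative call site for the new method; the tenant ID, date window, and the create_forecast_client factory signature are assumptions, not part of this commit:

# Sketch: aggregate network-wide demand for the next 7 days
from datetime import date, timedelta

from shared.clients import create_forecast_client  # factory signature assumed

async def network_demand(settings, parent_tenant_id: str) -> float:
    client = create_forecast_client(settings)
    forecast = await client.get_aggregated_forecast(
        parent_tenant_id=parent_tenant_id,
        start_date=date.today(),
        end_date=date.today() + timedelta(days=7),
    )
    # total_demand is the sum of all child demands, per the docstring above
    return forecast.get("total_demand", 0)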
@@ -6,6 +6,7 @@ Handles all API calls to the sales service
 
 import httpx
 import structlog
+from datetime import date
 from typing import Dict, Any, Optional, List, Union
 from .base_service_client import BaseServiceClient
 from shared.config.base import BaseServiceSettings
@@ -183,6 +184,37 @@ class SalesServiceClient(BaseServiceClient):
                         tenant_id=tenant_id)
             return None
 
+    async def get_sales_summary(
+        self,
+        tenant_id: str,
+        start_date: date,
+        end_date: date
+    ) -> Dict[str, Any]:
+        """
+        Get sales summary/analytics for a tenant.
+
+        This method calls the sales analytics summary endpoint which provides
+        aggregated sales metrics over a date range.
+
+        Args:
+            tenant_id: The tenant UUID
+            start_date: Start date for summary range
+            end_date: End date for summary range
+
+        Returns:
+            Sales summary data including metrics like total sales, revenue, etc.
+        """
+        params = {
+            "start_date": start_date.isoformat(),
+            "end_date": end_date.isoformat()
+        }
+
+        return await self.get(
+            "sales/analytics/summary",
+            tenant_id=tenant_id,
+            params=params
+        )
+
     # ================================================================
     # DATA IMPORT
     # ================================================================
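A matching hedged sketch for the sales summary helper, using the get_sales_client factory already imported by the dashboard dependencies above; the 30-day window is illustrative:

# Sketch: pull a 30-day sales summary for one tenant
from datetime import date, timedelta

from shared.clients import get_sales_client

async def monthly_sales(settings, tenant_id: str) -> dict:
    client = get_sales_client(settings)
    return await client.get_sales_summary(
        tenant_id=tenant_id,
        start_date=date.today() - timedelta(days=30),
        end_date=date.today(),
    )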
@@ -310,7 +310,7 @@ class TenantServiceClient(BaseServiceClient):
             List of child tenant dictionaries
         """
         try:
-            result = await self.get(f"tenants/{parent_tenant_id}/children", tenant_id=parent_tenant_id)
+            result = await self.get("children", tenant_id=parent_tenant_id)
             if result:
                 logger.info("Retrieved child tenants",
                             parent_tenant_id=parent_tenant_id,