Add frontend loading improvements

This commit is contained in:
Urtzi Alfaro
2025-12-27 21:30:42 +01:00
parent 6e3a6590d6
commit 54662dde79
21 changed files with 799 additions and 363 deletions

View File

@@ -0,0 +1,133 @@
"""
Onboarding Status API
Provides lightweight onboarding status checks by aggregating counts from multiple services
"""
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
import structlog
import asyncio
import httpx
import os
from app.core.database import get_db
from app.core.config import settings
from shared.auth.decorators import get_current_tenant_id_dep
from shared.routing.route_builder import RouteBuilder
logger = structlog.get_logger()
router = APIRouter()
route_builder = RouteBuilder("tenants")
@router.get(route_builder.build_base_route("{tenant_id}/onboarding/status", include_tenant_prefix=False))
async def get_onboarding_status(
tenant_id: str,
db: AsyncSession = Depends(get_db)
):
"""
Get lightweight onboarding status by fetching counts from each service.
Returns:
- ingredients_count: Number of active ingredients
- suppliers_count: Number of active suppliers
- recipes_count: Number of active recipes
- has_minimum_setup: Boolean indicating if minimum requirements are met
- progress_percentage: Overall onboarding progress (0-100)
"""
try:
# Service URLs from environment
inventory_url = os.getenv("INVENTORY_SERVICE_URL", "http://inventory-service:8000")
suppliers_url = os.getenv("SUPPLIERS_SERVICE_URL", "http://suppliers-service:8000")
recipes_url = os.getenv("RECIPES_SERVICE_URL", "http://recipes-service:8000")
internal_api_key = settings.INTERNAL_API_KEY
# Fetch counts from all services in parallel
async with httpx.AsyncClient(timeout=10.0) as client:
results = await asyncio.gather(
client.get(
f"{inventory_url}/internal/count",
params={"tenant_id": tenant_id},
headers={"X-Internal-API-Key": internal_api_key}
),
client.get(
f"{suppliers_url}/internal/count",
params={"tenant_id": tenant_id},
headers={"X-Internal-API-Key": internal_api_key}
),
client.get(
f"{recipes_url}/internal/count",
params={"tenant_id": tenant_id},
headers={"X-Internal-API-Key": internal_api_key}
),
return_exceptions=True
)
# Extract counts with fallback to 0
ingredients_count = 0
suppliers_count = 0
recipes_count = 0
if not isinstance(results[0], Exception) and results[0].status_code == 200:
ingredients_count = results[0].json().get("count", 0)
if not isinstance(results[1], Exception) and results[1].status_code == 200:
suppliers_count = results[1].json().get("count", 0)
if not isinstance(results[2], Exception) and results[2].status_code == 200:
recipes_count = results[2].json().get("count", 0)
# Calculate minimum setup requirements
# Minimum: 3 ingredients, 1 supplier, 1 recipe
has_minimum_ingredients = ingredients_count >= 3
has_minimum_suppliers = suppliers_count >= 1
has_minimum_recipes = recipes_count >= 1
has_minimum_setup = all([
has_minimum_ingredients,
has_minimum_suppliers,
has_minimum_recipes
])
# Calculate progress percentage
# Each requirement contributes 33.33%
progress = 0
if has_minimum_ingredients:
progress += 33
if has_minimum_suppliers:
progress += 33
if has_minimum_recipes:
progress += 34
return {
"ingredients_count": ingredients_count,
"suppliers_count": suppliers_count,
"recipes_count": recipes_count,
"has_minimum_setup": has_minimum_setup,
"progress_percentage": progress,
"requirements": {
"ingredients": {
"current": ingredients_count,
"minimum": 3,
"met": has_minimum_ingredients
},
"suppliers": {
"current": suppliers_count,
"minimum": 1,
"met": has_minimum_suppliers
},
"recipes": {
"current": recipes_count,
"minimum": 1,
"met": has_minimum_recipes
}
}
}
except Exception as e:
logger.error("Failed to get onboarding status", tenant_id=tenant_id, error=str(e))
raise HTTPException(
status_code=500,
detail=f"Failed to get onboarding status: {str(e)}"
)
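For reference, an illustrative response for a hypothetical tenant with 2 ingredients, 1 supplier and no recipes (the numbers are made up; only the supplier requirement is met, so progress is 33):

{
  "ingredients_count": 2,
  "suppliers_count": 1,
  "recipes_count": 0,
  "has_minimum_setup": false,
  "progress_percentage": 33,
  "requirements": {
    "ingredients": {"current": 2, "minimum": 3, "met": false},
    "suppliers": {"current": 1, "minimum": 1, "met": true},
    "recipes": {"current": 0, "minimum": 1, "met": false}
  }
}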

View File

@@ -745,10 +745,30 @@ async def get_usage_summary(
    current_user: Dict[str, Any] = Depends(get_current_user_dep),
    limit_service: SubscriptionLimitService = Depends(get_subscription_limit_service)
):
    """Get usage summary vs limits for a tenant"""
    """Get usage summary vs limits for a tenant (cached for 30s for performance)"""
    try:
        # Try to get from cache first (30s TTL)
        from shared.redis_utils import get_redis_client
        import json

        cache_key = f"usage_summary:{tenant_id}"
        redis_client = await get_redis_client()

        if redis_client:
            cached = await redis_client.get(cache_key)
            if cached:
                logger.debug("Usage summary cache hit", tenant_id=str(tenant_id))
                return json.loads(cached)

        # Cache miss - fetch fresh data
        usage = await limit_service.get_usage_summary(str(tenant_id))

        # Store in cache with 30s TTL
        if redis_client:
            await redis_client.setex(cache_key, 30, json.dumps(usage))
            logger.debug("Usage summary cached", tenant_id=str(tenant_id))

        return usage
    except Exception as e:
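The cache path assumes that shared.redis_utils.get_redis_client returns an async Redis client, or None when Redis is unavailable, which is why both call sites are guarded with "if redis_client:". A minimal sketch of such a helper built on redis.asyncio, purely illustrative since the real shared utility may differ:

# Illustrative sketch only; the actual shared.redis_utils helper may differ.
import os
from typing import Optional

import redis.asyncio as redis

_client: Optional[redis.Redis] = None

async def get_redis_client() -> Optional[redis.Redis]:
    """Return a shared async Redis client, or None if Redis is unreachable."""
    global _client
    if _client is None:
        try:
            client = redis.from_url(
                os.getenv("REDIS_URL", "redis://redis:6379/0"),  # assumed env var and default
                decode_responses=True,  # get() then returns str, so json.loads(cached) works
            )
            await client.ping()
            _client = client
        except Exception:
            return None  # callers treat a missing client as "skip the cache"
    return _client

With a 30-second TTL there is no explicit invalidation: after usage changes, a cached summary can be up to 30 seconds stale, which is the trade-off this endpoint accepts for faster frontend loading.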

View File

@@ -7,7 +7,7 @@ from fastapi import FastAPI
from sqlalchemy import text
from app.core.config import settings
from app.core.database import database_manager
from app.api import tenants, tenant_members, tenant_operations, webhooks, plans, subscription, tenant_settings, whatsapp_admin, usage_forecast, enterprise_upgrade, tenant_locations, tenant_hierarchy, internal_demo, network_alerts
from app.api import tenants, tenant_members, tenant_operations, webhooks, plans, subscription, tenant_settings, whatsapp_admin, usage_forecast, enterprise_upgrade, tenant_locations, tenant_hierarchy, internal_demo, network_alerts, onboarding
from shared.service_base import StandardFastAPIService
@@ -158,6 +158,7 @@ service.add_router(internal_demo.router, tags=["internal-demo"]) # Internal dem
service.add_router(tenant_hierarchy.router, tags=["tenant-hierarchy"]) # Tenant hierarchy endpoints
service.add_router(internal_demo.router, tags=["internal-demo"]) # Internal demo data cloning
service.add_router(network_alerts.router, tags=["network-alerts"]) # Network alerts aggregation endpoints
service.add_router(onboarding.router, tags=["onboarding"]) # Onboarding status endpoints
if __name__ == "__main__":
import uvicorn

View File

@@ -437,18 +437,21 @@ class SubscriptionLimitService:
        current_users = len(members)
        current_locations = 1  # Each tenant has one primary location

        # Get current usage - Products & Inventory
        current_products = await self._get_ingredient_count(tenant_id)
        current_recipes = await self._get_recipe_count(tenant_id)
        current_suppliers = await self._get_supplier_count(tenant_id)
        # Get current usage - Products & Inventory (parallel calls for performance)
        import asyncio
        current_products, current_recipes, current_suppliers = await asyncio.gather(
            self._get_ingredient_count(tenant_id),
            self._get_recipe_count(tenant_id),
            self._get_supplier_count(tenant_id)
        )

        # Get current usage - IA & Analytics (Redis-based daily quotas)
        training_jobs_usage = await self._get_training_jobs_today(tenant_id, subscription.plan)
        forecasts_usage = await self._get_forecasts_today(tenant_id, subscription.plan)
        # Get current usage - API & Storage (Redis-based)
        api_calls_usage = await self._get_api_calls_this_hour(tenant_id, subscription.plan)
        storage_usage = await self._get_file_storage_usage_gb(tenant_id, subscription.plan)
        # Get current usage - IA & Analytics + API & Storage (parallel Redis calls for performance)
        training_jobs_usage, forecasts_usage, api_calls_usage, storage_usage = await asyncio.gather(
            self._get_training_jobs_today(tenant_id, subscription.plan),
            self._get_forecasts_today(tenant_id, subscription.plan),
            self._get_api_calls_this_hour(tenant_id, subscription.plan),
            self._get_file_storage_usage_gb(tenant_id, subscription.plan)
        )

        # Get limits from subscription
        recipes_limit = await self._get_limit_from_plan(subscription.plan, 'recipes')
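One behavioural note on the combined calls: asyncio.gather propagates the first exception by default, so these parallel lookups keep the fail-fast semantics of the earlier sequential awaits. If individual count failures should instead degrade to 0, as the onboarding endpoint above does, a small wrapper along these lines could be used (the helper is hypothetical, not part of this file):

import asyncio
from typing import Awaitable, List

async def _gather_counts_with_fallback(*calls: Awaitable[int]) -> List[int]:
    """Run count lookups concurrently, treating any individual failure as 0."""
    results = await asyncio.gather(*calls, return_exceptions=True)
    return [count if isinstance(count, int) else 0 for count in results]

It would slot in as: current_products, current_recipes, current_suppliers = await _gather_counts_with_fallback(self._get_ingredient_count(tenant_id), self._get_recipe_count(tenant_id), self._get_supplier_count(tenant_id)).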