2025-11-19 21:01:06 +01:00
|
|
|
"""
|
|
|
|
|
Usage Forecasting API
|
|
|
|
|
|
|
|
|
|
This endpoint predicts when a tenant will hit their subscription limits
|
|
|
|
|
based on historical usage growth rates.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import logging
import math
from datetime import datetime, timedelta
from typing import List, Optional

import redis.asyncio as redis
from fastapi import APIRouter, Depends, HTTPException, Query
from pydantic import BaseModel

from shared.auth.decorators import get_current_user_dep
from app.core.config import settings
from app.core.database import database_manager
from app.services.subscription_limit_service import SubscriptionLimitService
|
|
|
|
|
|
|
|
|
|
# All forecasting endpoints in this module are mounted under /usage-forecast.
router = APIRouter(prefix="/usage-forecast", tags=["usage-forecast"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class UsageDataPoint(BaseModel):
    """Single usage data point: one daily snapshot of a metric's value."""
    date: str  # ISO date string (YYYY-MM-DD)
    value: int  # recorded usage count for that day
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class MetricForecast(BaseModel):
    """Forecast for a single subscription metric.

    Combines current usage, the plan limit, the estimated daily growth
    rate and the projected breach date, plus a 30-day history for charting.
    """
    metric: str  # machine key, e.g. 'users', 'api_calls'
    label: str  # human-readable display name
    current: int  # current usage value
    limit: Optional[int]  # None = unlimited
    unit: str  # display suffix, e.g. '/day', ' GB', or '' for plain counts
    daily_growth_rate: Optional[float]  # None if not enough data
    predicted_breach_date: Optional[str]  # ISO date string, None if unlimited or no breach
    days_until_breach: Optional[int]  # None if unlimited or no breach
    usage_percentage: float  # current/limit * 100 (0.0 when unlimited)
    status: str  # 'safe', 'warning', 'critical', 'unlimited'
    trend_data: List[UsageDataPoint]  # 30-day history
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class UsageForecastResponse(BaseModel):
    """Complete usage forecast response for one tenant."""
    tenant_id: str  # tenant the forecast was computed for
    forecasted_at: str  # ISO timestamp of when the forecast was generated
    metrics: List[MetricForecast]  # one forecast per tracked metric
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def get_redis_client() -> redis.Redis:
    """Create a Redis client for reading/writing usage-tracking keys.

    A fresh connection is created on every call; the caller is responsible
    for closing it when done.
    """
    client = redis.from_url(
        settings.REDIS_URL,
        decode_responses=True,
        encoding="utf-8",
    )
    return client
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def get_usage_history(
    redis_client: redis.Redis,
    tenant_id: str,
    metric: str,
    days: int = 30
) -> List[UsageDataPoint]:
    """
    Get historical usage data for a metric from Redis.

    Usage data is stored with keys like:
        usage:daily:{tenant_id}:{metric}:{date}

    Days with no recorded value are skipped, so the result may contain
    fewer than ``days`` points. Points are returned in chronological
    order (oldest first).
    """
    logger = logging.getLogger(__name__)

    history: List[UsageDataPoint] = []
    today = datetime.utcnow().date()

    # Scan newest-first so that, even on partial failure, the most recent
    # days are attempted first.
    for offset in range(days):
        date_str = (today - timedelta(days=offset)).isoformat()
        key = f"usage:daily:{tenant_id}:{metric}:{date_str}"

        try:
            value = await redis_client.get(key)
            if value is not None:
                history.append(UsageDataPoint(date=date_str, value=int(value)))
        except Exception as e:
            # Best-effort: one bad key or transient Redis hiccup should not
            # abort the whole history fetch. Log instead of print so the
            # failure is visible in service logs.
            logger.warning("Error fetching usage for %s: %s", key, e)
            continue

    # Keys were scanned newest-first; flip to chronological order.
    return list(reversed(history))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def calculate_growth_rate(history: "List[UsageDataPoint]") -> Optional[float]:
    """
    Estimate the average daily increase of a metric via least-squares
    linear regression over the usage history.

    Points are placed on the x-axis by their actual day offset (parsed
    from ``point.date``) rather than their list index, so days missing
    from the history do not skew the per-day rate.

    Returns the slope clamped to >= 0 (negative growth cannot produce a
    limit breach), or None when fewer than 7 data points are available.
    """
    if len(history) < 7:  # Need at least 7 days of data
        return None

    # x = whole days since the first point; y = recorded usage value.
    base = datetime.fromisoformat(history[0].date)
    xs = [(datetime.fromisoformat(point.date) - base).days for point in history]
    ys = [point.value for point in history]

    n = len(history)
    sum_x = sum(xs)
    sum_y = sum(ys)
    sum_xy = sum(x * y for x, y in zip(xs, ys))
    sum_x_squared = sum(x * x for x in xs)

    # Degenerate x spread (all points on the same day): no trend derivable.
    denominator = (n * sum_x_squared) - (sum_x ** 2)
    if denominator == 0:
        return None

    slope = ((n * sum_xy) - (sum_x * sum_y)) / denominator

    # Negative growth can't cause a breach, so clamp at zero.
    return max(slope, 0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def predict_breach_date(
    current: int,
    limit: int,
    daily_growth_rate: float
) -> Optional[tuple[str, int]]:
    """
    Predict when usage will breach the limit.

    Returns (breach_date_iso, days_until_breach), or None when no breach
    is predicted (non-positive growth rate, or breach more than a year
    out). Returns (today, 0) when the limit is already reached.
    """
    if daily_growth_rate <= 0:
        return None

    remaining_capacity = limit - current
    if remaining_capacity <= 0:
        # Already at or over limit
        return datetime.utcnow().date().isoformat(), 0

    # ceil, not floor: with 10 remaining and +3/day, projected usage first
    # reaches the limit on day 4, not day 3.
    days_until_breach = math.ceil(remaining_capacity / daily_growth_rate)

    if days_until_breach > 365:  # Don't predict beyond 1 year
        return None

    breach_date = datetime.utcnow().date() + timedelta(days=days_until_breach)

    return breach_date.isoformat(), days_until_breach
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def determine_status(usage_percentage: float, days_until_breach: Optional[int]) -> str:
    """Determine metric status based on usage and time to breach.

    - 'critical': usage at 90% of the limit or above (including breached)
    - 'warning':  usage at 80%+, or a predicted breach within 14 days
    - 'safe':     everything else
    """
    # A single >= 90 test covers the former duplicated >= 100 branch,
    # which returned the same 'critical' value.
    if usage_percentage >= 90:
        return 'critical'
    if usage_percentage >= 80 or (days_until_breach is not None and days_until_breach <= 14):
        return 'warning'
    return 'safe'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.get("", response_model=UsageForecastResponse)
async def get_usage_forecast(
    tenant_id: str = Query(..., description="Tenant ID"),
    current_user: dict = Depends(get_current_user_dep)
) -> UsageForecastResponse:
    """
    Get usage forecasts for all metrics.

    Predicts when the tenant will hit their subscription limits based on
    historical usage growth rates from the past 30 days.

    Returns predictions for: users, locations, products, recipes,
    suppliers, training jobs (daily), forecasts (daily), API calls
    (hourly) and file storage.

    Raises:
        HTTPException(404): when the tenant has no active subscription.
    """
    # NOTE(review): tenant_id comes straight from the query string and is
    # not compared against current_user's tenant here — confirm that
    # cross-tenant access is blocked upstream (e.g. in get_current_user_dep).
    redis_client = await get_redis_client()
    limit_service = SubscriptionLimitService(database_manager=database_manager)

    try:
        # Current usage summary includes both counts and plan limits.
        usage_summary = await limit_service.get_usage_summary(tenant_id)

        if not usage_summary or 'error' in usage_summary:
            raise HTTPException(
                status_code=404,
                detail=f"No active subscription found for tenant {tenant_id}"
            )

        usage = usage_summary.get('usage', {})

        # (forecast key, usage-summary key, label, unit, cast current to int)
        # The forecast key is also the key used for Redis history lookups.
        metric_specs = [
            ('users', 'users', 'Users', '', False),
            ('locations', 'locations', 'Locations', '', False),
            ('products', 'products', 'Products', '', False),
            ('recipes', 'recipes', 'Recipes', '', False),
            ('suppliers', 'suppliers', 'Suppliers', '', False),
            ('training_jobs', 'training_jobs_today', 'Training Jobs', '/day', False),
            ('forecasts', 'forecasts_today', 'Forecasts', '/day', False),
            ('api_calls', 'api_calls_this_hour', 'API Calls', '/hour', False),
            # Storage is reported as a float GB value; coerce to int.
            ('storage', 'file_storage_used_gb', 'File Storage', ' GB', True),
        ]

        forecasts: List[MetricForecast] = []

        for metric_key, usage_key, label, unit, cast_int in metric_specs:
            entry = usage.get(usage_key, {})
            current = entry.get('current', 0)
            if cast_int:
                current = int(current)
            limit = entry.get('limit')

            # 30-day history drives both the trend chart and the regression.
            history = await get_usage_history(redis_client, tenant_id, metric_key, days=30)

            if limit is None or limit == -1:
                # Unlimited metric: nothing to forecast against.
                usage_percentage = 0.0
                status = 'unlimited'
                growth_rate = None
                breach_date = None
                days_until = None
            else:
                usage_percentage = (current / limit * 100) if limit > 0 else 0

                # Growth rate needs history; breach prediction needs growth.
                growth_rate = calculate_growth_rate(history) if history else None

                breach_date, days_until = None, None
                if growth_rate is not None and growth_rate > 0:
                    breach_result = predict_breach_date(current, limit, growth_rate)
                    if breach_result:
                        breach_date, days_until = breach_result

                status = determine_status(usage_percentage, days_until)

            forecasts.append(MetricForecast(
                metric=metric_key,
                label=label,
                current=current,
                limit=limit,
                unit=unit,
                daily_growth_rate=growth_rate,
                predicted_breach_date=breach_date,
                days_until_breach=days_until,
                usage_percentage=round(usage_percentage, 1),
                status=status,
                trend_data=history[-30:]  # cap at 30 points
            ))

        return UsageForecastResponse(
            tenant_id=tenant_id,
            forecasted_at=datetime.utcnow().isoformat(),
            metrics=forecasts
        )

    finally:
        await redis_client.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/track-usage")
async def track_daily_usage(
    tenant_id: str,
    metric: str,
    value: int,
    current_user: dict = Depends(get_current_user_dep)
):
    """
    Manually track daily usage for a metric.

    Called by services to record daily usage snapshots. Each snapshot is
    written to Redis under a per-day key and expires after 60 days.
    """
    redis_client = await get_redis_client()

    try:
        today = datetime.utcnow().date().isoformat()
        usage_key = f"usage:daily:{tenant_id}:{metric}:{today}"

        # Keep snapshots around for 60 days so forecasts have history.
        ttl_seconds = 60 * 24 * 60 * 60
        await redis_client.setex(usage_key, ttl_seconds, str(value))

        return {
            "success": True,
            "tenant_id": tenant_id,
            "metric": metric,
            "value": value,
            "date": today,
        }
    finally:
        await redis_client.close()
|