New alert service
@@ -1,103 +0,0 @@
-- Fix script for enterprise demo tenants created with old naming
-- This fixes tenants that have:
-- 1. Wrong owner_id (María instead of Carlos)
-- 2. business_model = 'enterprise_parent' instead of 'enterprise'
-- 3. Missing TenantMember records
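
-- Optional pre-check (illustrative sketch only, using the IDs referenced in this script):
-- confirm the tenant still carries the old owner/business_model before applying the fix.
-- SELECT id, name, owner_id, business_model
-- FROM tenants
-- WHERE id = '3fe07312-b325-4b40-97dd-c8d1c0a67ec7'
--   AND (business_model IN ('enterprise_parent', 'enterprise_chain')
--        OR owner_id = 'c1a2b3c4-d5e6-47a8-b9c0-d1e2f3a4b5c6');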

-- Transaction to ensure atomicity
BEGIN;

-- Carlos's user ID (correct enterprise owner)
-- María's user ID (wrong - used for professional): c1a2b3c4-d5e6-47a8-b9c0-d1e2f3a4b5c6
-- Carlos's user ID (correct - for enterprise): d2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7

-- 1. Update tenant to have correct owner and business model
UPDATE tenants
SET
    owner_id = 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7',
    business_model = CASE
        WHEN business_model = 'enterprise_parent' THEN 'enterprise'
        WHEN business_model = 'enterprise_chain' THEN 'enterprise'
        ELSE business_model
    END
WHERE id = '3fe07312-b325-4b40-97dd-c8d1c0a67ec7';

-- 2. Create TenantMember record for Carlos (owner)
INSERT INTO tenant_members (
    id,
    tenant_id,
    user_id,
    role,
    permissions,
    is_active,
    invited_by,
    invited_at,
    joined_at,
    created_at
) VALUES (
    gen_random_uuid(),
    '3fe07312-b325-4b40-97dd-c8d1c0a67ec7',
    'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', -- Carlos
    'owner',
    '["read", "write", "admin", "delete"]',
    true,
    'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', -- Self-invited
    NOW(),
    NOW(),
    NOW()
) ON CONFLICT DO NOTHING;

-- 3. Create TenantMember records for enterprise staff
-- Production Manager
INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
VALUES (gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', '50000000-0000-0000-0000-000000000011', 'production_manager', '["read", "write"]', true, 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW())
ON CONFLICT DO NOTHING;

-- Quality Control
INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
VALUES (gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', '50000000-0000-0000-0000-000000000012', 'quality_control', '["read", "write"]', true, 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW())
ON CONFLICT DO NOTHING;

-- Logistics
INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
VALUES (gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', '50000000-0000-0000-0000-000000000013', 'logistics', '["read", "write"]', true, 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW())
ON CONFLICT DO NOTHING;

-- Sales
INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
VALUES (gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', '50000000-0000-0000-0000-000000000014', 'sales', '["read", "write"]', true, 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW())
ON CONFLICT DO NOTHING;

-- Procurement
INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
VALUES (gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', '50000000-0000-0000-0000-000000000015', 'procurement', '["read", "write"]', true, 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW())
ON CONFLICT DO NOTHING;

-- Maintenance
INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
VALUES (gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', '50000000-0000-0000-0000-000000000016', 'maintenance', '["read", "write"]', true, 'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW())
ON CONFLICT DO NOTHING;
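
-- Optional alternative (sketch only): the six staff INSERTs above could be collapsed
-- into a single multi-row statement, e.g.:
-- INSERT INTO tenant_members (id, tenant_id, user_id, role, permissions, is_active, invited_by, invited_at, joined_at, created_at)
-- SELECT gen_random_uuid(), '3fe07312-b325-4b40-97dd-c8d1c0a67ec7', u.user_id, u.role, '["read", "write"]', true,
--        'd2e3f4a5-b6c7-48d9-e0f1-a2b3c4d5e6f7', NOW(), NOW(), NOW()
-- FROM (VALUES
--     ('50000000-0000-0000-0000-000000000011'::uuid, 'production_manager'),
--     ('50000000-0000-0000-0000-000000000012'::uuid, 'quality_control'),
--     ('50000000-0000-0000-0000-000000000013'::uuid, 'logistics'),
--     ('50000000-0000-0000-0000-000000000014'::uuid, 'sales'),
--     ('50000000-0000-0000-0000-000000000015'::uuid, 'procurement'),
--     ('50000000-0000-0000-0000-000000000016'::uuid, 'maintenance')
-- ) AS u(user_id, role)
-- ON CONFLICT DO NOTHING;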

-- Verify the fix
SELECT
    'Tenant' as type,
    id::text as id,
    name,
    business_model,
    owner_id::text as owner_id
FROM tenants
WHERE id = '3fe07312-b325-4b40-97dd-c8d1c0a67ec7'

UNION ALL

SELECT
    'Member' as type,
    id::text,
    user_id::text as name,
    role as business_model,
    invited_by::text as owner_id
FROM tenant_members
WHERE tenant_id = '3fe07312-b325-4b40-97dd-c8d1c0a67ec7'
ORDER BY type DESC;

COMMIT;

@@ -1,271 +0,0 @@
#!/usr/bin/env python3
"""
One-time data migration script to populate demo_session_id for existing virtual tenants.

This script fixes existing demo sessions created before the demo_session_id fix was implemented.
It links tenants to their sessions using DemoSession.virtual_tenant_id and session_metadata.child_tenant_ids.

Usage:
    python3 scripts/fix_existing_demo_sessions.py

Requirements:
    - Both demo_session and tenant services must be accessible
    - Database credentials must be available via environment variables
"""

import asyncio
import asyncpg
import json
import os
import sys
from datetime import datetime
from typing import List, Dict, Any
from uuid import UUID

# Database connection URLs
DEMO_SESSION_DB_URL = os.getenv(
    "DEMO_SESSION_DATABASE_URL",
    "postgresql://demo_session_user:demo_password@localhost:5432/demo_session_db"
)
TENANT_DB_URL = os.getenv(
    "TENANT_DATABASE_URL",
    "postgresql://tenant_user:T0uJnXs0r4TUmxSQeQ2DuQGP6HU0LEba@localhost:5432/tenant_db"
)
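
# Example invocation overriding the defaults above (placeholder credentials):
#   DEMO_SESSION_DATABASE_URL="postgresql://user:pass@db-host:5432/demo_session_db" \
#   TENANT_DATABASE_URL="postgresql://user:pass@db-host:5432/tenant_db" \
#   python3 scripts/fix_existing_demo_sessions.py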


async def get_all_demo_sessions(demo_session_conn) -> List[Dict[str, Any]]:
    """Fetch all demo sessions from demo_session database"""
    query = """
        SELECT
            id,
            session_id,
            virtual_tenant_id,
            demo_account_type,
            session_metadata,
            status,
            created_at
        FROM demo_sessions
        WHERE status IN ('ready', 'active', 'partial')
        ORDER BY created_at DESC
    """

    rows = await demo_session_conn.fetch(query)
    sessions = []

    for row in rows:
        sessions.append({
            "id": row["id"],
            "session_id": row["session_id"],
            "virtual_tenant_id": row["virtual_tenant_id"],
            "demo_account_type": row["demo_account_type"],
            "session_metadata": row["session_metadata"],
            "status": row["status"],
            "created_at": row["created_at"]
        })

    return sessions


async def check_tenant_exists(tenant_conn, tenant_id: UUID) -> bool:
    """Check if a tenant exists in the tenant database"""
    query = """
        SELECT id FROM tenants WHERE id = $1 AND is_demo = true
    """

    result = await tenant_conn.fetchrow(query, tenant_id)
    return result is not None


async def update_tenant_session_id(tenant_conn, tenant_id: UUID, session_id: str):
    """Update a tenant's demo_session_id"""
    query = """
        UPDATE tenants
        SET demo_session_id = $2
        WHERE id = $1 AND is_demo = true
    """

    await tenant_conn.execute(query, tenant_id, session_id)


async def get_tenant_session_id(tenant_conn, tenant_id: UUID) -> str:
    """Get the current demo_session_id for a tenant"""
    query = """
        SELECT demo_session_id FROM tenants WHERE id = $1 AND is_demo = true
    """

    result = await tenant_conn.fetchrow(query, tenant_id)
    return result["demo_session_id"] if result else None


async def migrate_demo_sessions():
    """Main migration function"""

    print("=" * 80)
    print("Demo Session Migration Script")
    print("=" * 80)
    print(f"Started at: {datetime.now()}")
    print()

    # Connect to both databases
    print("Connecting to databases...")
    demo_session_conn = await asyncpg.connect(DEMO_SESSION_DB_URL)
    tenant_conn = await asyncpg.connect(TENANT_DB_URL)
    print("✓ Connected to both databases")
    print()

    try:
        # Fetch all demo sessions
        print("Fetching demo sessions...")
        sessions = await get_all_demo_sessions(demo_session_conn)
        print(f"✓ Found {len(sessions)} demo sessions")
        print()

        # Statistics
        stats = {
            "sessions_processed": 0,
            "tenants_updated": 0,
            "tenants_already_set": 0,
            "tenants_not_found": 0,
            "errors": 0
        }

        # Process each session
        for session in sessions:
            session_id = session["session_id"]
            virtual_tenant_id = session["virtual_tenant_id"]
            demo_account_type = session["demo_account_type"]
            session_metadata = session["session_metadata"] or {}

            print(f"Processing session: {session_id}")
            print(f" Type: {demo_account_type}")
            print(f" Main tenant: {virtual_tenant_id}")

            tenant_ids_to_update = [virtual_tenant_id]

            # For enterprise sessions, also get child tenant IDs
            if demo_account_type in ["enterprise_chain", "enterprise_parent"]:
                child_tenant_ids = session_metadata.get("child_tenant_ids", [])
                if child_tenant_ids:
                    # Convert string UUIDs to UUID objects
                    child_uuids = [UUID(tid) if isinstance(tid, str) else tid for tid in child_tenant_ids]
                    tenant_ids_to_update.extend(child_uuids)
                    print(f" Child tenants: {len(child_uuids)}")

            # Update each tenant
            session_tenants_updated = 0
            for tenant_id in tenant_ids_to_update:
                try:
                    # Check if tenant exists
                    exists = await check_tenant_exists(tenant_conn, tenant_id)
                    if not exists:
                        print(f" ⚠ Tenant {tenant_id} not found - skipping")
                        stats["tenants_not_found"] += 1
                        continue

                    # Check current session_id
                    current_session_id = await get_tenant_session_id(tenant_conn, tenant_id)

                    if current_session_id == session_id:
                        print(f" ✓ Tenant {tenant_id} already has session_id set")
                        stats["tenants_already_set"] += 1
                        continue

                    # Update the tenant
                    await update_tenant_session_id(tenant_conn, tenant_id, session_id)
                    print(f" ✓ Updated tenant {tenant_id}")
                    stats["tenants_updated"] += 1
                    session_tenants_updated += 1

                except Exception as e:
                    print(f" ✗ Error updating tenant {tenant_id}: {e}")
                    stats["errors"] += 1

            stats["sessions_processed"] += 1
            print(f" Session complete: {session_tenants_updated} tenant(s) updated")
            print()

        # Print summary
        print("=" * 80)
        print("Migration Complete!")
        print("=" * 80)
        print(f"Sessions processed: {stats['sessions_processed']}")
        print(f"Tenants updated: {stats['tenants_updated']}")
        print(f"Tenants already set: {stats['tenants_already_set']}")
        print(f"Tenants not found: {stats['tenants_not_found']}")
        print(f"Errors: {stats['errors']}")
        print()
        print(f"Finished at: {datetime.now()}")
        print("=" * 80)

        # Return success status
        return stats["errors"] == 0

    except Exception as e:
        print(f"✗ Migration failed with error: {e}")
        import traceback
        traceback.print_exc()
        return False

    finally:
        # Close connections
        await demo_session_conn.close()
        await tenant_conn.close()
        print("Database connections closed")


async def verify_migration():
    """Verify that the migration was successful"""

    print()
    print("=" * 80)
    print("Verification Check")
    print("=" * 80)

    tenant_conn = await asyncpg.connect(TENANT_DB_URL)

    try:
        # Count tenants without session_id
        query = """
            SELECT COUNT(*) as count
            FROM tenants
            WHERE is_demo = true AND demo_session_id IS NULL
        """

        result = await tenant_conn.fetchrow(query)
        null_count = result["count"]

        if null_count == 0:
            print("✓ All demo tenants have demo_session_id set")
        else:
            print(f"⚠ {null_count} demo tenant(s) still have NULL demo_session_id")
            print(" These may be template tenants or orphaned records")

        # Count tenants with session_id
        query2 = """
            SELECT COUNT(*) as count
            FROM tenants
            WHERE is_demo = true AND demo_session_id IS NOT NULL
        """

        result2 = await tenant_conn.fetchrow(query2)
        set_count = result2["count"]
        print(f"✓ {set_count} demo tenant(s) have demo_session_id set")

        print("=" * 80)
        print()

    finally:
        await tenant_conn.close()


if __name__ == "__main__":
    # Run migration
    success = asyncio.run(migrate_demo_sessions())

    # Run verification
    if success:
        asyncio.run(verify_migration())
        sys.exit(0)
    else:
        print("Migration failed - see errors above")
        sys.exit(1)

@@ -29,7 +29,7 @@ POS_URL="${POS_URL:-http://localhost:8006/api/v1/pos}"
 EXTERNAL_URL="${EXTERNAL_URL:-http://localhost:8007/api/v1/external}"
 FORECASTING_URL="${FORECASTING_URL:-http://localhost:8008/api/v1/forecasting}"
 TRAINING_URL="${TRAINING_URL:-http://localhost:8009/api/v1/training}"
-ALERT_PROCESSOR_URL="${ALERT_PROCESSOR_URL:-http://localhost:8010/api/v1/alerts}"
+ALERT_PROCESSOR_URL="${ALERT_PROCESSOR_URL:-http://localhost:8000/api/v1/alerts}"
 NOTIFICATION_URL="${NOTIFICATION_URL:-http://localhost:8011/api/v1/notifications}"
 
 # Test results

@@ -1,214 +0,0 @@
#!/usr/bin/env python3
"""
Daily Usage Tracker - Cron Job Script

Tracks daily usage snapshots for all active tenants to enable trend forecasting.
Stores data in Redis with 60-day retention for predictive analytics.

Schedule: Run daily at 2 AM
Crontab: 0 2 * * * /usr/bin/python3 /path/to/scripts/track_daily_usage.py >> /var/log/usage_tracking.log 2>&1

Or use Kubernetes CronJob (see deployment checklist).
"""

import asyncio
import sys
import os
from datetime import datetime, timezone
from pathlib import Path

# Add parent directory to path to import from services
sys.path.insert(0, str(Path(__file__).parent.parent))

from sqlalchemy import select, func
from sqlalchemy.ext.asyncio import AsyncSession

# Import from tenant service
from services.tenant.app.core.database import database_manager
from services.tenant.app.models.tenants import Tenant, Subscription, TenantMember
from services.tenant.app.api.usage_forecast import track_usage_snapshot
from services.tenant.app.core.redis_client import get_redis_client

# Import models for counting (adjust these imports based on your actual model locations)
# You may need to update these imports based on your project structure
try:
    from services.inventory.app.models import Product
    from services.inventory.app.models import Location
    from services.inventory.app.models import Recipe
    from services.inventory.app.models import Supplier
except ImportError:
    # Fallback: If models are in different locations, you'll need to update these
    print("Warning: Could not import all models. Some usage metrics may not be tracked.")
    Product = None
    Location = None
    Recipe = None
    Supplier = None


async def get_tenant_current_usage(session: AsyncSession, tenant_id: str) -> dict:
    """
    Get current usage counts for a tenant across all metrics.

    This queries the actual database to get real-time counts.
    """
    usage = {}

    try:
        # Products count
        result = await session.execute(
            select(func.count()).select_from(Product).where(Product.tenant_id == tenant_id)
        )
        usage['products'] = result.scalar() or 0

        # Users count
        result = await session.execute(
            select(func.count()).select_from(TenantMember).where(TenantMember.tenant_id == tenant_id)
        )
        usage['users'] = result.scalar() or 0

        # Locations count
        result = await session.execute(
            select(func.count()).select_from(Location).where(Location.tenant_id == tenant_id)
        )
        usage['locations'] = result.scalar() or 0

        # Recipes count
        result = await session.execute(
            select(func.count()).select_from(Recipe).where(Recipe.tenant_id == tenant_id)
        )
        usage['recipes'] = result.scalar() or 0

        # Suppliers count
        result = await session.execute(
            select(func.count()).select_from(Supplier).where(Supplier.tenant_id == tenant_id)
        )
        usage['suppliers'] = result.scalar() or 0

        # Training jobs today (from Redis)
        redis = await get_redis_client()
        today_key = f"quota:training_jobs:{tenant_id}:{datetime.now(timezone.utc).strftime('%Y-%m-%d')}"
        training_count = await redis.get(today_key)
        usage['training_jobs'] = int(training_count) if training_count else 0

        # Forecasts today (from Redis)
        forecast_key = f"quota:forecasts:{tenant_id}:{datetime.now(timezone.utc).strftime('%Y-%m-%d')}"
        forecast_count = await redis.get(forecast_key)
        usage['forecasts'] = int(forecast_count) if forecast_count else 0

        # Storage (placeholder - implement based on your file storage system)
        # For now, set to 0. Replace with actual storage calculation.
        usage['storage'] = 0.0
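        # One possible storage calculation (sketch only, assuming S3-backed file storage;
        # the bucket name and prefix are placeholders and boto3 is not imported above):
        #   s3 = boto3.client("s3")
        #   paginator = s3.get_paginator("list_objects_v2")
        #   total_bytes = sum(
        #       obj["Size"]
        #       for page in paginator.paginate(Bucket="tenant-files", Prefix=f"{tenant_id}/")
        #       for obj in page.get("Contents", [])
        #   )
        #   usage['storage'] = total_bytes / (1024 ** 3)  # gigabytes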

        # API calls this hour (from Redis)
        hour_key = f"quota:api_calls:{tenant_id}:{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H')}"
        api_count = await redis.get(hour_key)
        usage['api_calls'] = int(api_count) if api_count else 0

    except Exception as e:
        print(f"Error getting usage for tenant {tenant_id}: {e}")
        # Return empty dict on error
        return {}

    return usage


async def track_all_tenants():
    """
    Main function to track usage for all active tenants.
    """
    start_time = datetime.now(timezone.utc)
    print(f"[{start_time}] Starting daily usage tracking")

    try:
        # Get database session
        async with database_manager.get_session() as session:
            # Query all active tenants
            result = await session.execute(
                select(Tenant, Subscription)
                .join(Subscription, Tenant.id == Subscription.tenant_id)
                .where(Tenant.is_active == True)
                .where(Subscription.status.in_(['active', 'trialing', 'cancelled']))
            )

            tenants_data = result.all()
            total_tenants = len(tenants_data)
            print(f"Found {total_tenants} active tenants to track")

            success_count = 0
            error_count = 0

            # Process each tenant
            for tenant, subscription in tenants_data:
                try:
                    # Get current usage for this tenant
                    usage = await get_tenant_current_usage(session, tenant.id)

                    if not usage:
                        print(f" ⚠️ {tenant.id}: No usage data available")
                        error_count += 1
                        continue

                    # Track each metric
                    metrics_tracked = 0
                    for metric_name, value in usage.items():
                        try:
                            await track_usage_snapshot(
                                tenant_id=tenant.id,
                                metric=metric_name,
                                value=value
                            )
                            metrics_tracked += 1
                        except Exception as e:
                            print(f" ❌ {tenant.id} - {metric_name}: Error tracking - {e}")

                    print(f" ✅ {tenant.id}: Tracked {metrics_tracked} metrics")
                    success_count += 1

                except Exception as e:
                    print(f" ❌ {tenant.id}: Error processing tenant - {e}")
                    error_count += 1
                    continue

            # Summary
            end_time = datetime.now(timezone.utc)
            duration = (end_time - start_time).total_seconds()

            print("\n" + "="*60)
            print(f"Daily Usage Tracking Complete")
            print(f"Started: {start_time.strftime('%Y-%m-%d %H:%M:%S UTC')}")
            print(f"Finished: {end_time.strftime('%Y-%m-%d %H:%M:%S UTC')}")
            print(f"Duration: {duration:.2f}s")
            print(f"Tenants: {total_tenants} total")
            print(f"Success: {success_count} tenants tracked")
            print(f"Errors: {error_count} tenants failed")
            print("="*60)

            # Exit with error code if any failures
            if error_count > 0:
                sys.exit(1)
            else:
                sys.exit(0)

    except Exception as e:
        print(f"FATAL ERROR: Failed to track usage - {e}")
        import traceback
        traceback.print_exc()
        sys.exit(2)


def main():
    """Entry point"""
    try:
        asyncio.run(track_all_tenants())
    except KeyboardInterrupt:
        print("\n⚠️ Interrupted by user")
        sys.exit(130)
    except Exception as e:
        print(f"FATAL ERROR: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(2)


if __name__ == '__main__':
    main()