Improve AI logic
@@ -51,11 +51,22 @@ class BaseAlertService:
         redis_url = self.config.REDIS_URL

-        # Create Redis client from URL (supports TLS via rediss:// protocol)
-        self.redis = await from_url(
-            redis_url,
-            decode_responses=True,
-            max_connections=20
-        )
+        # For self-signed certificates, disable SSL verification
+        redis_kwargs = {
+            'decode_responses': True,
+            'max_connections': 20
+        }
+
+        # If using SSL/TLS, add SSL parameters to handle self-signed certificates
+        if redis_url.startswith('rediss://'):
+            redis_kwargs.update({
+                'ssl_cert_reqs': None,   # Disable certificate verification
+                'ssl_ca_certs': None,    # Don't require CA certificates
+                'ssl_certfile': None,    # Don't require client cert
+                'ssl_keyfile': None      # Don't require client key
+            })
+
+        self.redis = await from_url(redis_url, **redis_kwargs)
         logger.info("Connected to Redis", service=self.config.SERVICE_NAME, redis_url=redis_url.split("@")[-1])

         # Connect to RabbitMQ
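A minimal standalone sketch of what this connection logic does, assuming redis-py's asyncio API (`redis.asyncio.from_url`); skipping certificate verification like this is only reasonable when the Redis endpoint sits inside a trusted network:

import asyncio
from redis.asyncio import from_url

async def connect(redis_url: str):
    # Base options shared by TLS and non-TLS connections
    kwargs = {'decode_responses': True, 'max_connections': 20}
    if redis_url.startswith('rediss://'):
        # Self-signed certs: disable verification (trusted network only)
        kwargs['ssl_cert_reqs'] = None
    client = from_url(redis_url, **kwargs)
    await client.ping()  # fail fast if the server is unreachable
    return client

# asyncio.run(connect("rediss://:password@redis-host:6379/0"))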
@@ -16,6 +16,7 @@ from .production_client import ProductionServiceClient
 from .recipes_client import RecipesServiceClient
 from .suppliers_client import SuppliersServiceClient
 from .tenant_client import TenantServiceClient
+from .ai_insights_client import AIInsightsClient

 # Import config
 from shared.config.base import BaseServiceSettings
shared/clients/ai_insights_client.py (new file, 391 lines)
@@ -0,0 +1,391 @@
"""
AI Insights Service HTTP Client
Shared client for all services to post and retrieve AI insights
"""

import httpx
from typing import Dict, List, Any, Optional
from uuid import UUID
import structlog
from datetime import datetime

logger = structlog.get_logger()


class AIInsightsClient:
    """
    HTTP client for AI Insights Service.
    Allows services to post insights, retrieve orchestration-ready insights, and record feedback.
    """

    def __init__(self, base_url: str, timeout: int = 30):
        """
        Initialize AI Insights client.

        Args:
            base_url: Base URL of AI Insights Service (e.g., http://ai-insights-service:8000)
            timeout: Request timeout in seconds
        """
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.client = httpx.AsyncClient(timeout=self.timeout)

    async def close(self):
        """Close the HTTP client."""
        await self.client.aclose()

    async def create_insight(
        self,
        tenant_id: UUID,
        insight_data: Dict[str, Any]
    ) -> Optional[Dict[str, Any]]:
        """
        Create a new insight in AI Insights Service.

        Args:
            tenant_id: Tenant UUID
            insight_data: Insight data dictionary with fields:
                - type: str (optimization, alert, prediction, recommendation, insight, anomaly)
                - priority: str (low, medium, high, critical)
                - category: str (forecasting, procurement, production, inventory, etc.)
                - title: str
                - description: str
                - impact_type: str
                - impact_value: float
                - impact_unit: str
                - confidence: int (0-100)
                - metrics_json: dict
                - actionable: bool
                - recommendation_actions: list (optional)
                - source_service: str
                - source_model: str (optional)

        Returns:
            Created insight dict or None if failed
        """
        url = f"{self.base_url}/api/v1/tenants/{tenant_id}/insights"

        try:
            # Ensure tenant_id is in the data
            insight_data['tenant_id'] = str(tenant_id)

            response = await self.client.post(url, json=insight_data)

            if response.status_code == 201:
                logger.info(
                    "Insight created successfully",
                    tenant_id=str(tenant_id),
                    insight_title=insight_data.get('title')
                )
                return response.json()
            else:
                logger.error(
                    "Failed to create insight",
                    status_code=response.status_code,
                    response=response.text,
                    insight_title=insight_data.get('title')
                )
                return None

        except Exception as e:
            logger.error(
                "Error creating insight",
                error=str(e),
                tenant_id=str(tenant_id)
            )
            return None

    async def create_insights_bulk(
        self,
        tenant_id: UUID,
        insights: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Create multiple insights in bulk.

        Args:
            tenant_id: Tenant UUID
            insights: List of insight data dictionaries

        Returns:
            Dictionary with success/failure counts
        """
        results = {
            'total': len(insights),
            'successful': 0,
            'failed': 0,
            'created_insights': []
        }

        for insight_data in insights:
            result = await self.create_insight(tenant_id, insight_data)
            if result:
                results['successful'] += 1
                results['created_insights'].append(result)
            else:
                results['failed'] += 1

        logger.info(
            "Bulk insight creation complete",
            total=results['total'],
            successful=results['successful'],
            failed=results['failed']
        )

        return results

    async def get_insights(
        self,
        tenant_id: UUID,
        filters: Optional[Dict[str, Any]] = None
    ) -> Optional[Dict[str, Any]]:
        """
        Get insights for a tenant.

        Args:
            tenant_id: Tenant UUID
            filters: Optional filters:
                - category: str
                - priority: str
                - actionable_only: bool
                - min_confidence: int
                - page: int
                - page_size: int

        Returns:
            Paginated insights response or None if failed
        """
        url = f"{self.base_url}/api/v1/tenants/{tenant_id}/insights"

        try:
            response = await self.client.get(url, params=filters or {})

            if response.status_code == 200:
                return response.json()
            else:
                logger.error(
                    "Failed to get insights",
                    status_code=response.status_code
                )
                return None

        except Exception as e:
            logger.error("Error getting insights", error=str(e))
            return None

    async def get_orchestration_ready_insights(
        self,
        tenant_id: UUID,
        target_date: datetime,
        min_confidence: int = 70
    ) -> Optional[Dict[str, List[Dict[str, Any]]]]:
        """
        Get insights ready for orchestration workflow.

        Args:
            tenant_id: Tenant UUID
            target_date: Target date for orchestration
            min_confidence: Minimum confidence threshold

        Returns:
            Categorized insights or None if failed:
            {
                "forecast_adjustments": [...],
                "procurement_recommendations": [...],
                "production_adjustments": [...],
                "inventory_optimization": [...],
                "risk_alerts": [...]
            }
        """
        url = f"{self.base_url}/api/v1/tenants/{tenant_id}/insights/orchestration-ready"

        params = {
            'target_date': target_date.isoformat(),
            'min_confidence': min_confidence
        }

        try:
            response = await self.client.get(url, params=params)

            if response.status_code == 200:
                return response.json()
            else:
                logger.error(
                    "Failed to get orchestration insights",
                    status_code=response.status_code
                )
                return None

        except Exception as e:
            logger.error("Error getting orchestration insights", error=str(e))
            return None

    async def record_feedback(
        self,
        tenant_id: UUID,
        insight_id: UUID,
        feedback_data: Dict[str, Any]
    ) -> Optional[Dict[str, Any]]:
        """
        Record feedback for an applied insight.

        Args:
            tenant_id: Tenant UUID
            insight_id: Insight UUID
            feedback_data: Feedback data with fields:
                - success: bool
                - applied_at: datetime (optional)
                - actual_impact_value: float (optional)
                - actual_impact_unit: str (optional)
                - notes: str (optional)

        Returns:
            Feedback response or None if failed
        """
        url = f"{self.base_url}/api/v1/tenants/{tenant_id}/insights/{insight_id}/feedback"

        try:
            feedback_data['insight_id'] = str(insight_id)

            response = await self.client.post(url, json=feedback_data)

            if response.status_code in [200, 201]:
                logger.info(
                    "Feedback recorded",
                    insight_id=str(insight_id),
                    success=feedback_data.get('success')
                )
                return response.json()
            else:
                logger.error(
                    "Failed to record feedback",
                    status_code=response.status_code
                )
                return None

        except Exception as e:
            logger.error("Error recording feedback", error=str(e))
            return None

    async def get_insights_summary(
        self,
        tenant_id: UUID,
        time_period_days: int = 30
    ) -> Optional[Dict[str, Any]]:
        """
        Get aggregate metrics summary for insights.

        Args:
            tenant_id: Tenant UUID
            time_period_days: Time period for metrics (default 30 days)

        Returns:
            Summary metrics or None if failed
        """
        url = f"{self.base_url}/api/v1/tenants/{tenant_id}/insights/metrics/summary"

        params = {'time_period_days': time_period_days}

        try:
            response = await self.client.get(url, params=params)

            if response.status_code == 200:
                return response.json()
            else:
                logger.error(
                    "Failed to get insights summary",
                    status_code=response.status_code
                )
                return None

        except Exception as e:
            logger.error("Error getting insights summary", error=str(e))
            return None

    async def post_accuracy_metrics(
        self,
        tenant_id: UUID,
        validation_date: datetime,
        metrics: Dict[str, Any]
    ) -> Optional[Dict[str, Any]]:
        """
        Post forecast accuracy metrics to AI Insights Service.
        Creates an insight with accuracy validation results.

        Args:
            tenant_id: Tenant UUID
            validation_date: Date the forecasts were validated for
            metrics: Dictionary with accuracy metrics:
                - overall_mape: Mean Absolute Percentage Error
                - overall_rmse: Root Mean Squared Error
                - overall_mae: Mean Absolute Error
                - products_validated: Number of products validated
                - poor_accuracy_products: List of products with MAPE > 30%

        Returns:
            Created insight or None if failed
        """
        mape = metrics.get('overall_mape', 0)
        products_validated = metrics.get('products_validated', 0)
        poor_count = len(metrics.get('poor_accuracy_products', []))

        # Determine priority based on MAPE
        if mape > 40:
            priority = 'critical'
        elif mape > 30:
            priority = 'high'
        elif mape > 20:
            priority = 'medium'
        else:
            priority = 'low'

        # Create insight
        insight_data = {
            'type': 'insight',
            'priority': priority,
            'category': 'forecasting',
            'title': f'Forecast Accuracy Validation - {validation_date.strftime("%Y-%m-%d")}',
            'description': (
                f'Validated {products_validated} product forecasts against actual sales. '
                f'Overall MAPE: {mape:.2f}%. '
                f'{poor_count} products require retraining (MAPE > 30%).'
            ),
            'impact_type': 'accuracy',
            'impact_value': mape,
            'impact_unit': 'mape_percentage',
            'confidence': 100,  # Validation is based on actual data
            'metrics_json': {
                'validation_date': validation_date.isoformat() if hasattr(validation_date, 'isoformat') else str(validation_date),
                'overall_mape': mape,
                'overall_rmse': metrics.get('overall_rmse', 0),
                'overall_mae': metrics.get('overall_mae', 0),
                'products_validated': products_validated,
                'poor_accuracy_count': poor_count,
                'poor_accuracy_products': metrics.get('poor_accuracy_products', [])
            },
            'actionable': poor_count > 0,
            'recommendation_actions': [
                f'Retrain models for {poor_count} products with poor accuracy'
            ] if poor_count > 0 else [],
            'source_service': 'forecasting',
            'source_model': 'forecast_validation'
        }

        return await self.create_insight(tenant_id, insight_data)

    async def health_check(self) -> bool:
        """
        Check if AI Insights Service is healthy.

        Returns:
            True if healthy, False otherwise
        """
        url = f"{self.base_url}/health"

        try:
            response = await self.client.get(url)
            return response.status_code == 200

        except Exception as e:
            logger.error("AI Insights Service health check failed", error=str(e))
            return False
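For orientation, a minimal usage sketch of this new client from a calling service; the tenant ID and payload values are illustrative, not from this commit:

import asyncio
from uuid import uuid4
from shared.clients.ai_insights_client import AIInsightsClient

async def main():
    client = AIInsightsClient("http://ai-insights-service:8000")
    try:
        insight = await client.create_insight(uuid4(), {
            'type': 'recommendation',
            'priority': 'high',
            'category': 'inventory',
            'title': 'Raise safety stock for flour',
            'description': 'Demand variability increased over the last 90 days.',
            'impact_type': 'cost_savings',
            'impact_value': 120.0,
            'impact_unit': 'eur_per_month',
            'confidence': 82,
            'metrics_json': {},
            'actionable': True,
            'source_service': 'inventory',
        })
        print(insight)
    finally:
        await client.close()

asyncio.run(main())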
@@ -141,6 +141,27 @@ class ForecastServiceClient(BaseServiceClient):
         }
         return await self.post("forecasting/operations/validate-predictions", params=params, tenant_id=tenant_id)

+    async def validate_forecasts(
+        self,
+        tenant_id: str,
+        date: date
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Validate forecasts for a specific date against actual sales.
+        Calculates MAPE, RMSE, MAE and identifies products with poor accuracy.
+
+        Args:
+            tenant_id: Tenant UUID
+            date: Date to validate
+
+        Returns:
+            Dict with overall metrics and poor accuracy products list
+        """
+        params = {
+            "validation_date": date.isoformat()
+        }
+        return await self.post("forecasting/operations/validate-forecasts", params=params, tenant_id=tenant_id)
+
     async def get_forecast_statistics(
         self,
         tenant_id: str,
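The metric definitions behind this endpoint are the standard ones; a small self-contained sketch of how MAPE, RMSE, and MAE are typically computed from paired forecast/actual values (not the forecasting service's actual implementation):

import math

def accuracy_metrics(actuals: list[float], forecasts: list[float]) -> dict:
    """MAPE, RMSE and MAE over paired actual/forecast values."""
    errors = [f - a for a, f in zip(actuals, forecasts)]
    mae = sum(abs(e) for e in errors) / len(errors)
    rmse = math.sqrt(sum(e * e for e in errors) / len(errors))
    # Skip zero actuals to avoid division by zero
    pct = [abs(e / a) for a, e in zip(actuals, errors) if a != 0]
    mape = 100 * sum(pct) / len(pct) if pct else 0.0
    return {"overall_mape": mape, "overall_rmse": rmse, "overall_mae": mae}

# accuracy_metrics([100, 80], [90, 100]) -> MAPE 17.5, RMSE ~15.81, MAE 15.0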
@@ -179,10 +200,81 @@ class ForecastServiceClient(BaseServiceClient):

         return await self.get("forecasting/analytics/predictions-performance", tenant_id=tenant_id, params=params)

+    # ================================================================
+    # ML INSIGHTS: Dynamic Rules Generation
+    # ================================================================
+
+    async def trigger_rules_generation(
+        self,
+        tenant_id: str,
+        product_ids: Optional[List[str]] = None,
+        lookback_days: int = 90,
+        min_samples: int = 10
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Trigger dynamic business rules learning for demand forecasting.
+
+        Args:
+            tenant_id: Tenant UUID
+            product_ids: Specific product IDs to analyze. If None, analyzes all products
+            lookback_days: Days of historical data to analyze (30-365)
+            min_samples: Minimum samples required for rule learning (5-100)
+
+        Returns:
+            Dict with rules generation results including insights posted
+        """
+        data = {
+            "product_ids": product_ids,
+            "lookback_days": lookback_days,
+            "min_samples": min_samples
+        }
+        return await self.post("forecasting/ml/insights/generate-rules", data=data, tenant_id=tenant_id)
+
+    # ================================================================
+    # Legacy/Compatibility Methods (deprecated)
+    # ================================================================
+
+    async def generate_forecasts(
+        self,
+        tenant_id: str,
+        forecast_days: int = 7,
+        inventory_product_ids: Optional[List[str]] = None
+    ) -> Optional[Dict[str, Any]]:
+        """
+        COMPATIBILITY: Orchestrator-friendly method to generate forecasts.
+
+        This method is called by the orchestrator service and generates batch forecasts
+        for either specified products or all products.
+
+        Args:
+            tenant_id: Tenant UUID
+            forecast_days: Number of days to forecast (default 7)
+            inventory_product_ids: Optional list of product IDs. If None, forecasts all products.
+
+        Returns:
+            Dict with forecast results
+        """
+        from datetime import datetime
+
+        # If no product IDs are specified, let the backend handle it
+        if not inventory_product_ids:
+            # Call the batch operation endpoint to forecast all products.
+            # The forecasting service will handle fetching all products internally.
+            data = {
+                "batch_name": f"orchestrator-batch-{datetime.now().strftime('%Y%m%d')}",
+                "inventory_product_ids": [],  # Empty list will trigger fetching all products
+                "forecast_days": forecast_days
+            }
+            return await self.post("forecasting/operations/batch", data=data, tenant_id=tenant_id)
+
+        # Otherwise use the standard batch forecast
+        return await self.generate_batch_forecast(
+            tenant_id=tenant_id,
+            inventory_product_ids=inventory_product_ids,
+            forecast_date=datetime.now().date(),
+            forecast_days=forecast_days
+        )
+
     async def create_forecast(
         self,
         tenant_id: str,
@@ -17,8 +17,8 @@ logger = structlog.get_logger()
 class InventoryServiceClient(BaseServiceClient):
     """Client for communicating with the inventory service via gateway"""

-    def __init__(self, config: BaseServiceSettings):
-        super().__init__("inventory", config)
+    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
+        super().__init__(calling_service_name, config)

     def get_service_base_path(self) -> str:
         """Return the base path for inventory service APIs"""
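Every service client in this commit gains the same optional `calling_service_name` constructor parameter, so the first argument to BaseServiceClient now identifies the caller rather than the target service. A hypothetical instantiation from the orchestrator (the module path is assumed):

from shared.config.base import BaseServiceSettings
from shared.clients.inventory_client import InventoryServiceClient  # path assumed

config = BaseServiceSettings()
# Downstream logs and requests are now attributed to their origin service
inventory = InventoryServiceClient(config, calling_service_name="orchestrator")
# Omitting the argument falls back to "unknown", keeping old call sites working.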
@@ -610,6 +610,47 @@ class InventoryServiceClient(BaseServiceClient):
             )
             return {}

+    # ================================================================
+    # ML INSIGHTS: Safety Stock Optimization
+    # ================================================================
+
+    async def trigger_safety_stock_optimization(
+        self,
+        tenant_id: str,
+        product_ids: Optional[List[str]] = None,
+        lookback_days: int = 90,
+        min_history_days: int = 30
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Trigger safety stock optimization for inventory products.
+
+        Args:
+            tenant_id: Tenant UUID
+            product_ids: Specific product IDs to optimize. If None, optimizes all products
+            lookback_days: Days of historical demand to analyze (30-365)
+            min_history_days: Minimum days of history required (7-180)
+
+        Returns:
+            Dict with optimization results including insights posted
+        """
+        try:
+            data = {
+                "product_ids": product_ids,
+                "lookback_days": lookback_days,
+                "min_history_days": min_history_days
+            }
+            result = await self.post("inventory/ml/insights/optimize-safety-stock", data=data, tenant_id=tenant_id)
+            if result:
+                logger.info("Triggered safety stock optimization",
+                            products_optimized=result.get('products_optimized', 0),
+                            insights_posted=result.get('total_insights_posted', 0),
+                            tenant_id=tenant_id)
+            return result
+        except Exception as e:
+            logger.error("Error triggering safety stock optimization",
+                         error=str(e), tenant_id=tenant_id)
+            return None
+
     # ================================================================
     # UTILITY METHODS
     # ================================================================
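For context, the textbook safety-stock calculation that the safety-stock endpoints later in this commit build on; this is a sketch of the standard formula under a demand-variability-only model, not the service's verified implementation:

from statistics import mean, stdev
from math import sqrt

# z-scores for common service levels (standard normal quantiles)
Z_SCORES = {0.90: 1.28, 0.95: 1.65, 0.99: 2.33}

def safety_stock(daily_demands: list[float], lead_time_days: float, service_level: float = 0.95) -> float:
    """Safety stock = z * sigma_demand * sqrt(lead time)."""
    sigma = stdev(daily_demands)
    return Z_SCORES[service_level] * sigma * sqrt(lead_time_days)

def reorder_point(daily_demands: list[float], lead_time_days: float, service_level: float = 0.95) -> float:
    """Reorder point = expected lead-time demand + safety stock."""
    return mean(daily_demands) * lead_time_days + safety_stock(daily_demands, lead_time_days, service_level)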
@@ -16,11 +16,11 @@ logger = structlog.get_logger()
 class NotificationServiceClient(BaseServiceClient):
     """Client for communicating with the Notification Service"""

-    def __init__(self, config: BaseServiceSettings):
-        super().__init__("notification", config)
+    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
+        super().__init__(calling_service_name, config)

     def get_service_base_path(self) -> str:
-        return "/api/v1/notifications"
+        return "/api/v1"

     # ================================================================
     # NOTIFICATION ENDPOINTS
@@ -64,7 +64,7 @@ class NotificationServiceClient(BaseServiceClient):
             "metadata": metadata or {}
         }

-        result = await self.post("send", data=notification_data, tenant_id=tenant_id)
+        result = await self.post("notifications/send", data=notification_data, tenant_id=tenant_id)
         if result:
             logger.info("Notification sent successfully",
                         tenant_id=tenant_id,
@@ -110,6 +110,62 @@ class NotificationServiceClient(BaseServiceClient):
                     priority=priority
         )

+    async def send_workflow_summary(
+        self,
+        tenant_id: str,
+        notification_data: Dict[str, Any]
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Send a workflow summary notification.
+
+        Args:
+            tenant_id: Tenant ID
+            notification_data: Summary data to include in the notification
+
+        Returns:
+            Dictionary with the notification result
+        """
+        try:
+            # Prepare workflow summary notification
+            subject = f"Daily Workflow Summary - {notification_data.get('orchestration_run_id', 'N/A')}"
+
+            message_parts = [
+                f"Daily workflow completed for tenant {tenant_id}",
+                f"Orchestration Run ID: {notification_data.get('orchestration_run_id', 'N/A')}",
+                f"Forecasts created: {notification_data.get('forecasts_created', 0)}",
+                f"Production batches created: {notification_data.get('batches_created', 0)}",
+                f"Procurement requirements created: {notification_data.get('requirements_created', 0)}",
+                f"Purchase orders created: {notification_data.get('pos_created', 0)}"
+            ]
+
+            message = "\n".join(message_parts)
+
+            notification_payload = {
+                "type": "email",
+                "message": message,
+                "priority": "normal",
+                "subject": subject,
+                "metadata": {
+                    "orchestration_run_id": notification_data.get('orchestration_run_id'),
+                    "forecast_id": notification_data.get('forecast_id'),
+                    "production_schedule_id": notification_data.get('production_schedule_id'),
+                    "procurement_plan_id": notification_data.get('procurement_plan_id'),
+                    "summary_type": "workflow_completion"
+                }
+            }
+
+            result = await self.post("notifications/send", data=notification_payload, tenant_id=tenant_id)
+            if result:
+                logger.info("Workflow summary notification sent successfully",
+                            tenant_id=tenant_id,
+                            orchestration_run_id=notification_data.get('orchestration_run_id'))
+            return result
+        except Exception as e:
+            logger.error("Error sending workflow summary notification",
+                         error=str(e),
+                         tenant_id=tenant_id)
+            return None
+
     # ================================================================
     # UTILITY METHODS
     # ================================================================
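A hypothetical call from the orchestrator at the end of a daily run; the field names match what `send_workflow_summary` reads from `notification_data`, while the values are illustrative:

async def notify_run_complete(notification_client, tenant_id: str) -> None:
    await notification_client.send_workflow_summary(
        tenant_id=tenant_id,
        notification_data={
            "orchestration_run_id": "run-2024-05-01",  # illustrative values
            "forecasts_created": 42,
            "batches_created": 7,
            "requirements_created": 12,
            "pos_created": 3,
        },
    )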
@@ -24,8 +24,8 @@ logger = structlog.get_logger()
 class ProcurementServiceClient(BaseServiceClient):
     """Enhanced client for communicating with the Procurement Service"""

-    def __init__(self, config: BaseServiceSettings):
-        super().__init__("procurement", config)
+    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
+        super().__init__(calling_service_name, config)

     def get_service_base_path(self) -> str:
         return "/api/v1"
@@ -63,7 +63,7 @@ class ProcurementServiceClient(BaseServiceClient):
             recipes_data: Optional recipes snapshot (NEW - to avoid duplicate fetching)
         """
         try:
-            path = f"/tenants/{tenant_id}/procurement/auto-generate"
+            path = f"/tenants/{tenant_id}/procurement/operations/auto-generate"
             payload = {
                 "forecast_data": forecast_data,
                 "production_schedule_id": production_schedule_id,
@@ -84,7 +84,9 @@ class ProcurementServiceClient(BaseServiceClient):
                         tenant_id=tenant_id,
                         has_forecast_data=bool(forecast_data))

-            response = await self._post(path, json=payload)
+            # Remove tenant_id from path since it's passed as a separate parameter
+            endpoint = "procurement/operations/auto-generate"
+            response = await self.post(endpoint, data=payload, tenant_id=tenant_id)
             return response

         except Exception as e:
@@ -127,7 +129,7 @@ class ProcurementServiceClient(BaseServiceClient):
             - items: List of plan items with full metadata
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/generate"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans/generate"
             payload = {
                 "tenant_id": tenant_id,
                 "requirements": requirements,
@@ -142,7 +144,9 @@ class ProcurementServiceClient(BaseServiceClient):
                         tenant_id=tenant_id,
                         requirements_count=len(requirements))

-            response = await self._post(path, json=payload)
+            # Remove tenant_id from path since it's passed as a separate parameter
+            endpoint = "procurement/operations/replenishment-plans/generate"
+            response = await self.post(endpoint, data=payload, tenant_id=tenant_id)
             return response

         except Exception as e:
@@ -166,7 +170,7 @@ class ProcurementServiceClient(BaseServiceClient):
             Dict with complete plan details
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/{plan_id}"
+            path = f"/tenants/{tenant_id}/procurement/replenishment-plans/{plan_id}"

             logger.debug("Getting replenishment plan",
                          tenant_id=tenant_id, plan_id=plan_id)
@@ -199,7 +203,7 @@ class ProcurementServiceClient(BaseServiceClient):
             List of plan summaries
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans"
             params = {"skip": skip, "limit": limit}
             if status:
                 params["status"] = status
@@ -250,7 +254,7 @@ class ProcurementServiceClient(BaseServiceClient):
             - stockout_risk: Risk level (low/medium/high/critical)
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/inventory-projections/project"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans/inventory-projections/project"
             payload = {
                 "ingredient_id": ingredient_id,
                 "ingredient_name": ingredient_name,
@@ -264,7 +268,9 @@ class ProcurementServiceClient(BaseServiceClient):
             logger.info("Projecting inventory",
                         tenant_id=tenant_id, ingredient_id=ingredient_id)

-            response = await self._post(path, json=payload)
+            # Remove tenant_id from path since it's passed as a separate parameter
+            endpoint = "procurement/operations/replenishment-plans/inventory-projections/project"
+            response = await self.post(endpoint, data=payload, tenant_id=tenant_id)
             return response

         except Exception as e:
@@ -296,7 +302,7 @@ class ProcurementServiceClient(BaseServiceClient):
             List of inventory projections
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/inventory-projections"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans/inventory-projections"
             params = {
                 "skip": skip,
                 "limit": limit,
@@ -345,7 +351,7 @@ class ProcurementServiceClient(BaseServiceClient):
             - reasoning: Explanation
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/safety-stock/calculate"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans/safety-stock/calculate"
             payload = {
                 "ingredient_id": ingredient_id,
                 "daily_demands": daily_demands,
@@ -353,7 +359,9 @@ class ProcurementServiceClient(BaseServiceClient):
                 "service_level": service_level
             }

-            response = await self._post(path, json=payload)
+            # Remove tenant_id from path since it's passed as a separate parameter
+            endpoint = "procurement/operations/replenishment-plans/safety-stock/calculate"
+            response = await self.post(endpoint, data=payload, tenant_id=tenant_id)
             return response

         except Exception as e:
@@ -391,7 +399,7 @@ class ProcurementServiceClient(BaseServiceClient):
             - diversification_applied: Whether diversification was applied
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/supplier-selections/evaluate"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans/supplier-selections/evaluate"
             payload = {
                 "ingredient_id": ingredient_id,
                 "ingredient_name": ingredient_name,
@@ -399,7 +407,9 @@ class ProcurementServiceClient(BaseServiceClient):
                 "supplier_options": supplier_options
             }

-            response = await self._post(path, json=payload)
+            # Remove tenant_id from path since it's passed as a separate parameter
+            endpoint = "procurement/operations/replenishment-plans/supplier-selections/evaluate"
+            response = await self.post(endpoint, data=payload, tenant_id=tenant_id)
             return response

         except Exception as e:
@@ -429,7 +439,7 @@ class ProcurementServiceClient(BaseServiceClient):
             List of supplier allocations
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/supplier-allocations"
+            path = f"/tenants/{tenant_id}/procurement/operations/replenishment-plans/supplier-allocations"
             params = {"skip": skip, "limit": limit}
             if requirement_id:
                 params["requirement_id"] = requirement_id
@@ -470,7 +480,7 @@ class ProcurementServiceClient(BaseServiceClient):
             - stockout_prevention_rate: Effectiveness metric
         """
         try:
-            path = f"/tenants/{tenant_id}/replenishment-plans/analytics"
+            path = f"/tenants/{tenant_id}/procurement/analytics/replenishment-plans"
             params = {}
             if start_date:
                 params["start_date"] = start_date
@@ -484,3 +494,82 @@ class ProcurementServiceClient(BaseServiceClient):
             logger.error("Error getting replenishment analytics",
                          tenant_id=tenant_id, error=str(e))
             return None
+
+    # ================================================================
+    # ML INSIGHTS: Supplier Analysis and Price Forecasting
+    # ================================================================
+
+    async def trigger_supplier_analysis(
+        self,
+        tenant_id: str,
+        supplier_ids: Optional[List[str]] = None,
+        lookback_days: int = 180,
+        min_orders: int = 10
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Trigger supplier performance analysis.
+
+        Args:
+            tenant_id: Tenant UUID
+            supplier_ids: Specific supplier IDs to analyze. If None, analyzes all suppliers
+            lookback_days: Days of historical orders to analyze (30-730)
+            min_orders: Minimum orders required for analysis (5-100)
+
+        Returns:
+            Dict with analysis results including insights posted
+        """
+        try:
+            data = {
+                "supplier_ids": supplier_ids,
+                "lookback_days": lookback_days,
+                "min_orders": min_orders
+            }
+            result = await self.post("procurement/ml/insights/analyze-suppliers", data=data, tenant_id=tenant_id)
+            if result:
+                logger.info("Triggered supplier analysis",
+                            suppliers_analyzed=result.get('suppliers_analyzed', 0),
+                            insights_posted=result.get('total_insights_posted', 0),
+                            tenant_id=tenant_id)
+            return result
+        except Exception as e:
+            logger.error("Error triggering supplier analysis",
+                         error=str(e), tenant_id=tenant_id)
+            return None
+
+    async def trigger_price_forecasting(
+        self,
+        tenant_id: str,
+        ingredient_ids: Optional[List[str]] = None,
+        lookback_days: int = 180,
+        forecast_horizon_days: int = 30
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Trigger price forecasting for procurement ingredients.
+
+        Args:
+            tenant_id: Tenant UUID
+            ingredient_ids: Specific ingredient IDs to forecast. If None, forecasts all ingredients
+            lookback_days: Days of historical price data to analyze (90-730)
+            forecast_horizon_days: Days to forecast ahead (7-90)
+
+        Returns:
+            Dict with forecasting results including insights posted
+        """
+        try:
+            data = {
+                "ingredient_ids": ingredient_ids,
+                "lookback_days": lookback_days,
+                "forecast_horizon_days": forecast_horizon_days
+            }
+            result = await self.post("procurement/ml/insights/forecast-prices", data=data, tenant_id=tenant_id)
+            if result:
+                logger.info("Triggered price forecasting",
+                            ingredients_forecasted=result.get('ingredients_forecasted', 0),
+                            insights_posted=result.get('total_insights_posted', 0),
+                            buy_now_recommendations=result.get('buy_now_recommendations', 0),
+                            tenant_id=tenant_id)
+            return result
+        except Exception as e:
+            logger.error("Error triggering price forecasting",
+                         error=str(e), tenant_id=tenant_id)
+            return None
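The ML-insight triggers in this client share one calling pattern; a hypothetical nightly job might chain them and let each endpoint post its own insights (the meaning of `buy_now_recommendations` beyond the count logged above is an assumption):

async def run_procurement_ml(client, tenant_id: str) -> None:
    # Each call returns a result dict (or None); insights are posted server-side
    await client.trigger_supplier_analysis(tenant_id, lookback_days=180)
    prices = await client.trigger_price_forecasting(tenant_id, forecast_horizon_days=30)
    if prices and prices.get("buy_now_recommendations", 0) > 0:
        # e.g. surface urgent buy-now insights to the orchestrator
        print("Urgent purchases recommended:", prices["buy_now_recommendations"])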
@@ -16,8 +16,8 @@ logger = structlog.get_logger()
 class ProductionServiceClient(BaseServiceClient):
     """Client for communicating with the Production Service"""

-    def __init__(self, config: BaseServiceSettings):
-        super().__init__("production", config)
+    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
+        super().__init__(calling_service_name, config)

     def get_service_base_path(self) -> str:
         return "/api/v1"
@@ -63,7 +63,7 @@ class ProductionServiceClient(BaseServiceClient):
             request_data["recipes_data"] = recipes_data

         result = await self.post(
-            "production/generate-schedule",
+            "production/operations/generate-schedule",
             data=request_data,
             tenant_id=tenant_id
         )
@@ -404,6 +404,47 @@ class ProductionServiceClient(BaseServiceClient):
                          error=str(e), tenant_id=tenant_id)
             return None

+    # ================================================================
+    # ML INSIGHTS: Yield Prediction
+    # ================================================================
+
+    async def trigger_yield_prediction(
+        self,
+        tenant_id: str,
+        recipe_ids: Optional[List[str]] = None,
+        lookback_days: int = 90,
+        min_history_runs: int = 30
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Trigger yield prediction for production recipes.
+
+        Args:
+            tenant_id: Tenant UUID
+            recipe_ids: Specific recipe IDs to analyze. If None, analyzes all recipes
+            lookback_days: Days of historical production to analyze (30-365)
+            min_history_runs: Minimum production runs required (10-100)
+
+        Returns:
+            Dict with prediction results including insights posted
+        """
+        try:
+            data = {
+                "recipe_ids": recipe_ids,
+                "lookback_days": lookback_days,
+                "min_history_runs": min_history_runs
+            }
+            result = await self.post("production/ml/insights/predict-yields", data=data, tenant_id=tenant_id)
+            if result:
+                logger.info("Triggered yield prediction",
+                            recipes_analyzed=result.get('recipes_analyzed', 0),
+                            insights_posted=result.get('total_insights_posted', 0),
+                            tenant_id=tenant_id)
+            return result
+        except Exception as e:
+            logger.error("Error triggering yield prediction",
+                         error=str(e), tenant_id=tenant_id)
+            return None
+
     # ================================================================
     # UTILITY METHODS
     # ================================================================
@@ -16,8 +16,8 @@ logger = structlog.get_logger()
 class RecipesServiceClient(BaseServiceClient):
     """Client for communicating with the Recipes Service"""

-    def __init__(self, config: BaseServiceSettings):
-        super().__init__("recipes", config)
+    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
+        super().__init__(calling_service_name, config)

     def get_service_base_path(self) -> str:
         return "/api/v1"
@@ -43,9 +43,18 @@ class SalesServiceClient(BaseServiceClient):
             params["end_date"] = end_date
         if product_id:
             params["product_id"] = product_id

         result = await self.get("sales/sales", tenant_id=tenant_id, params=params)
-        return result.get("sales", []) if result else None
+
+        # Handle both list and dict responses
+        if result is None:
+            return None
+        elif isinstance(result, list):
+            return result
+        elif isinstance(result, dict):
+            return result.get("sales", [])
+        else:
+            return None

     async def get_all_sales_data(
         self,
@@ -15,8 +15,8 @@ logger = structlog.get_logger()
 class SuppliersServiceClient(BaseServiceClient):
     """Client for communicating with the Suppliers Service"""

-    def __init__(self, config: BaseServiceSettings):
-        super().__init__("suppliers", config)
+    def __init__(self, config: BaseServiceSettings, calling_service_name: str = "unknown"):
+        super().__init__(calling_service_name, config)

     def get_service_base_path(self) -> str:
         return "/api/v1"
@@ -45,9 +45,9 @@ class SuppliersServiceClient(BaseServiceClient):
             if is_active is not None:
                 params["is_active"] = is_active

-            result = await self.get_paginated("suppliers/list", tenant_id=tenant_id, params=params)
+            result = await self.get_paginated("suppliers", tenant_id=tenant_id, params=params)
             logger.info("Retrieved all suppliers from suppliers service",
-                        suppliers_count=len(result), tenant_id=tenant_id)
+                        suppliers_count=len(result) if result else 0, tenant_id=tenant_id)
             return result
         except Exception as e:
             logger.error("Error getting all suppliers",
@@ -59,12 +59,12 @@ class SuppliersServiceClient(BaseServiceClient):
         try:
             params = {}
             if search:
-                params["search"] = search
+                params["search_term"] = search
             if category:
-                params["category"] = category
+                params["supplier_type"] = category

-            result = await self.get("suppliers/list/search", tenant_id=tenant_id, params=params)
-            suppliers = result.get('suppliers', []) if result else []
+            result = await self.get("suppliers", tenant_id=tenant_id, params=params)
+            suppliers = result if result else []
             logger.info("Searched suppliers from suppliers service",
                         search_term=search, suppliers_count=len(suppliers), tenant_id=tenant_id)
             return suppliers
@@ -200,6 +200,31 @@ class TenantServiceClient(BaseServiceClient):
                          error=str(e), tenant_id=tenant_id)
             return None

+    async def get_active_tenants(self, skip: int = 0, limit: int = 100) -> Optional[list]:
+        """
+        Get all active tenants.
+
+        Args:
+            skip: Number of records to skip (pagination)
+            limit: Maximum number of records to return
+
+        Returns:
+            List of active tenant dictionaries
+        """
+        try:
+            # Call the tenants endpoint (not tenant-scoped)
+            result = await self._make_request(
+                "GET",
+                f"tenants?skip={skip}&limit={limit}"
+            )
+            if result:
+                logger.info("Retrieved active tenants from tenant service",
+                            count=len(result) if isinstance(result, list) else 0)
+            return result if result else []
+        except Exception as e:
+            logger.error("Error getting active tenants", error=str(e))
+            return []
+
     # ================================================================
     # UTILITY METHODS
     # ================================================================
@@ -127,6 +127,36 @@ class TrainingServiceClient(BaseServiceClient):
             params["start_date"] = start_date
         if end_date:
             params["end_date"] = end_date

         result = await self.get(f"training/models/{model_id}/predictions", tenant_id=tenant_id, params=params)
         return result.get("predictions", []) if result else None

+    async def trigger_retrain(
+        self,
+        tenant_id: str,
+        inventory_product_id: str,
+        reason: str = 'manual',
+        metadata: Optional[Dict[str, Any]] = None
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Trigger model retraining for a specific product.
+        Used by the orchestrator when forecast accuracy degrades.
+
+        Args:
+            tenant_id: Tenant UUID
+            inventory_product_id: Product UUID to retrain the model for
+            reason: Reason for retraining (accuracy_degradation, manual, scheduled, etc.)
+            metadata: Optional metadata (e.g., previous_mape, validation_date, etc.)
+
+        Returns:
+            Training job details or None if failed
+        """
+        data = {
+            "inventory_product_id": inventory_product_id,
+            "reason": reason,
+            "metadata": metadata or {},
+            "include_weather": True,
+            "include_traffic": False,
+            "min_data_points": 30
+        }
+        return await self.post("training/models/retrain", data=data, tenant_id=tenant_id)
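Putting the new pieces together, a sketch of the accuracy-driven retraining loop this commit enables; client wiring, report field names, and the 30% MAPE cutoff (mirroring post_accuracy_metrics) are assumptions:

from datetime import date

async def nightly_accuracy_check(tenant_client, forecast_client, training_client, insights_client):
    """Sketch: validate forecasts per active tenant and retrain poor performers."""
    for tenant in await tenant_client.get_active_tenants():
        tenant_id = tenant["id"]  # field name assumed
        report = await forecast_client.validate_forecasts(tenant_id, date.today())
        if not report:
            continue
        # Publish the validation results as a forecasting insight
        await insights_client.post_accuracy_metrics(tenant_id, date.today(), report)
        # Retrain any product whose MAPE exceeded the 30% threshold
        for product in report.get("poor_accuracy_products", []):
            await training_client.trigger_retrain(
                tenant_id,
                product["inventory_product_id"],  # field name assumed
                reason="accuracy_degradation",
                metadata={"previous_mape": product.get("mape")},
            )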
@@ -237,6 +237,7 @@ class BaseServiceSettings(BaseSettings):
     ALERT_PROCESSOR_SERVICE_URL: str = os.getenv("ALERT_PROCESSOR_SERVICE_URL", "http://alert-processor-api:8010")
     PROCUREMENT_SERVICE_URL: str = os.getenv("PROCUREMENT_SERVICE_URL", "http://procurement-service:8000")
     ORCHESTRATOR_SERVICE_URL: str = os.getenv("ORCHESTRATOR_SERVICE_URL", "http://orchestrator-service:8000")
+    AI_INSIGHTS_SERVICE_URL: str = os.getenv("AI_INSIGHTS_SERVICE_URL", "http://ai-insights-service:8000")

     # HTTP Client Settings
     HTTP_TIMEOUT: int = int(os.getenv("HTTP_TIMEOUT", "30"))
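The new URL is resolved like every other service URL in this settings class; hypothetical wiring in a service's startup code:

# Reads AI_INSIGHTS_SERVICE_URL from the environment, falling back to
# http://ai-insights-service:8000 when the variable is unset
settings = BaseServiceSettings()
insights = AIInsightsClient(settings.AI_INSIGHTS_SERVICE_URL, timeout=settings.HTTP_TIMEOUT)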
@@ -46,14 +46,28 @@ class RedisConnectionManager:
         """
         try:
-            # Create connection pool
+            # Handle SSL parameters for self-signed certificates
+            connection_kwargs = {
+                'db': db,
+                'max_connections': max_connections,
+                'decode_responses': decode_responses,
+                'retry_on_timeout': retry_on_timeout,
+                'socket_keepalive': socket_keepalive,
+                'health_check_interval': health_check_interval
+            }
+
+            # If using SSL/TLS, add SSL parameters to handle self-signed certificates
+            if redis_url.startswith('rediss://'):
+                connection_kwargs.update({
+                    'ssl_cert_reqs': None,   # Disable certificate verification
+                    'ssl_ca_certs': None,    # Don't require CA certificates
+                    'ssl_certfile': None,    # Don't require client cert
+                    'ssl_keyfile': None      # Don't require client key
+                })
+
             self._pool = redis.ConnectionPool.from_url(
                 redis_url,
-                db=db,
-                max_connections=max_connections,
-                decode_responses=decode_responses,
-                retry_on_timeout=retry_on_timeout,
-                socket_keepalive=socket_keepalive,
-                health_check_interval=health_check_interval
+                **connection_kwargs
             )

             # Create Redis client with pool