Add WhatsApp feature
@@ -118,14 +118,17 @@ class BaseAlertService:
         """Leader election for scheduled jobs"""
         lock_key = f"scheduler_lock:{self.config.SERVICE_NAME}"
         lock_ttl = 60
+        # Generate instance_id once for the lifetime of this leadership loop
+        # IMPORTANT: Don't regenerate on each iteration or lock extension will always fail!
+        instance_id = getattr(self.config, 'INSTANCE_ID', str(uuid.uuid4()))
+
         logger.info("DEBUG: maintain_leadership starting",
                     service=self.config.SERVICE_NAME,
                     instance_id=instance_id,
                     redis_client_type=str(type(self.redis)))

         while True:
             try:
-                instance_id = getattr(self.config, 'INSTANCE_ID', str(uuid.uuid4()))
                 was_leader = self.is_leader

                 # Add jitter to avoid thundering herd when multiple instances start
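For reference, a minimal sketch of the startup jitter that the comment above describes, assuming asyncio; the function name and the interval bounds are illustrative, not part of the commit:

import asyncio
import random

async def sleep_with_jitter(base_interval: float = 15.0, max_jitter: float = 5.0) -> None:
    # Sleeping for the poll interval plus a random offset keeps instances
    # that started at the same moment from contending for the lock in
    # lockstep (the "thundering herd" the comment refers to).
    await asyncio.sleep(base_interval + random.uniform(0.0, max_jitter))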
@@ -144,31 +147,37 @@ class BaseAlertService:
                     acquired = result is not None
                     self.is_leader = acquired
                 else:
-                    # Already leader - try to extend the lock
-                    current_value = await self.redis.get(lock_key)
-                    # Note: decode_responses=True means Redis returns strings, not bytes
-                    if current_value and current_value == instance_id:
-                        # Still our lock, extend it using a Lua script for atomicity
-                        lua_script = """
-                        if redis.call("GET", KEYS[1]) == ARGV[1] then
-                            return redis.call("EXPIRE", KEYS[1], ARGV[2])
-                        else
-                            return 0
-                        end
-                        """
-                        try:
-                            extend_result = await self.redis.eval(
-                                lua_script,
-                                keys=[lock_key],
-                                args=[instance_id, lock_ttl]
-                            )
-                            self.is_leader = extend_result == 1
-                        except:
-                            # If Lua script fails (Redis cluster), fall back to simple get/set
-                            self.is_leader = True  # Keep current state if we can't verify
-                    else:
-                        # Lock expired or taken by someone else
-                        self.is_leader = False
+                    # Already leader - try to extend the lock atomically
+                    # Use SET with EX and GET to atomically refresh the lock
+                    try:
+                        # SET key value EX ttl GET returns the old value (atomic check-and-set)
+                        # This is atomic and works in both standalone and cluster mode
+                        old_value = await self.redis.set(
+                            lock_key,
+                            instance_id,
+                            ex=lock_ttl,
+                            get=True  # Return old value (Python redis uses 'get' param for GET option)
+                        )
+                        # If old value matches our instance_id, we successfully extended
+                        self.is_leader = old_value == instance_id
+                        if self.is_leader:
+                            logger.debug("Lock extended successfully",
+                                         service=self.config.SERVICE_NAME,
+                                         instance_id=instance_id,
+                                         ttl=lock_ttl)
+                        else:
+                            # Lock was taken by someone else or expired
+                            logger.info("Lost lock ownership during extension",
+                                        service=self.config.SERVICE_NAME,
+                                        old_owner=old_value,
+                                        instance_id=instance_id)
+                    except Exception as e:
+                        # If extend fails, try to verify we still have the lock
+                        logger.warning("Failed to extend lock, verifying ownership",
+                                       service=self.config.SERVICE_NAME,
+                                       error=str(e))
+                        current_check = await self.redis.get(lock_key)
+                        self.is_leader = current_check == instance_id

                 # Handle leadership changes
                 if self.is_leader and not was_leader:
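The replacement relies on Redis's SET with the EX and GET options (Redis 6.2+), which writes the new TTL and returns the prior value in one atomic round trip. A minimal standalone sketch of that pattern, assuming redis-py 5.x, which exposes the GET option as get=True; the key name and TTL here are illustrative:

import asyncio
import uuid

import redis.asyncio as aioredis

async def main() -> None:
    r = aioredis.Redis(decode_responses=True)
    lock_key = "scheduler_lock:demo"
    instance_id = str(uuid.uuid4())

    # Acquire: NX only sets the key if nobody holds the lock yet.
    acquired = await r.set(lock_key, instance_id, ex=60, nx=True)
    print("acquired:", acquired is not None)

    # Extend: with get=True, SET overwrites the value and returns the
    # previous one, so a single atomic call tells us whether the lock
    # was still ours at the moment we refreshed it.
    old_value = await r.set(lock_key, instance_id, ex=60, get=True)
    print("still leader:", old_value == instance_id)

    await r.aclose()

asyncio.run(main())

One trade-off worth noting: unlike the GET-then-EXPIRE Lua check it replaces, SET without NX overwrites unconditionally, so a refresh that races with a new owner clobbers that owner's value even while reporting the mismatch; the Lua script avoided that, at the cost of the cluster-mode eval issues the removed comment alludes to.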
@@ -366,4 +366,53 @@ class ExternalServiceClient(BaseServiceClient):
             return result
         else:
             logger.warning("No school calendars found for city", city_id=city_id)
             return None
+
+    # ================================================================
+    # POI (POINT OF INTEREST) DATA
+    # ================================================================
+
+    async def get_poi_context(
+        self,
+        tenant_id: str
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Get POI context for a tenant, including ML features for forecasting.
+
+        This retrieves stored POI detection results and calculated ML features
+        that should be included in demand forecasting predictions.
+
+        Args:
+            tenant_id: Tenant ID
+
+        Returns:
+            Dict with POI context including:
+            - ml_features: Dict of POI features for ML models (e.g., poi_retail_total_count)
+            - poi_detection_results: Full detection results
+            - location: Latitude/longitude
+            - total_pois_detected: Count of POIs
+        """
+        logger.info("Fetching POI context for forecasting", tenant_id=tenant_id)
+
+        # Note: the POI context endpoint is /external/poi-context/{tenant_id}.
+        # Passing tenant_id to _make_request would build /api/v1/tenants/{tenant_id}/external/poi-context/{tenant_id},
+        # but the actual endpoint in the external service is just /poi-context/{tenant_id},
+        # so we use the operations prefix and skip the auto-prefixing.
+        result = await self._make_request(
+            "GET",
+            f"external/operations/poi-context/{tenant_id}",
+            tenant_id=None,  # Don't auto-prefix, we're including tenant_id in the path
+            timeout=5.0
+        )
+
+        if result:
+            logger.info(
+                "Successfully fetched POI context",
+                tenant_id=tenant_id,
+                total_pois=result.get("total_pois_detected", 0),
+                ml_features_count=len(result.get("ml_features", {}))
+            )
+            return result
+        else:
+            logger.info("No POI context found for tenant", tenant_id=tenant_id)
+            return None
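A hypothetical caller sketch for the new method; only get_poi_context and its documented return shape come from the diff above, while the function name and the idea of merging into a feature dict are assumptions:

from typing import Any, Dict

async def build_forecast_features(client: "ExternalServiceClient", tenant_id: str) -> Dict[str, Any]:
    features: Dict[str, Any] = {}
    poi_context = await client.get_poi_context(tenant_id)
    if poi_context:
        # Fold the POI-derived ML features (e.g. poi_retail_total_count)
        # into whatever feature dict the forecaster consumes.
        features.update(poi_context.get("ml_features", {}))
    return features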
@@ -100,6 +100,11 @@ class TenantServiceClient(BaseServiceClient):
         result = await self.get_category_settings(tenant_id, "order")
         return result.get('settings', {}) if result else {}
+
+    async def get_notification_settings(self, tenant_id: str) -> Optional[Dict[str, Any]]:
+        """Get notification settings for a tenant"""
+        result = await self.get_category_settings(tenant_id, "notification")
+        return result.get('settings', {}) if result else {}

     async def update_settings(self, tenant_id: str, settings_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
         """
         Update settings for a tenant