Initial commit - production deployment

2026-01-21 17:17:16 +01:00
commit c23d00dd92
2289 changed files with 638440 additions and 0 deletions

0 shared/utils/__init__.py Executable file

110 shared/utils/batch_generator.py Executable file

@@ -0,0 +1,110 @@
"""
Shared batch number generator utility
"""
from datetime import datetime
from typing import Optional, Protocol, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession
import structlog
logger = structlog.get_logger()
class BatchCountProvider(Protocol):
"""Protocol for providing batch counts for a specific tenant and date range"""
async def get_daily_batch_count(
self,
tenant_id: str,
date_start: datetime,
date_end: datetime,
prefix: Optional[str] = None
) -> int:
"""Get the count of batches created today for the given tenant"""
...
class BatchNumberGenerator:
"""Generates unique batch numbers across different services"""
def __init__(self, batch_provider: BatchCountProvider):
self.batch_provider = batch_provider
async def generate_batch_number(
self,
tenant_id: str,
prefix: str = "BATCH",
date: Optional[datetime] = None
) -> str:
"""
Generate a unique batch number with format: {PREFIX}-{YYYYMMDD}-{XXX}
Args:
tenant_id: The tenant ID
prefix: Prefix for the batch number (e.g., "INV", "PROD", "BATCH")
date: Date to use for the batch number (defaults to today)
Returns:
Unique batch number string
"""
try:
# Use provided date or current date
target_date = date or datetime.utcnow()
date_prefix = target_date.strftime("%Y%m%d")
# Calculate date range for the day
today_start = datetime.combine(target_date.date(), datetime.min.time())
today_end = datetime.combine(target_date.date(), datetime.max.time())
# Get count of batches created today with this prefix
daily_count = await self.batch_provider.get_daily_batch_count(
tenant_id=tenant_id,
date_start=today_start,
date_end=today_end,
prefix=prefix
)
# Generate sequential number (starting from 1)
sequence = daily_count + 1
batch_number = f"{prefix}-{date_prefix}-{sequence:03d}"
logger.info(
"Generated batch number",
tenant_id=tenant_id,
prefix=prefix,
date=target_date.date(),
sequence=sequence,
batch_number=batch_number
)
return batch_number
except Exception as e:
logger.error(
"Failed to generate batch number",
tenant_id=tenant_id,
prefix=prefix,
error=str(e)
)
raise
def create_fallback_batch_number(
prefix: str = "BATCH",
date: Optional[datetime] = None,
sequence: int = 1
) -> str:
"""
Create a fallback batch number when database access fails
Args:
prefix: Prefix for the batch number
date: Date to use (defaults to now)
sequence: Sequence number to use
Returns:
Fallback batch number string
"""
target_date = date or datetime.utcnow()
date_prefix = target_date.strftime("%Y%m%d")
return f"{prefix}-{date_prefix}-{sequence:03d}"

168 shared/utils/circuit_breaker.py Executable file

@@ -0,0 +1,168 @@
"""
Circuit Breaker Pattern Implementation
Prevents cascading failures by stopping requests to failing services
and allowing them time to recover.
"""
import asyncio
import time
from enum import Enum
from typing import Callable, Any, Optional
from datetime import datetime, timedelta
import logging
logger = logging.getLogger(__name__)
class CircuitState(str, Enum):
"""Circuit breaker states"""
CLOSED = "closed" # Normal operation
OPEN = "open" # Circuit is open, requests fail immediately
HALF_OPEN = "half_open" # Testing if service has recovered
class CircuitBreakerOpenError(Exception):
"""Raised when circuit breaker is open"""
pass
class CircuitBreaker:
"""
Circuit Breaker implementation for protecting service calls.
States:
- CLOSED: Normal operation, requests pass through
- OPEN: Too many failures, requests fail immediately
- HALF_OPEN: Testing recovery, limited requests allowed
Args:
failure_threshold: Number of failures before opening circuit
timeout_duration: Seconds to wait before attempting recovery
success_threshold: Successful calls needed in HALF_OPEN to close circuit
expected_exceptions: Tuple of exceptions that count as failures
"""
def __init__(
self,
failure_threshold: int = 5,
timeout_duration: int = 60,
success_threshold: int = 2,
expected_exceptions: tuple = (Exception,)
):
self.failure_threshold = failure_threshold
self.timeout_duration = timeout_duration
self.success_threshold = success_threshold
self.expected_exceptions = expected_exceptions
self._state = CircuitState.CLOSED
self._failure_count = 0
self._success_count = 0
self._last_failure_time: Optional[datetime] = None
self._next_attempt_time: Optional[datetime] = None
@property
def state(self) -> CircuitState:
"""Get current circuit state"""
if self._state == CircuitState.OPEN and self._should_attempt_reset():
self._state = CircuitState.HALF_OPEN
self._success_count = 0
logger.info(f"Circuit breaker entering HALF_OPEN state")
return self._state
def _should_attempt_reset(self) -> bool:
"""Check if enough time has passed to attempt reset"""
if self._next_attempt_time is None:
return False
return datetime.now() >= self._next_attempt_time
async def call(self, func: Callable, *args, **kwargs) -> Any:
"""
Execute function with circuit breaker protection.
Args:
func: Function to execute
*args: Positional arguments for func
**kwargs: Keyword arguments for func
Returns:
Result of func execution
Raises:
CircuitBreakerOpenError: If circuit is open
Exception: Original exception from func if circuit is closed
"""
if self.state == CircuitState.OPEN:
raise CircuitBreakerOpenError(
f"Circuit breaker is OPEN. Next attempt at {self._next_attempt_time}"
)
try:
# Execute the function
if asyncio.iscoroutinefunction(func):
result = await func(*args, **kwargs)
else:
result = func(*args, **kwargs)
# Success
self._on_success()
return result
except self.expected_exceptions as e:
# Expected failure
self._on_failure()
raise
def _on_success(self):
"""Handle successful call"""
if self._state == CircuitState.HALF_OPEN:
self._success_count += 1
if self._success_count >= self.success_threshold:
self._close_circuit()
else:
# In CLOSED state, reset failure count on success
self._failure_count = 0
def _on_failure(self):
"""Handle failed call"""
self._failure_count += 1
self._last_failure_time = datetime.now()
if self._state == CircuitState.HALF_OPEN:
# Failure in HALF_OPEN returns to OPEN
self._open_circuit()
elif self._failure_count >= self.failure_threshold:
# Too many failures, open the circuit
self._open_circuit()
def _open_circuit(self):
"""Open the circuit"""
self._state = CircuitState.OPEN
self._next_attempt_time = datetime.now() + timedelta(seconds=self.timeout_duration)
logger.warning(
f"Circuit breaker opened after {self._failure_count} failures. "
f"Next attempt at {self._next_attempt_time}"
)
def _close_circuit(self):
"""Close the circuit"""
self._state = CircuitState.CLOSED
self._failure_count = 0
self._success_count = 0
self._next_attempt_time = None
logger.info(f"Circuit breaker closed after successful recovery")
def reset(self):
"""Manually reset circuit breaker to CLOSED state"""
self._close_circuit()
logger.info(f"Circuit breaker manually reset")
def get_stats(self) -> dict:
"""Get circuit breaker statistics"""
return {
"state": self.state.value,
"failure_count": self._failure_count,
"success_count": self._success_count,
"last_failure_time": self._last_failure_time.isoformat() if self._last_failure_time else None,
"next_attempt_time": self._next_attempt_time.isoformat() if self._next_attempt_time else None
}
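A short usage sketch (assuming the module is importable as `shared.utils.circuit_breaker`); the flaky coroutine stands in for a real downstream call:

```python
import asyncio
import random

from shared.utils.circuit_breaker import CircuitBreaker, CircuitBreakerOpenError

breaker = CircuitBreaker(failure_threshold=3, timeout_duration=30, expected_exceptions=(RuntimeError,))

async def flaky_call():
    # Stand-in for an HTTP call to a downstream service
    if random.random() < 0.7:
        raise RuntimeError("downstream error")
    return "ok"

async def main():
    for _ in range(10):
        try:
            print("result:", await breaker.call(flaky_call))
        except CircuitBreakerOpenError as exc:
            print("short-circuited:", exc)
        except RuntimeError as exc:
            print("failed:", exc, "state:", breaker.get_stats()["state"])

asyncio.run(main())
```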


@@ -0,0 +1,127 @@
"""
City normalization utilities for converting free-text city names to normalized city IDs.
This module provides functions to normalize city names from tenant registration
(which are free-text strings) to standardized city_id values used by the
school calendar and location context systems.
"""
from typing import Optional
import logging
logger = logging.getLogger(__name__)
# Mapping of common city name variations to normalized city IDs
CITY_NAME_TO_ID_MAP = {
# Madrid variations
"Madrid": "madrid",
"madrid": "madrid",
"MADRID": "madrid",
# Barcelona variations
"Barcelona": "barcelona",
"barcelona": "barcelona",
"BARCELONA": "barcelona",
# Valencia variations
"Valencia": "valencia",
"valencia": "valencia",
"VALENCIA": "valencia",
# Seville variations
"Sevilla": "sevilla",
"sevilla": "sevilla",
"Seville": "sevilla",
"seville": "sevilla",
# Bilbao variations
"Bilbao": "bilbao",
"bilbao": "bilbao",
# Add more cities as needed
}
def normalize_city_id(city_name: Optional[str]) -> Optional[str]:
"""
Convert a free-text city name to a normalized city_id.
This function handles various capitalizations and spellings of city names,
converting them to standardized lowercase identifiers used by the
location context and school calendar systems.
Args:
city_name: Free-text city name from tenant registration (e.g., "Madrid", "MADRID")
Returns:
Normalized city_id (e.g., "madrid") or None if city_name is None
Falls back to lowercase city_name if not in mapping
Examples:
>>> normalize_city_id("Madrid")
'madrid'
>>> normalize_city_id("BARCELONA")
'barcelona'
>>> normalize_city_id("Unknown City")
'unknown city'
>>> normalize_city_id(None)
None
"""
if city_name is None:
return None
# Strip whitespace
city_name = city_name.strip()
if not city_name:
logger.warning("Empty city name provided to normalize_city_id")
return None
# Check if we have an explicit mapping
if city_name in CITY_NAME_TO_ID_MAP:
return CITY_NAME_TO_ID_MAP[city_name]
# Fallback: convert to lowercase for consistency
normalized = city_name.lower()
logger.info(
f"City name '{city_name}' not in explicit mapping, using lowercase fallback: '{normalized}'"
)
return normalized
def is_city_supported(city_id: str) -> bool:
"""
Check if a city has school calendars configured.
Currently only Madrid has school calendars in the system.
This function can be updated as more cities are added.
Args:
city_id: Normalized city_id (e.g., "madrid")
Returns:
True if the city has school calendars configured, False otherwise
Examples:
>>> is_city_supported("madrid")
True
>>> is_city_supported("barcelona")
False
"""
# Currently only Madrid has school calendars configured
supported_cities = {"madrid"}
return city_id in supported_cities
def get_supported_cities() -> list[str]:
"""
Get list of city IDs that have school calendars configured.
Returns:
List of supported city_id values
Examples:
>>> get_supported_cities()
['madrid']
"""
return ["madrid"]

385 shared/utils/demo_dates.py Executable file

@@ -0,0 +1,385 @@
"""
Demo Date Utilities for Temporal Determinism
Adjusts dates from seed data to be relative to demo session creation time
"""
from datetime import datetime, timezone, timedelta
from typing import Optional
import pytz
# Fixed base reference date for all demo data
# This is the "day 0" that all seed data is defined relative to
BASE_REFERENCE_DATE = datetime(2025, 1, 15, 6, 0, 0, tzinfo=timezone.utc)
def get_base_reference_date(session_created_at: Optional[datetime] = None) -> datetime:
"""
Get the base reference date for demo data.
If session_created_at is provided, calculate relative to it.
Otherwise, use current time (for backwards compatibility with seed scripts).
Returns:
Base reference date at 6 AM UTC
"""
if session_created_at:
if session_created_at.tzinfo is None:
session_created_at = session_created_at.replace(tzinfo=timezone.utc)
# Reference is session creation time at 6 AM that day
return session_created_at.replace(
hour=6, minute=0, second=0, microsecond=0
)
# Fallback for seed scripts: use today at 6 AM
now = datetime.now(timezone.utc)
return now.replace(hour=6, minute=0, second=0, microsecond=0)
def adjust_date_for_demo(
original_date: Optional[datetime],
session_created_at: datetime
) -> Optional[datetime]:
"""
Adjust a date from seed data to be relative to demo session creation time.
This function calculates the offset between the original date and BASE_REFERENCE_DATE,
then applies that offset to the session creation time.
Example:
# Original seed date: 2025-01-20 06:00 (BASE_REFERENCE + 5 days)
# Demo session created: 2025-12-16 10:00
# Offset: 5 days
# Result: 2025-12-21 10:00 (session + 5 days)
"""
if original_date is None:
return None
# Ensure timezone-aware datetimes
if original_date.tzinfo is None:
original_date = original_date.replace(tzinfo=timezone.utc)
if session_created_at.tzinfo is None:
session_created_at = session_created_at.replace(tzinfo=timezone.utc)
# Calculate offset from base reference
offset = original_date - BASE_REFERENCE_DATE
# Apply offset to session creation date
return session_created_at + offset
def calculate_edge_case_times(session_created_at: datetime) -> dict:
"""
Calculate deterministic edge case times for demo sessions.
These times are designed to always create specific demo scenarios:
- One late delivery (should have arrived hours ago)
- One overdue production batch (should have started hours ago)
- One in-progress batch (started recently)
- One upcoming batch (starts soon)
- One arriving-soon delivery (arrives in a few hours)
Returns:
{
'late_delivery_expected': session - 4h,
'overdue_batch_planned_start': session - 2h,
'in_progress_batch_actual_start': session - 1h45m,
'upcoming_batch_planned_start': session + 1h30m,
'arriving_soon_delivery_expected': session + 2h30m,
'evening_batch_planned_start': today 17:00,
'tomorrow_morning_planned_start': tomorrow 05:00
}
"""
if session_created_at.tzinfo is None:
session_created_at = session_created_at.replace(tzinfo=timezone.utc)
# Calculate today at 6 AM (base reference)
base_reference = get_base_reference_date(session_created_at)
# Calculate tomorrow at 6 AM
tomorrow_base = base_reference + timedelta(days=1)
return {
'late_delivery_expected': session_created_at - timedelta(hours=4),
'overdue_batch_planned_start': session_created_at - timedelta(hours=2),
'in_progress_batch_actual_start': session_created_at - timedelta(hours=1, minutes=45),
'upcoming_batch_planned_start': session_created_at + timedelta(hours=1, minutes=30),
'arriving_soon_delivery_expected': session_created_at + timedelta(hours=2, minutes=30),
'evening_batch_planned_start': base_reference.replace(hour=17, minute=0, second=0, microsecond=0),
'tomorrow_morning_planned_start': tomorrow_base.replace(hour=5, minute=0, second=0, microsecond=0)
}
def ensure_future_time(
target_time: datetime,
reference_time: datetime,
min_hours_ahead: float = 1.0
) -> datetime:
"""
Ensure a target time is in the future relative to reference time.
If target_time is in the past or too close to reference_time,
shift it forward by at least min_hours_ahead.
"""
if target_time.tzinfo is None:
target_time = target_time.replace(tzinfo=timezone.utc)
if reference_time.tzinfo is None:
reference_time = reference_time.replace(tzinfo=timezone.utc)
time_diff = (target_time - reference_time).total_seconds() / 3600
if time_diff < min_hours_ahead:
# Shift forward to ensure minimum hours ahead
return reference_time + timedelta(hours=min_hours_ahead)
return target_time
def resolve_time_marker(
time_marker: str,
session_created_at: datetime,
base_reference_date: datetime = BASE_REFERENCE_DATE
) -> datetime:
"""
Resolve time markers like "BASE_TS + 1h30m" to actual datetimes.
Supports markers in the format:
- "BASE_TS + XhYm" (e.g., "BASE_TS + 1h30m")
- "BASE_TS - XhYm" (e.g., "BASE_TS - 2h")
- "BASE_TS + Xd" (e.g., "BASE_TS + 2d")
- "BASE_TS - Xd" (e.g., "BASE_TS - 1d")
Args:
time_marker: Time marker string to resolve
session_created_at: Demo session creation time
base_reference_date: Base reference date for calculation
Returns:
Resolved datetime adjusted for demo session
Raises:
ValueError: If time_marker format is invalid
Examples:
>>> resolve_time_marker("BASE_TS + 1h30m", session_time)
>>> # Returns session_created_at + 1h30m
>>> resolve_time_marker("BASE_TS - 2h", session_time)
>>> # Returns session_created_at - 2h
"""
if not time_marker or not time_marker.startswith("BASE_TS"):
raise ValueError(f"Invalid time marker format: {time_marker}")
# Extract the offset part
offset_part = time_marker[7:].strip() # Remove the "BASE_TS" prefix (surrounding whitespace is stripped)
if not offset_part:
# Just "BASE_TS" - return session_created_at
return session_created_at
# Handle complex multi-operation markers like "- 30d 6h + 4h 5m"
# Split by operators to handle multiple operations
import re
# Parse all operations in the format: [operator][value]
# Pattern matches: optional whitespace, optional operator (+/-), number with optional decimal, unit (d/h/m)
# The operator is optional so compound values like "1h30m" or "- 30d 6h" reuse the preceding sign
pattern = r'\s*([+-])?\s*(\d+\.?\d*)\s*([dhm])'
operations = []
# Find all operations in the string, carrying the last explicit sign forward
last_operator = '+'
for match in re.finditer(pattern, offset_part):
operator = match.group(1) or last_operator
last_operator = operator
value = float(match.group(2))
unit = match.group(3)
operations.append((operator, value, unit))
if not operations:
# Fallback to old simple parsing for backwards compatibility
operator = offset_part[0]
value_part = offset_part[1:].strip()
if operator not in ['+', '-']:
raise ValueError(f"Invalid operator in time marker: {time_marker}")
# Parse time components (supports decimals like 0.5d, 1.25h)
days = 0.0
hours = 0.0
minutes = 0.0
if 'd' in value_part:
# Handle days (supports decimals like 0.5d = 12 hours)
day_part, rest = value_part.split('d', 1)
days = float(day_part)
value_part = rest
if 'h' in value_part:
# Handle hours (supports decimals like 1.5h = 1h30m)
hour_part, rest = value_part.split('h', 1)
hours = float(hour_part)
value_part = rest
if 'm' in value_part:
# Handle minutes (supports decimals like 30.5m)
minute_part = value_part.split('m', 1)[0]
minutes = float(minute_part)
# Calculate offset using float values
offset = timedelta(days=days, hours=hours, minutes=minutes)
if operator == '+':
return session_created_at + offset
else:
return session_created_at - offset
# Process multiple operations
result_time = session_created_at
for operator, value, unit in operations:
if unit == 'd':
offset = timedelta(days=value)
elif unit == 'h':
offset = timedelta(hours=value)
elif unit == 'm':
offset = timedelta(minutes=value)
else:
raise ValueError(f"Invalid time unit '{unit}' in time marker: {time_marker}")
if operator == '+':
result_time = result_time + offset
elif operator == '-':
result_time = result_time - offset
else:
raise ValueError(f"Invalid operator '{operator}' in time marker: {time_marker}")
return result_time
def shift_to_session_time(
original_offset_days: int,
original_hour: int,
original_minute: int,
session_created_at: datetime,
base_reference: Optional[datetime] = None
) -> datetime:
"""
Shift a time from seed data to demo session time with same-day preservation.
Ensures that:
1. Items scheduled for "today" (offset_days=0) remain on the same day as session creation
2. Future items stay in the future, past items stay in the past
3. Times don't shift to invalid moments (e.g., past times for pending items)
Examples:
# Session created at noon, item originally scheduled for morning
>>> session = datetime(2025, 12, 12, 12, 0, tzinfo=timezone.utc)
>>> result = shift_to_session_time(0, 6, 0, session) # Today at 06:00
>>> # Returns today at 12:30 (shifted forward by the 0.5-hour minimum to stay in the future)
# Session created at noon, item originally scheduled for evening
>>> result = shift_to_session_time(0, 18, 0, session) # Today at 18:00
>>> # Returns today at 18:00 (already in future)
"""
if session_created_at.tzinfo is None:
session_created_at = session_created_at.replace(tzinfo=timezone.utc)
if base_reference is None:
base_reference = get_base_reference_date(session_created_at)
# Calculate original time
original_time = base_reference.replace(
hour=original_hour,
minute=original_minute,
second=0,
microsecond=0
) + timedelta(days=original_offset_days)
# Calculate offset from base reference
offset = original_time - base_reference
# Apply offset to session creation date
new_time = session_created_at + offset
# Ensure the time is in the future for pending items
if original_offset_days >= 0: # Future or today
new_time = ensure_future_time(new_time, session_created_at, min_hours_ahead=0.5)
return new_time
def get_working_hours_time(
target_date: datetime,
hours_from_start: float = 2.0
) -> datetime:
"""
Get a time within working hours (8 AM - 6 PM) for a given date.
Args:
target_date: The date to calculate time for
hours_from_start: Hours from working day start (8 AM)
Returns:
Datetime within working hours
"""
if target_date.tzinfo is None:
target_date = target_date.replace(tzinfo=timezone.utc)
# Working hours: 8 AM - 6 PM (10 hours)
working_start = target_date.replace(hour=8, minute=0, second=0, microsecond=0)
working_end = target_date.replace(hour=18, minute=0, second=0, microsecond=0)
# Calculate time within working hours
result_time = working_start + timedelta(hours=hours_from_start)
# Ensure it's within working hours
if result_time > working_end:
result_time = working_end
return result_time
def get_next_workday(date: datetime) -> datetime:
"""
Get the next workday (Monday-Friday), skipping weekends.
If date is Friday, returns Monday.
If date is Saturday, returns Monday.
Otherwise returns next day.
"""
if date.tzinfo is None:
date = date.replace(tzinfo=timezone.utc)
next_day = date + timedelta(days=1)
# Skip weekends
while next_day.weekday() >= 5: # 5=Saturday, 6=Sunday
next_day += timedelta(days=1)
return next_day
def get_previous_workday(date: datetime) -> datetime:
"""
Get the previous workday (Monday-Friday), skipping weekends.
If date is Monday, returns Friday.
If date is Sunday, returns Friday.
Otherwise returns previous day.
"""
if date.tzinfo is None:
date = date.replace(tzinfo=timezone.utc)
prev_day = date - timedelta(days=1)
# Skip weekends
while prev_day.weekday() >= 5: # 5=Saturday, 6=Sunday
prev_day -= timedelta(days=1)
return prev_day
def format_iso_with_timezone(dt: datetime) -> str:
"""
Format datetime as ISO 8601 with timezone, replacing +00:00 with Z for compatibility.
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
iso_string = dt.isoformat()
return iso_string.replace('+00:00', 'Z') if iso_string.endswith('+00:00') else iso_string
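A usage sketch for the date-shifting helpers (assuming the module is importable as `shared.utils.demo_dates`); the session timestamp is illustrative:

```python
from datetime import datetime, timezone

from shared.utils.demo_dates import adjust_date_for_demo, resolve_time_marker

session = datetime(2025, 12, 16, 10, 0, tzinfo=timezone.utc)

# A seed date defined 5 days after BASE_REFERENCE_DATE shifts to 5 days after session creation
seed_date = datetime(2025, 1, 20, 6, 0, tzinfo=timezone.utc)
print(adjust_date_for_demo(seed_date, session))      # 2025-12-21 10:00:00+00:00

# Time markers resolve relative to the session creation time
print(resolve_time_marker("BASE_TS + 2h", session))  # 2025-12-16 12:00:00+00:00
print(resolve_time_marker("BASE_TS - 1d", session))  # 2025-12-15 10:00:00+00:00
```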


@@ -0,0 +1,113 @@
"""
Demo ID Transformer Utility
Provides XOR-based ID transformation for creating unique but deterministic
IDs across different demo tenants while maintaining cross-service consistency.
This ensures that:
1. Same base ID + same tenant ID = same transformed ID (deterministic)
2. Different tenant IDs = different transformed IDs (isolation)
3. Cross-service relationships are preserved (consistency)
"""
import uuid
from typing import Union
def transform_id(base_id: Union[str, uuid.UUID], tenant_id: Union[str, uuid.UUID]) -> uuid.UUID:
"""
Transform a base ID using XOR with tenant ID to create unique but deterministic IDs.
Args:
base_id: Original UUID (string or UUID object)
tenant_id: Tenant UUID (string or UUID object)
Returns:
Transformed UUID that is unique to this tenant but deterministic
Example:
>>> base_uuid = UUID('10000000-0000-0000-0000-000000000001')
>>> tenant_uuid = UUID('a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6')
>>> transform_id(base_uuid, tenant_uuid)
# Returns deterministic UUID based on XOR of the two
"""
# Convert inputs to UUID objects if they aren't already
if isinstance(base_id, str):
base_uuid = uuid.UUID(base_id)
else:
base_uuid = base_id
if isinstance(tenant_id, str):
tenant_uuid = uuid.UUID(tenant_id)
else:
tenant_uuid = tenant_id
# Convert UUIDs to 16-byte arrays
base_bytes = base_uuid.bytes
tenant_bytes = tenant_uuid.bytes
# Apply XOR transformation
transformed_bytes = bytes(b1 ^ b2 for b1, b2 in zip(base_bytes, tenant_bytes))
# Create new UUID from transformed bytes
transformed_uuid = uuid.UUID(bytes=transformed_bytes)
return transformed_uuid
def generate_deterministic_uuid_from_string(input_string: str, tenant_id: Union[str, uuid.UUID]) -> uuid.UUID:
"""
Generate a deterministic UUID from a string input and tenant ID.
Useful for transforming non-UUID identifiers (like SKUs) into UUIDs
while maintaining determinism across services.
Args:
input_string: String identifier (e.g., SKU, product code)
tenant_id: Tenant UUID for isolation
Returns:
Deterministic UUID based on the input string and tenant
"""
if isinstance(tenant_id, str):
tenant_uuid = uuid.UUID(tenant_id)
else:
tenant_uuid = tenant_id
# Create a combined string for hashing
combined = f"{input_string}-{tenant_uuid}"
# Create a deterministic, name-based UUID (version 5) from the combined string,
# using the standard DNS namespace as the base namespace
deterministic_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, combined)
return deterministic_uuid
# Utility functions for common transformations
def transform_ingredient_id(base_ingredient_id: Union[str, uuid.UUID], tenant_id: Union[str, uuid.UUID]) -> uuid.UUID:
"""Transform an ingredient ID for a specific tenant"""
return transform_id(base_ingredient_id, tenant_id)
def transform_recipe_id(base_recipe_id: Union[str, uuid.UUID], tenant_id: Union[str, uuid.UUID]) -> uuid.UUID:
"""Transform a recipe ID for a specific tenant"""
return transform_id(base_recipe_id, tenant_id)
def transform_supplier_id(base_supplier_id: Union[str, uuid.UUID], tenant_id: Union[str, uuid.UUID]) -> uuid.UUID:
"""Transform a supplier ID for a specific tenant"""
return transform_id(base_supplier_id, tenant_id)
def transform_production_batch_id(base_batch_id: Union[str, uuid.UUID], tenant_id: Union[str, uuid.UUID]) -> uuid.UUID:
"""Transform a production batch ID for a specific tenant"""
return transform_id(base_batch_id, tenant_id)
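A small sketch of the transformation's guarantees (determinism, tenant isolation, reversibility). The import path is not shown in this hunk, so the functions are assumed to be in scope; the tenant UUIDs are generated on the fly for illustration:

```python
import uuid

base = uuid.UUID("10000000-0000-0000-0000-000000000001")
tenant_a, tenant_b = uuid.uuid4(), uuid.uuid4()

# Deterministic: the same (base, tenant) pair always yields the same ID
assert transform_id(base, tenant_a) == transform_id(base, tenant_a)

# Isolated: different tenants get different IDs for the same base record
assert transform_id(base, tenant_a) != transform_id(base, tenant_b)

# XOR is its own inverse, so the original ID can be recovered with the same tenant ID
assert transform_id(transform_id(base, tenant_a), tenant_a) == base

# Non-UUID identifiers such as SKUs map deterministically as well
print(generate_deterministic_uuid_from_string("SKU-FLOUR-001", tenant_a))
```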

480 shared/utils/optimization.py Executable file

@@ -0,0 +1,480 @@
"""
Optimization Utilities
Provides optimization algorithms for procurement planning including
MOQ rounding, economic order quantity, and multi-objective optimization.
"""
import math
from decimal import Decimal
from typing import List, Tuple, Dict, Optional
from dataclasses import dataclass
@dataclass
class OrderOptimizationResult:
"""Result of order quantity optimization"""
optimal_quantity: Decimal
order_cost: Decimal
holding_cost: Decimal
total_cost: Decimal
orders_per_year: float
reasoning_data: dict
def calculate_economic_order_quantity(
annual_demand: float,
ordering_cost: float,
holding_cost_per_unit: float
) -> float:
"""
Calculate Economic Order Quantity (EOQ).
EOQ = sqrt((2 × D × S) / H)
where:
- D = Annual demand
- S = Ordering cost per order
- H = Holding cost per unit per year
Args:
annual_demand: Annual demand in units
ordering_cost: Cost per order placement
holding_cost_per_unit: Annual holding cost per unit
Returns:
Optimal order quantity
"""
if annual_demand <= 0 or ordering_cost <= 0 or holding_cost_per_unit <= 0:
return 0.0
eoq = math.sqrt((2 * annual_demand * ordering_cost) / holding_cost_per_unit)
return eoq
def optimize_order_quantity(
required_quantity: Decimal,
annual_demand: float,
ordering_cost: float = 50.0,
holding_cost_rate: float = 0.25,
unit_price: float = 1.0,
min_order_qty: Optional[Decimal] = None,
max_order_qty: Optional[Decimal] = None
) -> OrderOptimizationResult:
"""
Optimize order quantity considering EOQ and constraints.
Args:
required_quantity: Quantity needed for current period
annual_demand: Estimated annual demand
ordering_cost: Fixed cost per order
holding_cost_rate: Annual holding cost as % of unit price
unit_price: Cost per unit
min_order_qty: Minimum order quantity (MOQ)
max_order_qty: Maximum order quantity (storage limit)
Returns:
OrderOptimizationResult with optimal quantity and costs
"""
holding_cost_per_unit = unit_price * holding_cost_rate
# Calculate EOQ
eoq = calculate_economic_order_quantity(
annual_demand,
ordering_cost,
holding_cost_per_unit
)
# Start with EOQ or required quantity, whichever is larger
optimal_qty = max(float(required_quantity), eoq)
# Build structured reasoning data
reasoning_data = {
'type': 'eoq_base',
'parameters': {
'eoq': round(eoq, 2),
'required_quantity': float(required_quantity),
'annual_demand': round(annual_demand, 2),
'ordering_cost': ordering_cost,
'holding_cost_rate': holding_cost_rate
},
'constraints_applied': []
}
# Apply minimum order quantity
if min_order_qty and Decimal(optimal_qty) < min_order_qty:
optimal_qty = float(min_order_qty)
reasoning_data['constraints_applied'].append({
'type': 'moq_applied',
'moq': float(min_order_qty)
})
# Apply maximum order quantity
if max_order_qty and Decimal(optimal_qty) > max_order_qty:
optimal_qty = float(max_order_qty)
reasoning_data['constraints_applied'].append({
'type': 'max_applied',
'max_qty': float(max_order_qty)
})
reasoning_data['parameters']['optimal_quantity'] = round(optimal_qty, 2)
# Calculate costs
orders_per_year = annual_demand / optimal_qty if optimal_qty > 0 else 0
annual_ordering_cost = orders_per_year * ordering_cost
annual_holding_cost = (optimal_qty / 2) * holding_cost_per_unit
total_annual_cost = annual_ordering_cost + annual_holding_cost
return OrderOptimizationResult(
optimal_quantity=Decimal(str(optimal_qty)),
order_cost=Decimal(str(annual_ordering_cost)),
holding_cost=Decimal(str(annual_holding_cost)),
total_cost=Decimal(str(total_annual_cost)),
orders_per_year=orders_per_year,
reasoning_data=reasoning_data
)
def round_to_moq(
quantity: Decimal,
moq: Decimal,
round_up: bool = True
) -> Decimal:
"""
Round quantity to meet minimum order quantity.
Args:
quantity: Desired quantity
moq: Minimum order quantity
round_up: If True, always round up to next MOQ multiple
Returns:
Rounded quantity
"""
if quantity <= 0 or moq <= 0:
return quantity
if quantity < moq:
return moq
# Calculate how many MOQs needed
multiples = quantity / moq
if round_up:
return Decimal(math.ceil(float(multiples))) * moq
else:
return Decimal(round(float(multiples))) * moq
def round_to_package_size(
quantity: Decimal,
package_size: Decimal,
allow_partial: bool = False
) -> Decimal:
"""
Round quantity to package size.
Args:
quantity: Desired quantity
package_size: Size of one package
allow_partial: If False, always round up to full packages
Returns:
Rounded quantity
"""
if quantity <= 0 or package_size <= 0:
return quantity
packages_needed = quantity / package_size
if allow_partial:
return quantity
else:
return Decimal(math.ceil(float(packages_needed))) * package_size
def apply_price_tier_optimization(
base_quantity: Decimal,
unit_price: Decimal,
price_tiers: List[Dict]
) -> Tuple[Decimal, Decimal, dict]:
"""
Optimize quantity to take advantage of price tiers.
Args:
base_quantity: Base quantity needed
unit_price: Current unit price
price_tiers: List of dicts with 'min_quantity' and 'unit_price'
Returns:
Tuple of (optimized_quantity, unit_price, reasoning_data)
"""
if not price_tiers:
return base_quantity, unit_price, {
'type': 'no_tiers',
'parameters': {
'base_quantity': float(base_quantity),
'unit_price': float(unit_price)
}
}
# Sort tiers by min_quantity
sorted_tiers = sorted(price_tiers, key=lambda x: x['min_quantity'])
# Calculate cost at base quantity
base_cost = base_quantity * unit_price
# Find current tier
current_tier_price = unit_price
for tier in sorted_tiers:
if base_quantity >= Decimal(str(tier['min_quantity'])):
current_tier_price = Decimal(str(tier['unit_price']))
# Check if moving to next tier would save money
best_quantity = base_quantity
best_price = current_tier_price
best_savings = Decimal('0')
reasoning_data = {
'type': 'current_tier',
'parameters': {
'base_quantity': float(base_quantity),
'current_tier_price': float(current_tier_price),
'base_cost': float(base_cost)
}
}
for tier in sorted_tiers:
tier_min_qty = Decimal(str(tier['min_quantity']))
tier_price = Decimal(str(tier['unit_price']))
if tier_min_qty > base_quantity:
# Calculate cost at this tier
tier_cost = tier_min_qty * tier_price
# Calculate savings
savings = base_cost - tier_cost
if savings > best_savings:
# Additional quantity needed
additional_qty = tier_min_qty - base_quantity
# Check if savings justify additional inventory
# Simple heuristic: savings should be > 10% of additional cost
additional_cost = additional_qty * tier_price
if savings > additional_cost * Decimal('0.1'):
best_quantity = tier_min_qty
best_price = tier_price
best_savings = savings
reasoning_data = {
'type': 'tier_upgraded',
'parameters': {
'base_quantity': float(base_quantity),
'tier_min_qty': float(tier_min_qty),
'base_price': float(current_tier_price),
'tier_price': float(tier_price),
'savings': round(float(savings), 2),
'additional_qty': float(additional_qty)
}
}
return best_quantity, best_price, reasoning_data
def aggregate_requirements_for_moq(
requirements: List[Dict],
moq: Decimal
) -> List[Dict]:
"""
Aggregate multiple requirements to meet MOQ efficiently.
Args:
requirements: List of requirement dicts with 'quantity' and 'date'
moq: Minimum order quantity
Returns:
List of aggregated orders
"""
if not requirements:
return []
# Sort requirements by date
sorted_reqs = sorted(requirements, key=lambda x: x['date'])
orders = []
current_batch = []
current_total = Decimal('0')
for req in sorted_reqs:
req_qty = Decimal(str(req['quantity']))
# Check if adding this requirement would exceed reasonable aggregation
# (e.g., don't aggregate more than 30 days worth)
if current_batch:
days_span = (req['date'] - current_batch[0]['date']).days
if days_span > 30:
# Finalize current batch
if current_total > 0:
orders.append({
'quantity': round_to_moq(current_total, moq),
'date': current_batch[0]['date'],
'requirements': current_batch.copy()
})
current_batch = []
current_total = Decimal('0')
current_batch.append(req)
current_total += req_qty
# If we've met MOQ, finalize this batch
if current_total >= moq:
orders.append({
'quantity': round_to_moq(current_total, moq),
'date': current_batch[0]['date'],
'requirements': current_batch.copy()
})
current_batch = []
current_total = Decimal('0')
# Handle remaining requirements
if current_batch:
orders.append({
'quantity': round_to_moq(current_total, moq),
'date': current_batch[0]['date'],
'requirements': current_batch
})
return orders
def calculate_order_splitting(
total_quantity: Decimal,
suppliers: List[Dict],
max_supplier_capacity: Optional[Decimal] = None
) -> List[Dict]:
"""
Split large order across multiple suppliers.
Args:
total_quantity: Total quantity needed
suppliers: List of supplier dicts with 'id', 'capacity', 'reliability'
max_supplier_capacity: Maximum any single supplier should provide
Returns:
List of allocations with 'supplier_id' and 'quantity'
"""
if not suppliers:
return []
# Sort suppliers by reliability (descending)
sorted_suppliers = sorted(
suppliers,
key=lambda x: x.get('reliability', 0.5),
reverse=True
)
allocations = []
remaining = total_quantity
for supplier in sorted_suppliers:
if remaining <= 0:
break
supplier_capacity = Decimal(str(supplier.get('capacity', float('inf'))))
# Apply max capacity constraint
if max_supplier_capacity:
supplier_capacity = min(supplier_capacity, max_supplier_capacity)
# Allocate to this supplier
allocated = min(remaining, supplier_capacity)
allocations.append({
'supplier_id': supplier['id'],
'quantity': allocated,
'reliability': supplier.get('reliability', 0.5)
})
remaining -= allocated
# If still remaining, distribute across suppliers
if remaining > 0:
# Distribute remaining proportionally to reliability
total_reliability = sum(s.get('reliability', 0.5) for s in sorted_suppliers)
for i, supplier in enumerate(sorted_suppliers):
if total_reliability > 0:
proportion = supplier.get('reliability', 0.5) / total_reliability
additional = remaining * Decimal(str(proportion))
allocations[i]['quantity'] += additional
return allocations
def calculate_buffer_stock(
lead_time_days: int,
daily_demand: float,
demand_variability: float,
service_level: float = 0.95
) -> Decimal:
"""
Calculate buffer stock based on demand variability.
Buffer Stock = Z × σ × √(lead_time)
where:
- Z = service level z-score
- σ = demand standard deviation
- lead_time = lead time in days
Args:
lead_time_days: Supplier lead time in days
daily_demand: Average daily demand
demand_variability: Coefficient of variation (CV = σ/μ)
service_level: Target service level (0-1)
Returns:
Buffer stock quantity
"""
if lead_time_days <= 0 or daily_demand <= 0:
return Decimal('0')
# Z-scores for common service levels
z_scores = {
0.90: 1.28,
0.95: 1.65,
0.975: 1.96,
0.99: 2.33,
0.995: 2.58
}
# Get z-score for service level
z_score = z_scores.get(service_level, 1.65) # Default to 95%
# Calculate standard deviation
stddev = daily_demand * demand_variability
# Buffer stock formula
buffer = z_score * stddev * math.sqrt(lead_time_days)
return Decimal(str(buffer))
def calculate_reorder_point(
daily_demand: float,
lead_time_days: int,
safety_stock: Decimal
) -> Decimal:
"""
Calculate reorder point.
Reorder Point = (Daily Demand × Lead Time) + Safety Stock
Args:
daily_demand: Average daily demand
lead_time_days: Supplier lead time in days
safety_stock: Safety stock quantity
Returns:
Reorder point
"""
lead_time_demand = Decimal(str(daily_demand * lead_time_days))
return lead_time_demand + safety_stock
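A worked sketch combining the helpers above (assuming `shared.utils.optimization` is importable); the demand figures, costs, MOQ, and package size are illustrative:

```python
from decimal import Decimal

from shared.utils.optimization import (
    optimize_order_quantity, round_to_moq, round_to_package_size,
    calculate_buffer_stock, calculate_reorder_point,
)

# Roughly 120 kg needed now, ~6000 kg/year demand, 25 kg minimum order quantity
result = optimize_order_quantity(
    required_quantity=Decimal("120"),
    annual_demand=6000.0,
    ordering_cost=40.0,
    holding_cost_rate=0.25,
    unit_price=0.80,
    min_order_qty=Decimal("25"),
)
print(result.optimal_quantity, result.orders_per_year, result.reasoning_data["type"])

# Round the chosen quantity up to full MOQ multiples, then to full 10 kg packages
qty = round_to_package_size(round_to_moq(result.optimal_quantity, Decimal("25")), Decimal("10"))

# Safety stock and reorder point for the same ingredient
buffer = calculate_buffer_stock(lead_time_days=3, daily_demand=16.4, demand_variability=0.3)
print(qty, calculate_reorder_point(daily_demand=16.4, lead_time_days=3, safety_stock=buffer))
```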

64 shared/utils/retry.py Normal file

@@ -0,0 +1,64 @@
"""
Retry utilities for shared use across services
"""
import asyncio
import random
from typing import Callable, Any, Tuple, Type
import logging
logger = logging.getLogger(__name__)
async def retry_with_backoff(
func,
max_retries: int = 3,
exceptions: Tuple[Type[Exception], ...] = (Exception,),
base_delay: float = 1.0,
max_delay: float = 60.0,
backoff_factor: float = 2.0
):
"""
Retry a function with exponential backoff.
Args:
func: The function to retry (can be sync or async)
max_retries: Maximum number of retry attempts
exceptions: Tuple of exception types to catch and retry
base_delay: Initial delay in seconds
max_delay: Maximum delay between retries
backoff_factor: Factor by which delay increases after each retry
Returns:
Result of the function call
Raises:
The original exception if all retries are exhausted
"""
for attempt in range(max_retries + 1): # +1 because first attempt doesn't count as retry
try:
result = func()
# Handle both async functions and lambdas that return coroutines
if asyncio.iscoroutine(result):
result = await result
return result
except exceptions as e:
if attempt == max_retries:
# Exhausted all retries, re-raise the exception
raise e
# Calculate delay with exponential backoff and jitter
delay = min(base_delay * (backoff_factor ** attempt), max_delay)
# Add jitter to prevent thundering herd
delay = delay * (0.5 + random.random() * 0.5)
logger.warning(
f"Attempt {attempt + 1} failed, retrying in {delay:.2f}s: {str(e)}",
extra={
"attempt": attempt + 1,
"max_retries": max_retries,
"exception": str(e)
}
)
await asyncio.sleep(delay)
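A usage sketch (assuming `shared.utils.retry` is importable); the health-check URL is illustrative, and any awaitable callable works the same way:

```python
import asyncio
import httpx

from shared.utils.retry import retry_with_backoff

async def fetch_health():
    async with httpx.AsyncClient() as client:
        response = await client.get("http://tenant-service:8000/health")
        response.raise_for_status()
        return response.json()

async def main():
    # Pass a zero-argument callable; bind arguments with a lambda or functools.partial
    data = await retry_with_backoff(
        lambda: fetch_health(),
        max_retries=3,
        exceptions=(httpx.HTTPError,),
        base_delay=0.5,
    )
    print(data)

asyncio.run(main())
```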

293 shared/utils/saga_pattern.py Executable file

@@ -0,0 +1,293 @@
"""
Saga Pattern Implementation
Provides distributed transaction coordination with compensation logic
for microservices architecture.
"""
import asyncio
import uuid
from typing import Callable, List, Dict, Any, Optional, Tuple
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
import logging
logger = logging.getLogger(__name__)
class SagaStepStatus(str, Enum):
"""Status of a saga step"""
PENDING = "pending"
IN_PROGRESS = "in_progress"
COMPLETED = "completed"
FAILED = "failed"
COMPENSATING = "compensating"
COMPENSATED = "compensated"
class SagaStatus(str, Enum):
"""Overall saga status"""
PENDING = "pending"
IN_PROGRESS = "in_progress"
COMPLETED = "completed"
FAILED = "failed"
COMPENSATING = "compensating"
COMPENSATED = "compensated"
@dataclass
class SagaStep:
"""
A single step in a saga with compensation logic.
Args:
name: Human-readable step name
action: Async function to execute
compensation: Async function to undo the action
action_args: Arguments for the action function
action_kwargs: Keyword arguments for the action function
"""
name: str
action: Callable
compensation: Optional[Callable] = None
action_args: tuple = field(default_factory=tuple)
action_kwargs: dict = field(default_factory=dict)
# Runtime state
status: SagaStepStatus = SagaStepStatus.PENDING
result: Any = None
error: Optional[Exception] = None
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
@dataclass
class SagaExecution:
"""Tracks execution state of a saga"""
saga_id: str
status: SagaStatus = SagaStatus.PENDING
steps: List[SagaStep] = field(default_factory=list)
current_step: int = 0
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
error: Optional[Exception] = None
class SagaCoordinator:
"""
Coordinates saga execution with automatic compensation on failure.
Example:
```python
saga = SagaCoordinator()
saga.add_step(
"create_order",
action=create_order,
compensation=delete_order,
action_args=(order_data,)
)
saga.add_step(
"reserve_inventory",
action=reserve_inventory,
compensation=release_inventory,
action_args=(order_id, items)
)
result = await saga.execute()
```
"""
def __init__(self, saga_id: Optional[str] = None):
self.execution = SagaExecution(
saga_id=saga_id or str(uuid.uuid4())
)
self._completed_steps: List[SagaStep] = []
def add_step(
self,
name: str,
action: Callable,
compensation: Optional[Callable] = None,
action_args: tuple = (),
action_kwargs: Optional[dict] = None
):
"""
Add a step to the saga.
Args:
name: Human-readable step name
action: Async function to execute
compensation: Async function to undo the action (optional)
action_args: Arguments for the action function
action_kwargs: Keyword arguments for the action function
"""
step = SagaStep(
name=name,
action=action,
compensation=compensation,
action_args=action_args,
action_kwargs=action_kwargs or {}
)
self.execution.steps.append(step)
logger.debug(f"Added step '{name}' to saga {self.execution.saga_id}")
async def execute(self) -> Tuple[bool, Optional[Any], Optional[Exception]]:
"""
Execute all saga steps in sequence.
Returns:
Tuple of (success: bool, final_result: Any, error: Optional[Exception])
"""
self.execution.status = SagaStatus.IN_PROGRESS
self.execution.started_at = datetime.now()
logger.info(
f"Starting saga {self.execution.saga_id} with {len(self.execution.steps)} steps"
)
try:
# Execute each step
for idx, step in enumerate(self.execution.steps):
self.execution.current_step = idx
success = await self._execute_step(step)
if not success:
# Step failed, trigger compensation
logger.error(
f"Saga {self.execution.saga_id} failed at step '{step.name}': {step.error}"
)
await self._compensate()
self.execution.status = SagaStatus.COMPENSATED
self.execution.completed_at = datetime.now()
self.execution.error = step.error
return False, None, step.error
# Step succeeded
self._completed_steps.append(step)
# All steps completed successfully
self.execution.status = SagaStatus.COMPLETED
self.execution.completed_at = datetime.now()
# Return the result of the last step
final_result = self.execution.steps[-1].result if self.execution.steps else None
logger.info(f"Saga {self.execution.saga_id} completed successfully")
return True, final_result, None
except Exception as e:
logger.exception(f"Unexpected error in saga {self.execution.saga_id}: {e}")
await self._compensate()
self.execution.status = SagaStatus.FAILED
self.execution.completed_at = datetime.now()
self.execution.error = e
return False, None, e
async def _execute_step(self, step: SagaStep) -> bool:
"""
Execute a single saga step.
Returns:
True if step succeeded, False otherwise
"""
step.status = SagaStepStatus.IN_PROGRESS
step.started_at = datetime.now()
logger.info(f"Executing saga step '{step.name}'")
try:
# Execute the action
if asyncio.iscoroutinefunction(step.action):
result = await step.action(*step.action_args, **step.action_kwargs)
else:
result = step.action(*step.action_args, **step.action_kwargs)
step.result = result
step.status = SagaStepStatus.COMPLETED
step.completed_at = datetime.now()
logger.info(f"Saga step '{step.name}' completed successfully")
return True
except Exception as e:
step.error = e
step.status = SagaStepStatus.FAILED
step.completed_at = datetime.now()
logger.error(f"Saga step '{step.name}' failed: {e}")
return False
async def _compensate(self):
"""
Execute compensation logic for all completed steps in reverse order.
"""
if not self._completed_steps:
logger.info(f"No steps to compensate for saga {self.execution.saga_id}")
return
self.execution.status = SagaStatus.COMPENSATING
logger.info(
f"Starting compensation for saga {self.execution.saga_id} "
f"({len(self._completed_steps)} steps to compensate)"
)
# Compensate in reverse order
for step in reversed(self._completed_steps):
if step.compensation is None:
logger.warning(
f"Step '{step.name}' has no compensation function, skipping"
)
continue
step.status = SagaStepStatus.COMPENSATING
try:
logger.info(f"Compensating step '{step.name}'")
# Execute compensation with the result from the original action
compensation_args = (step.result,) if step.result is not None else ()
if asyncio.iscoroutinefunction(step.compensation):
await step.compensation(*compensation_args)
else:
step.compensation(*compensation_args)
step.status = SagaStepStatus.COMPENSATED
logger.info(f"Step '{step.name}' compensated successfully")
except Exception as e:
logger.error(f"Failed to compensate step '{step.name}': {e}")
# Continue compensating other steps even if one fails
logger.info(f"Compensation completed for saga {self.execution.saga_id}")
def get_execution_summary(self) -> Dict[str, Any]:
"""Get summary of saga execution"""
return {
"saga_id": self.execution.saga_id,
"status": self.execution.status.value,
"total_steps": len(self.execution.steps),
"current_step": self.execution.current_step,
"completed_steps": len(self._completed_steps),
"started_at": self.execution.started_at.isoformat() if self.execution.started_at else None,
"completed_at": self.execution.completed_at.isoformat() if self.execution.completed_at else None,
"error": str(self.execution.error) if self.execution.error else None,
"steps": [
{
"name": step.name,
"status": step.status.value,
"has_compensation": step.compensation is not None,
"error": str(step.error) if step.error else None
}
for step in self.execution.steps
]
}
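A runnable variant of the docstring example (assuming `shared.utils.saga_pattern` is importable); the order functions are hypothetical and the second step deliberately fails to show compensation running in reverse order:

```python
import asyncio

from shared.utils.saga_pattern import SagaCoordinator

async def reserve_stock(order_id: str):
    return {"order_id": order_id, "reservation_id": "res-1"}

async def release_stock(reservation):
    # Compensation receives the result of the original action
    print("released", reservation["reservation_id"])

async def charge_customer(order_id: str):
    raise RuntimeError("payment declined")  # forced failure to trigger compensation

async def main():
    saga = SagaCoordinator()
    saga.add_step("reserve_stock", action=reserve_stock, compensation=release_stock, action_args=("order-42",))
    saga.add_step("charge_customer", action=charge_customer, action_args=("order-42",))
    success, result, error = await saga.execute()
    print(success, error)                           # False payment declined
    print(saga.get_execution_summary()["status"])   # compensated

asyncio.run(main())
```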


@@ -0,0 +1,45 @@
"""
Seed Data Path Utilities
Provides functions to locate seed data files for demo data creation
"""
from pathlib import Path
def get_seed_data_path(profile: str, filename: str, child_profile: str = None, child_id: str = None) -> Path:
"""
Get the path to a seed data file.
Args:
profile: Demo profile (professional/enterprise)
filename: Seed data filename
child_profile: Not used (kept for API compatibility)
child_id: Optional child tenant ID for enterprise child locations
Returns:
Path to the seed data file
Raises:
FileNotFoundError: If seed data file cannot be found
"""
base_path = Path(__file__).parent.parent / "demo" / "fixtures"
if child_id:
# Enterprise child location: enterprise/children/{child_id}/{filename}
file_path = base_path / profile / "children" / child_id / filename
elif profile == "enterprise":
# Enterprise parent: enterprise/parent/{filename}
file_path = base_path / profile / "parent" / filename
else:
# Professional: professional/{filename}
file_path = base_path / profile / filename
if not file_path.exists():
raise FileNotFoundError(
f"Seed data file not found: {file_path}\n"
f"Profile: {profile}\n"
f"Child ID: {child_id}\n"
f"Filename: {filename}"
)
return file_path
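Illustrative calls for the path helper (the fixture filenames and child ID below are hypothetical, and the function raises FileNotFoundError if the file does not exist on disk):

```python
# Professional profile: fixtures live directly under professional/
path = get_seed_data_path("professional", "ingredients.json")

# Enterprise parent vs. child locations resolve to different subdirectories
parent_path = get_seed_data_path("enterprise", "recipes.json")
child_path = get_seed_data_path("enterprise", "recipes.json", child_id="location-001")
```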


@@ -0,0 +1,360 @@
# shared/utils/tenant_settings_client.py
"""
Tenant Settings Client
Shared utility for services to fetch tenant-specific settings from Tenant Service
Includes Redis caching for performance
"""
import httpx
import json
from typing import Dict, Any, Optional
from uuid import UUID
import redis.asyncio as aioredis
from datetime import timedelta
import logging
logger = logging.getLogger(__name__)
class TenantSettingsClient:
"""
Client for fetching tenant settings from Tenant Service
Features:
- HTTP client to fetch settings from Tenant Service API
- Redis caching with configurable TTL (default 5 minutes)
- Automatic cache invalidation support
- Fallback to defaults if Tenant Service is unavailable
"""
def __init__(
self,
tenant_service_url: str,
redis_client: Optional[aioredis.Redis] = None,
cache_ttl: int = 300, # 5 minutes default
http_timeout: int = 10
):
"""
Initialize TenantSettingsClient
Args:
tenant_service_url: Base URL of Tenant Service (e.g., "http://tenant-service:8000")
redis_client: Optional Redis client for caching
cache_ttl: Cache TTL in seconds (default 300 = 5 minutes)
http_timeout: HTTP request timeout in seconds
"""
self.tenant_service_url = tenant_service_url.rstrip('/')
self.redis = redis_client
self.cache_ttl = cache_ttl
self.http_timeout = http_timeout
# HTTP client with connection pooling
self.http_client = httpx.AsyncClient(
timeout=http_timeout,
limits=httpx.Limits(max_keepalive_connections=20, max_connections=100)
)
async def get_procurement_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get procurement settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with procurement settings
"""
return await self._get_category_settings(tenant_id, "procurement")
async def get_inventory_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get inventory settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with inventory settings
"""
return await self._get_category_settings(tenant_id, "inventory")
async def get_production_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get production settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with production settings
"""
return await self._get_category_settings(tenant_id, "production")
async def get_supplier_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get supplier settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with supplier settings
"""
return await self._get_category_settings(tenant_id, "supplier")
async def get_pos_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get POS settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with POS settings
"""
return await self._get_category_settings(tenant_id, "pos")
async def get_order_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get order settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with order settings
"""
return await self._get_category_settings(tenant_id, "order")
async def get_all_settings(self, tenant_id: UUID) -> Dict[str, Any]:
"""
Get all settings for a tenant
Args:
tenant_id: UUID of the tenant
Returns:
Dictionary with all setting categories
"""
cache_key = f"tenant_settings:{tenant_id}:all"
# Try cache first
if self.redis:
cached = await self._get_from_cache(cache_key)
if cached:
return cached
# Fetch from Tenant Service
try:
url = f"{self.tenant_service_url}/api/v1/tenants/{tenant_id}/settings"
response = await self.http_client.get(url)
response.raise_for_status()
settings = response.json()
# Cache the result
if self.redis:
await self._set_in_cache(cache_key, settings)
return settings
except Exception as e:
logger.error(f"Failed to fetch all settings for tenant {tenant_id}: {e}")
return self._get_default_settings()
async def invalidate_cache(self, tenant_id: UUID, category: Optional[str] = None):
"""
Invalidate cache for a tenant's settings
Args:
tenant_id: UUID of the tenant
category: Optional category to invalidate. If None, invalidates all categories.
"""
if not self.redis:
return
if category:
cache_key = f"tenant_settings:{tenant_id}:{category}"
await self.redis.delete(cache_key)
logger.info(f"Invalidated cache for tenant {tenant_id}, category {category}")
else:
# Invalidate all categories
pattern = f"tenant_settings:{tenant_id}:*"
keys = await self.redis.keys(pattern)
if keys:
await self.redis.delete(*keys)
logger.info(f"Invalidated all cached settings for tenant {tenant_id}")
async def _get_category_settings(self, tenant_id: UUID, category: str) -> Dict[str, Any]:
"""
Internal method to fetch settings for a specific category
Args:
tenant_id: UUID of the tenant
category: Category name
Returns:
Dictionary with category settings
"""
cache_key = f"tenant_settings:{tenant_id}:{category}"
# Try cache first
if self.redis:
cached = await self._get_from_cache(cache_key)
if cached:
return cached
# Fetch from Tenant Service
try:
url = f"{self.tenant_service_url}/api/v1/tenants/{tenant_id}/settings/{category}"
response = await self.http_client.get(url)
response.raise_for_status()
data = response.json()
settings = data.get("settings", {})
# Cache the result
if self.redis:
await self._set_in_cache(cache_key, settings)
return settings
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
logger.warning(f"Settings not found for tenant {tenant_id}, using defaults")
else:
logger.error(f"HTTP error fetching {category} settings for tenant {tenant_id}: {e}")
return self._get_default_category_settings(category)
except Exception as e:
logger.error(f"Failed to fetch {category} settings for tenant {tenant_id}: {e}")
return self._get_default_category_settings(category)
async def _get_from_cache(self, key: str) -> Optional[Dict[str, Any]]:
"""Get value from Redis cache"""
try:
cached = await self.redis.get(key)
if cached:
return json.loads(cached)
except Exception as e:
logger.warning(f"Redis get error for key {key}: {e}")
return None
async def _set_in_cache(self, key: str, value: Dict[str, Any]):
"""Set value in Redis cache with TTL"""
try:
await self.redis.setex(
key,
timedelta(seconds=self.cache_ttl),
json.dumps(value)
)
except Exception as e:
logger.warning(f"Redis set error for key {key}: {e}")
def _get_default_category_settings(self, category: str) -> Dict[str, Any]:
"""Get default settings for a category as fallback"""
defaults = self._get_default_settings()
return defaults.get(f"{category}_settings", {})
def _get_default_settings(self) -> Dict[str, Any]:
"""Get default settings for all categories as fallback"""
return {
"procurement_settings": {
"auto_approve_enabled": True,
"auto_approve_threshold_eur": 500.0,
"auto_approve_min_supplier_score": 0.80,
"require_approval_new_suppliers": True,
"require_approval_critical_items": True,
"procurement_lead_time_days": 3,
"demand_forecast_days": 14,
"safety_stock_percentage": 20.0,
"po_approval_reminder_hours": 24,
"po_critical_escalation_hours": 12
},
"inventory_settings": {
"low_stock_threshold": 10,
"reorder_point": 20,
"reorder_quantity": 50,
"expiring_soon_days": 7,
"expiration_warning_days": 3,
"quality_score_threshold": 8.0,
"temperature_monitoring_enabled": True,
"refrigeration_temp_min": 1.0,
"refrigeration_temp_max": 4.0,
"freezer_temp_min": -20.0,
"freezer_temp_max": -15.0,
"room_temp_min": 18.0,
"room_temp_max": 25.0,
"temp_deviation_alert_minutes": 15,
"critical_temp_deviation_minutes": 5
},
"production_settings": {
"planning_horizon_days": 7,
"minimum_batch_size": 1.0,
"maximum_batch_size": 100.0,
"production_buffer_percentage": 10.0,
"working_hours_per_day": 12,
"max_overtime_hours": 4,
"capacity_utilization_target": 0.85,
"capacity_warning_threshold": 0.95,
"quality_check_enabled": True,
"minimum_yield_percentage": 85.0,
"quality_score_threshold": 8.0,
"schedule_optimization_enabled": True,
"prep_time_buffer_minutes": 30,
"cleanup_time_buffer_minutes": 15,
"labor_cost_per_hour_eur": 15.0,
"overhead_cost_percentage": 20.0
},
"supplier_settings": {
"default_payment_terms_days": 30,
"default_delivery_days": 3,
"excellent_delivery_rate": 95.0,
"good_delivery_rate": 90.0,
"excellent_quality_rate": 98.0,
"good_quality_rate": 95.0,
"critical_delivery_delay_hours": 24,
"critical_quality_rejection_rate": 10.0,
"high_cost_variance_percentage": 15.0
},
"pos_settings": {
"sync_interval_minutes": 5,
"auto_sync_products": True,
"auto_sync_transactions": True
},
"order_settings": {
"max_discount_percentage": 50.0,
"default_delivery_window_hours": 48,
"dynamic_pricing_enabled": False,
"discount_enabled": True,
"delivery_tracking_enabled": True
}
}
async def close(self):
"""Close HTTP client connections"""
await self.http_client.aclose()
# Factory function for easy instantiation
def create_tenant_settings_client(
tenant_service_url: str,
redis_client: Optional[aioredis.Redis] = None,
cache_ttl: int = 300
) -> TenantSettingsClient:
"""
Factory function to create a TenantSettingsClient
Args:
tenant_service_url: Base URL of Tenant Service
redis_client: Optional Redis client for caching
cache_ttl: Cache TTL in seconds
Returns:
TenantSettingsClient instance
"""
return TenantSettingsClient(
tenant_service_url=tenant_service_url,
redis_client=redis_client,
cache_ttl=cache_ttl
)
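A usage sketch (assuming `shared.utils.tenant_settings_client` is importable); the service and Redis URLs are illustrative, and the client falls back to the defaults above if the Tenant Service is unreachable:

```python
import asyncio
from uuid import UUID

import redis.asyncio as aioredis

from shared.utils.tenant_settings_client import create_tenant_settings_client

async def main():
    # Redis is optional; without it the client simply skips caching
    redis_client = aioredis.from_url("redis://localhost:6379/0")
    client = create_tenant_settings_client(
        tenant_service_url="http://tenant-service:8000",
        redis_client=redis_client,
        cache_ttl=300,
    )
    try:
        tenant_id = UUID("a1b2c3d4-e5f6-47a8-b9c0-d1e2f3a4b5c6")
        procurement = await client.get_procurement_settings(tenant_id)
        print(procurement.get("auto_approve_threshold_eur"))
        # After settings change in the Tenant Service, drop the cached copy
        await client.invalidate_cache(tenant_id, category="procurement")
    finally:
        await client.close()

asyncio.run(main())
```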

536 shared/utils/time_series_utils.py Executable file

@@ -0,0 +1,536 @@
"""
Time Series Utilities
Provides utilities for time-series analysis, projection, and calculations
used in forecasting and inventory planning.
"""
import statistics
from datetime import date, datetime, timedelta
from typing import List, Dict, Tuple, Optional
from decimal import Decimal
import math
def generate_date_range(
start_date: date,
end_date: date,
include_end: bool = True
) -> List[date]:
"""
Generate a list of dates between start and end.
Args:
start_date: Start date (inclusive)
end_date: End date
include_end: Whether to include end date
Returns:
List of dates
"""
dates = []
current = start_date
while current < end_date or (include_end and current == end_date):
dates.append(current)
current += timedelta(days=1)
return dates
def generate_future_dates(
start_date: date,
num_days: int
) -> List[date]:
"""
Generate a list of future dates starting from start_date.
Args:
start_date: Starting date
num_days: Number of days to generate
Returns:
List of dates
"""
return [start_date + timedelta(days=i) for i in range(num_days)]
def calculate_moving_average(
values: List[float],
window_size: int
) -> List[float]:
"""
Calculate moving average over a window.
Args:
values: List of values
window_size: Size of moving window
Returns:
        List of moving averages (empty when there are fewer values than window_size)
"""
if len(values) < window_size:
return []
moving_averages = []
for i in range(len(values) - window_size + 1):
window = values[i:i + window_size]
moving_averages.append(sum(window) / window_size)
return moving_averages
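# Illustrative check, not part of the original module: a 3-point moving average
# over five observations yields one value per full window, so the output is
# shorter than the input by window_size - 1.
def _example_moving_average() -> None:
    assert calculate_moving_average([1.0, 2.0, 3.0, 4.0, 5.0], window_size=3) == [2.0, 3.0, 4.0]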
def calculate_standard_deviation(values: List[float]) -> float:
"""
Calculate standard deviation of values.
Args:
values: List of values
Returns:
Standard deviation
"""
if len(values) < 2:
return 0.0
return statistics.stdev(values)
def calculate_variance(values: List[float]) -> float:
"""
Calculate variance of values.
Args:
values: List of values
Returns:
Variance
"""
if len(values) < 2:
return 0.0
return statistics.variance(values)
def calculate_mean(values: List[float]) -> float:
"""
Calculate mean of values.
Args:
values: List of values
Returns:
Mean
"""
if not values:
return 0.0
return statistics.mean(values)
def calculate_median(values: List[float]) -> float:
"""
Calculate median of values.
Args:
values: List of values
Returns:
Median
"""
if not values:
return 0.0
return statistics.median(values)
def calculate_percentile(values: List[float], percentile: float) -> float:
"""
Calculate percentile of values.
Args:
values: List of values
percentile: Percentile to calculate (0-100)
Returns:
Percentile value
"""
if not values:
return 0.0
sorted_values = sorted(values)
k = (len(sorted_values) - 1) * percentile / 100
f = math.floor(k)
c = math.ceil(k)
if f == c:
return sorted_values[int(k)]
d0 = sorted_values[int(f)] * (c - k)
d1 = sorted_values[int(c)] * (k - f)
return d0 + d1
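# Illustrative check, not part of the original module: with four sorted values the
# 25th percentile lands at rank k = 0.75, so the result interpolates between the
# first two values as 10 * 0.25 + 20 * 0.75 = 17.5.
def _example_percentile_interpolation() -> None:
    values = [10.0, 20.0, 30.0, 40.0]
    assert calculate_percentile(values, 25) == 17.5
    assert calculate_percentile(values, 50) == 25.0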
def calculate_coefficient_of_variation(values: List[float]) -> float:
"""
Calculate coefficient of variation (CV = stddev / mean).
Args:
values: List of values
Returns:
Coefficient of variation
"""
if not values:
return 0.0
mean = calculate_mean(values)
if mean == 0:
return 0.0
stddev = calculate_standard_deviation(values)
return stddev / mean
def aggregate_by_date(
data: List[Tuple[date, float]],
aggregation: str = "sum"
) -> Dict[date, float]:
"""
Aggregate time-series data by date.
Args:
data: List of (date, value) tuples
aggregation: Aggregation method ('sum', 'mean', 'max', 'min')
Returns:
Dictionary mapping date to aggregated value
"""
by_date: Dict[date, List[float]] = {}
for dt, value in data:
if dt not in by_date:
by_date[dt] = []
by_date[dt].append(value)
result = {}
for dt, values in by_date.items():
if aggregation == "sum":
result[dt] = sum(values)
elif aggregation == "mean":
result[dt] = calculate_mean(values)
elif aggregation == "max":
result[dt] = max(values)
elif aggregation == "min":
result[dt] = min(values)
else:
result[dt] = sum(values)
return result
def fill_missing_dates(
data: Dict[date, float],
start_date: date,
end_date: date,
fill_value: float = 0.0
) -> Dict[date, float]:
"""
Fill missing dates in time-series data.
Args:
data: Dictionary mapping date to value
start_date: Start date
end_date: End date
fill_value: Value to use for missing dates
Returns:
Dictionary with all dates filled
"""
date_range = generate_date_range(start_date, end_date)
filled_data = {}
for dt in date_range:
filled_data[dt] = data.get(dt, fill_value)
return filled_data
def calculate_trend(
values: List[float]
) -> Tuple[float, float]:
"""
Calculate linear trend (slope and intercept) using least squares.
Args:
values: List of values
Returns:
Tuple of (slope, intercept)
"""
if len(values) < 2:
return 0.0, values[0] if values else 0.0
n = len(values)
x = list(range(n))
y = values
# Calculate means
x_mean = sum(x) / n
y_mean = sum(y) / n
# Calculate slope
numerator = sum((x[i] - x_mean) * (y[i] - y_mean) for i in range(n))
denominator = sum((x[i] - x_mean) ** 2 for i in range(n))
if denominator == 0:
return 0.0, y_mean
slope = numerator / denominator
intercept = y_mean - slope * x_mean
return slope, intercept
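# Illustrative check, not part of the original module: for an evenly increasing
# series the least-squares fit recovers the step size as the slope and the first
# value as the intercept (x runs from 0 to n - 1).
def _example_trend_fit() -> None:
    slope, intercept = calculate_trend([2.0, 4.0, 6.0, 8.0])
    assert slope == 2.0 and intercept == 2.0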
def project_value(
historical_values: List[float],
periods_ahead: int,
method: str = "mean"
) -> List[float]:
"""
Project future values based on historical data.
Args:
historical_values: Historical values
periods_ahead: Number of periods to project
method: Projection method ('mean', 'trend', 'last')
Returns:
List of projected values
"""
if not historical_values:
return [0.0] * periods_ahead
if method == "mean":
# Use historical mean
projected_value = calculate_mean(historical_values)
return [projected_value] * periods_ahead
elif method == "last":
# Use last value
return [historical_values[-1]] * periods_ahead
elif method == "trend":
# Use trend projection
slope, intercept = calculate_trend(historical_values)
n = len(historical_values)
return [slope * (n + i) + intercept for i in range(periods_ahead)]
else:
# Default to mean
projected_value = calculate_mean(historical_values)
return [projected_value] * periods_ahead
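# Illustrative check, not part of the original module: the "trend" method
# extrapolates the fitted line beyond the last historical index, while "last"
# simply repeats the most recent observation.
def _example_projection() -> None:
    history = [2.0, 4.0, 6.0, 8.0]
    assert project_value(history, periods_ahead=2, method="trend") == [10.0, 12.0]
    assert project_value(history, periods_ahead=2, method="last") == [8.0, 8.0]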
def calculate_cumulative_sum(values: List[float]) -> List[float]:
"""
Calculate cumulative sum of values.
Args:
values: List of values
Returns:
List of cumulative sums
"""
cumulative = []
total = 0.0
for value in values:
total += value
cumulative.append(total)
return cumulative
def calculate_rolling_sum(
values: List[float],
window_size: int
) -> List[float]:
"""
Calculate rolling sum over a window.
Args:
values: List of values
window_size: Size of rolling window
Returns:
List of rolling sums
"""
if len(values) < window_size:
return []
rolling_sums = []
for i in range(len(values) - window_size + 1):
window = values[i:i + window_size]
rolling_sums.append(sum(window))
return rolling_sums
def normalize_values(
values: List[float],
method: str = "minmax"
) -> List[float]:
"""
Normalize values to a standard range.
Args:
values: List of values
method: Normalization method ('minmax' or 'zscore')
Returns:
List of normalized values
"""
if not values:
return []
if method == "minmax":
# Scale to [0, 1]
min_val = min(values)
max_val = max(values)
if max_val == min_val:
return [0.5] * len(values)
return [(v - min_val) / (max_val - min_val) for v in values]
elif method == "zscore":
# Z-score normalization
mean = calculate_mean(values)
stddev = calculate_standard_deviation(values)
if stddev == 0:
return [0.0] * len(values)
return [(v - mean) / stddev for v in values]
else:
return values
def detect_outliers(
values: List[float],
method: str = "iqr",
threshold: float = 1.5
) -> List[bool]:
"""
Detect outliers in values.
Args:
values: List of values
method: Detection method ('iqr' or 'zscore')
threshold: Threshold for outlier detection
Returns:
List of booleans indicating outliers
"""
if not values:
return []
if method == "iqr":
# Interquartile range method
q1 = calculate_percentile(values, 25)
q3 = calculate_percentile(values, 75)
iqr = q3 - q1
lower_bound = q1 - threshold * iqr
upper_bound = q3 + threshold * iqr
return [v < lower_bound or v > upper_bound for v in values]
elif method == "zscore":
# Z-score method
mean = calculate_mean(values)
stddev = calculate_standard_deviation(values)
if stddev == 0:
return [False] * len(values)
z_scores = [(v - mean) / stddev for v in values]
return [abs(z) > threshold for z in z_scores]
else:
return [False] * len(values)
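# Illustrative check, not part of the original module: with the default IQR
# threshold of 1.5, Q1 = 11 and Q3 = 13 give bounds of [8, 16], so only the
# value 50 is flagged.
def _example_outlier_detection() -> None:
    values = [10.0, 12.0, 11.0, 13.0, 50.0]
    assert detect_outliers(values, method="iqr") == [False, False, False, False, True]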
def interpolate_missing_values(
values: List[Optional[float]],
method: str = "linear"
) -> List[float]:
"""
Interpolate missing values in a time series.
Args:
values: List of values with possible None values
method: Interpolation method ('linear', 'forward', 'backward')
Returns:
List with interpolated values
"""
if not values:
return []
result = []
if method == "forward":
# Forward fill
last_valid = None
for v in values:
if v is not None:
last_valid = v
result.append(last_valid if last_valid is not None else 0.0)
elif method == "backward":
# Backward fill
next_valid = None
for v in reversed(values):
if v is not None:
next_valid = v
result.insert(0, next_valid if next_valid is not None else 0.0)
else: # linear
# Linear interpolation
result = list(values)
for i in range(len(result)):
if result[i] is None:
# Find previous and next valid values
prev_idx = None
next_idx = None
for j in range(i - 1, -1, -1):
if values[j] is not None:
prev_idx = j
break
for j in range(i + 1, len(values)):
if values[j] is not None:
next_idx = j
break
if prev_idx is not None and next_idx is not None:
# Linear interpolation
x0, y0 = prev_idx, values[prev_idx]
x1, y1 = next_idx, values[next_idx]
result[i] = y0 + (y1 - y0) * (i - x0) / (x1 - x0)
elif prev_idx is not None:
# Forward fill
result[i] = values[prev_idx]
elif next_idx is not None:
# Backward fill
result[i] = values[next_idx]
else:
# No valid values
result[i] = 0.0
return result
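# Illustrative end-to-end sketch, not part of the original module: a gap in a
# short daily series is filled by linear interpolation, then a missing calendar
# date is padded with the default fill value of 0.0.
if __name__ == "__main__":
    assert interpolate_missing_values([1.0, None, 3.0], method="linear") == [1.0, 2.0, 3.0]
    padded = fill_missing_dates(
        {date(2026, 1, 1): 5.0, date(2026, 1, 3): 7.0},
        start_date=date(2026, 1, 1),
        end_date=date(2026, 1, 3),
    )
    assert padded == {date(2026, 1, 1): 5.0, date(2026, 1, 2): 0.0, date(2026, 1, 3): 7.0}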

67
shared/utils/validation.py Executable file
View File

@@ -0,0 +1,67 @@
"""
Validation utilities for microservices
"""
import re
from typing import Any, Optional
from email_validator import validate_email, EmailNotValidError
def validate_spanish_phone(phone: str) -> bool:
"""Validate Spanish phone number"""
    # Spanish phone: optional +34 / 0034 / 34 prefix followed by 9 digits starting with 6-9
pattern = r'^(\+34|0034|34)?[6-9]\d{8}$'
return bool(re.match(pattern, phone.replace(' ', '').replace('-', '')))
def validate_email_address(email: str) -> bool:
"""Validate email address"""
try:
validate_email(email)
return True
except EmailNotValidError:
return False
def validate_tenant_name(name: str) -> bool:
"""Validate tenant name"""
# Must be 2-50 characters, letters, numbers, spaces, hyphens, apostrophes
pattern = r"^[a-zA-ZÀ-ÿ0-9\s\-']{2,50}$"
return bool(re.match(pattern, name))
def validate_address(address: str) -> bool:
"""Validate address"""
# Must be 5-200 characters
return 5 <= len(address.strip()) <= 200
def validate_coordinates(latitude: float, longitude: float) -> bool:
"""Validate Madrid coordinates"""
# Madrid is roughly between these coordinates
madrid_bounds = {
'lat_min': 40.3,
'lat_max': 40.6,
'lon_min': -3.8,
'lon_max': -3.5
}
return (
madrid_bounds['lat_min'] <= latitude <= madrid_bounds['lat_max'] and
madrid_bounds['lon_min'] <= longitude <= madrid_bounds['lon_max']
)
def validate_product_name(name: str) -> bool:
"""Validate product name"""
# Must be 1-50 characters, letters, numbers, spaces
pattern = r"^[a-zA-ZÀ-ÿ0-9\s]{1,50}$"
return bool(re.match(pattern, name))
def validate_positive_number(value: Any) -> bool:
"""Validate positive number"""
try:
return float(value) > 0
except (ValueError, TypeError):
return False
def validate_non_negative_number(value: Any) -> bool:
"""Validate non-negative number"""
try:
return float(value) >= 0
except (ValueError, TypeError):
return False
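# Illustrative checks, not part of the original module; the phone number and
# coordinates below are made-up sample values, not customer data.
if __name__ == "__main__":
    assert validate_spanish_phone("+34 612 345 678")
    assert validate_coordinates(40.4168, -3.7038)      # central Madrid
    assert not validate_coordinates(41.3874, 2.1686)   # Barcelona is outside the bounds
    assert validate_positive_number("12.5")
    assert not validate_positive_number("abc")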