Fix shared issues

Urtzi Alfaro
2025-07-18 12:34:28 +02:00
parent 592a810762
commit e989e3b362
9 changed files with 913 additions and 386 deletions


@@ -1,3 +1,6 @@
# ================================================================
# shared/monitoring/logging.py
# ================================================================
"""
Centralized logging configuration for microservices
"""
@@ -5,53 +8,109 @@ Centralized logging configuration for microservices
import logging
import logging.config
import os
import sys
from typing import Dict, Any
def setup_logging(service_name: str, log_level: str = "INFO",
                  enable_json: bool = False, enable_file: bool = True) -> None:
    """
    Set up logging configuration for a microservice with improved error handling.

    Args:
        service_name: Name of the service for log identification
        log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        enable_json: Whether to use JSON formatting
        enable_file: Whether to enable file logging
    """
    # Create logs directory if it doesn't exist and file logging is enabled
    log_dir = "/var/log"
    if enable_file:
        try:
            os.makedirs(log_dir, exist_ok=True)
        except PermissionError:
            # Fallback to local directory if can't write to /var/log
            log_dir = "./logs"
            os.makedirs(log_dir, exist_ok=True)
            print(f"Warning: Could not write to /var/log, using {log_dir}")

    # Define formatters
    formatters = {
        "standard": {
            "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S"
        },
        "detailed": {
            "format": "%(asctime)s [%(levelname)s] %(name)s [%(filename)s:%(lineno)d] %(funcName)s(): %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S"
        }
    }

    # Add JSON formatter if requested and available
    if enable_json:
        try:
            import pythonjsonlogger.jsonlogger
            formatters["json"] = {
                "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
                "format": "%(asctime)s %(name)s %(levelname)s %(message)s %(filename)s %(lineno)d"
            }
        except ImportError:
            print("Warning: pythonjsonlogger not available, falling back to standard formatting")
            enable_json = False

    # Define handlers
    handlers = {
        "console": {
            "class": "logging.StreamHandler",
            "level": log_level,
            "formatter": "json" if enable_json else "standard",
            "stream": "ext://sys.stdout"
        }
    }

    # Add file handler if enabled
    if enable_file:
        handlers["file"] = {
            "class": "logging.FileHandler",
            "level": log_level,
            "formatter": "detailed",
            "filename": f"{log_dir}/{service_name}.log",
            "mode": "a",
            "encoding": "utf-8"
        }

    # Add logstash handler if in production
    logstash_host = os.getenv("LOGSTASH_HOST")
    if logstash_host and os.getenv("ENVIRONMENT") == "production":
        try:
            handlers["logstash"] = {
                "class": "logstash.TCPLogstashHandler",
                "host": logstash_host,
                "port": int(os.getenv("LOGSTASH_PORT", "5000")),
                "version": 1,
                "message_type": "logstash",
                "fqdn": False,
                "tags": [service_name]
            }
        except Exception as e:
            print(f"Warning: Could not setup logstash handler: {e}")

    # Define root logger configuration
    root_handlers = ["console"]
    if enable_file:
        root_handlers.append("file")
    if "logstash" in handlers:
        root_handlers.append("logstash")

    # Complete logging configuration
    config: Dict[str, Any] = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": formatters,
        "handlers": handlers,
        "loggers": {
            "": {  # Root logger
                "handlers": root_handlers,
                "level": log_level,
                "propagate": False
            },
@@ -64,14 +123,32 @@ def setup_logging(service_name: str, log_level: str = "INFO") -> None:
"handlers": ["console"],
"level": log_level,
"propagate": False
},
"sqlalchemy": {
"handlers": ["console"],
"level": "WARNING", # Reduce SQL logging noise
"propagate": False
},
"httpx": {
"handlers": ["console"],
"level": "WARNING", # Reduce HTTP client logging
"propagate": False
}
}
}
# Add logstash handler if in production
if os.getenv("ENVIRONMENT") == "production":
config["loggers"][""]["handlers"].append("logstash")
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
logger.info(f"Logging configured for {service_name}")
try:
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
logger.info(f"Logging configured for {service_name} at level {log_level}")
except Exception as e:
# Fallback to basic logging if configuration fails
logging.basicConfig(
level=getattr(logging, log_level.upper()),
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
handlers=[logging.StreamHandler(sys.stdout)]
)
logger = logging.getLogger(__name__)
logger.error(f"Failed to configure advanced logging for {service_name}: {e}")
logger.info(f"Using basic logging configuration for {service_name}")