Initial microservices setup from artifacts
services/auth/shared/monitoring/__init__.py (new file, empty)
services/auth/shared/monitoring/logging.py (new file, 77 lines)
@@ -0,0 +1,77 @@
"""
Centralized logging configuration for microservices
"""

import logging
import logging.config
import os
from typing import Dict, Any


def setup_logging(service_name: str, log_level: str = "INFO") -> None:
    """Set up logging configuration for a microservice"""

    config: Dict[str, Any] = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {
                "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
            },
            "detailed": {
                "format": "%(asctime)s [%(levelname)s] %(name)s [%(filename)s:%(lineno)d] %(message)s"
            },
            "json": {
                "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
                "format": "%(asctime)s %(name)s %(levelname)s %(message)s"
            }
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "level": log_level,
                "formatter": "standard",
                "stream": "ext://sys.stdout"
            },
            "file": {
                "class": "logging.FileHandler",
                "level": log_level,
                "formatter": "detailed",
                "filename": f"/var/log/{service_name}.log",
                "mode": "a"
            },
            "logstash": {
                "class": "logstash.TCPLogstashHandler",
                "host": os.getenv("LOGSTASH_HOST", "localhost"),
                "port": int(os.getenv("LOGSTASH_PORT", "5000")),
                "version": 1,
                "message_type": "logstash",
                "fqdn": False,
                "tags": [service_name]
            }
        },
        "loggers": {
            "": {
                "handlers": ["console", "file"],
                "level": log_level,
                "propagate": False
            },
            "uvicorn": {
                "handlers": ["console"],
                "level": log_level,
                "propagate": False
            },
            "uvicorn.access": {
                "handlers": ["console"],
                "level": log_level,
                "propagate": False
            }
        }
    }

    # Add logstash handler if in production
    if os.getenv("ENVIRONMENT") == "production":
        config["loggers"][""]["handlers"].append("logstash")

    logging.config.dictConfig(config)
    logger = logging.getLogger(__name__)
    logger.info(f"Logging configured for {service_name}")
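
Usage note (not part of the diff above): a minimal sketch of how a service entrypoint might call setup_logging, assuming a FastAPI service run from services/auth so the package imports as shared.monitoring.logging; the service name "auth", the entrypoint path, and the /healthz route are illustrative assumptions.

    # Hypothetical entrypoint (e.g. services/auth/main.py); names are assumptions.
    import logging

    from fastapi import FastAPI

    from shared.monitoring.logging import setup_logging

    # Configure the root logger before the app and routers are created so that
    # module-level loggers inherit the console/file handlers defined above.
    setup_logging(service_name="auth", log_level="INFO")

    app = FastAPI(title="auth-service")
    logger = logging.getLogger(__name__)


    @app.get("/healthz")
    def healthz():
        logger.info("Health check requested")
        return {"status": "ok"}

Since the file handler writes to /var/log/<service_name>.log, the process needs write access to /var/log (for example via a volume mounted into the container), and setting ENVIRONMENT=production additionally ships logs to the Logstash host configured through LOGSTASH_HOST and LOGSTASH_PORT.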
services/auth/shared/monitoring/metrics.py (new file, 112 lines)
@@ -0,0 +1,112 @@
"""
Metrics collection for microservices
"""

import time
import logging
from typing import Dict, Any
from prometheus_client import Counter, Histogram, Gauge, start_http_server
from functools import wraps

logger = logging.getLogger(__name__)

# Prometheus metrics
REQUEST_COUNT = Counter(
    'http_requests_total',
    'Total HTTP requests',
    ['method', 'endpoint', 'status_code', 'service']
)

REQUEST_DURATION = Histogram(
    'http_request_duration_seconds',
    'HTTP request duration in seconds',
    ['method', 'endpoint', 'service']
)

ACTIVE_CONNECTIONS = Gauge(
    'active_connections',
    'Active database connections',
    ['service']
)

TRAINING_JOBS = Counter(
    'training_jobs_total',
    'Total training jobs',
    ['status', 'service']
)

FORECASTS_GENERATED = Counter(
    'forecasts_generated_total',
    'Total forecasts generated',
    ['service']
)


class MetricsCollector:
    """Metrics collector for microservices"""

    def __init__(self, service_name: str):
        self.service_name = service_name
        self.start_time = time.time()

    def start_metrics_server(self, port: int = 8080):
        """Start Prometheus metrics server"""
        try:
            start_http_server(port)
            logger.info(f"Metrics server started on port {port}")
        except Exception as e:
            logger.error(f"Failed to start metrics server: {e}")

    def record_request(self, method: str, endpoint: str, status_code: int, duration: float):
        """Record HTTP request metrics"""
        REQUEST_COUNT.labels(
            method=method,
            endpoint=endpoint,
            status_code=status_code,
            service=self.service_name
        ).inc()

        REQUEST_DURATION.labels(
            method=method,
            endpoint=endpoint,
            service=self.service_name
        ).observe(duration)

    def record_training_job(self, status: str):
        """Record training job metrics"""
        TRAINING_JOBS.labels(
            status=status,
            service=self.service_name
        ).inc()

    def record_forecast_generated(self):
        """Record forecast generation metrics"""
        FORECASTS_GENERATED.labels(
            service=self.service_name
        ).inc()

    def set_active_connections(self, count: int):
        """Set active database connections"""
        ACTIVE_CONNECTIONS.labels(
            service=self.service_name
        ).set(count)

def metrics_middleware(metrics_collector: MetricsCollector):
    """Middleware factory to collect request metrics.

    Assumes a Starlette/FastAPI-style HTTP middleware interface, where
    call_next is a coroutine function returning the response, so the
    inner middleware must be async and await it.
    """

    async def middleware(request, call_next):
        start_time = time.time()

        response = await call_next(request)

        duration = time.time() - start_time

        metrics_collector.record_request(
            method=request.method,
            endpoint=request.url.path,
            status_code=response.status_code,
            duration=duration
        )

        return response

    return middleware
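
Usage note (not part of the diff above): a sketch of wiring MetricsCollector and metrics_middleware into a FastAPI app, under the same import-path assumption as the logging sketch; the Starlette-style "http" middleware registration and port 8080 are illustrative.

    # Hypothetical entrypoint wiring; names are assumptions.
    from fastapi import FastAPI

    from shared.monitoring.metrics import MetricsCollector, metrics_middleware

    app = FastAPI(title="auth-service")

    # One collector per service; the Prometheus exporter listens on its own
    # port (8080 here), separate from the application port.
    collector = MetricsCollector(service_name="auth")
    collector.start_metrics_server(port=8080)

    # Register the request-metrics middleware via the Starlette/FastAPI
    # "http" middleware hook, which passes (request, call_next) to the closure.
    app.middleware("http")(metrics_middleware(collector))


    @app.get("/healthz")
    async def healthz():
        return {"status": "ok"}

Prometheus can then scrape the exporter started by start_http_server (conventionally at http://<service-host>:8080/metrics), with the service label distinguishing services that share the same metric names.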