Fix auth module
@@ -117,7 +117,7 @@ async def metrics_middleware(request: Request, call_next):
 
     # Record metrics
     duration = time.time() - start_time
-    metrics.record_histogram("auth_request_duration_seconds", duration)
+    metrics.observe_histogram("auth_request_duration_seconds", duration)
     metrics.increment_counter("auth_requests_total")
 
     return response
@@ -1,9 +1,25 @@
+# app/services/messaging.py
 """
 Messaging service for auth service
 """
 
 from shared.messaging.rabbitmq import RabbitMQClient
 from app.core.config import settings
+import logging
 
+logger = logging.getLogger(__name__)
+
 # Global message publisher
 message_publisher = RabbitMQClient(settings.RABBITMQ_URL)
+
+async def setup_messaging():
+    """Establishes connection to RabbitMQ for the message publisher."""
+    logger.info("Attempting to connect to RabbitMQ...")
+    await message_publisher.connect()
+    logger.info("RabbitMQ connection established.")
+
+async def cleanup_messaging():
+    """Closes the connection to RabbitMQ for the message publisher."""
+    logger.info("Attempting to disconnect from RabbitMQ...")
+    await message_publisher.disconnect()
+    logger.info("RabbitMQ connection closed.")
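
For context, a minimal sketch of how setup_messaging() and cleanup_messaging() might be wired into the service's startup and shutdown. The FastAPI lifespan wiring and the app/main.py location are assumptions; this commit does not touch that file.

# Hypothetical app/main.py wiring -- not part of this commit.
from contextlib import asynccontextmanager

from fastapi import FastAPI

from app.services.messaging import setup_messaging, cleanup_messaging


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Open the RabbitMQ connection before serving requests...
    await setup_messaging()
    yield
    # ...and close it on shutdown.
    await cleanup_messaging()


app = FastAPI(lifespan=lifespan)
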
@@ -1,7 +1,6 @@
 # ================================================================
 # services/auth/docker-compose.yml (For standalone testing)
 # ================================================================
-version: '3.8'
 
 services:
   auth-db:
@@ -0,0 +1,84 @@
+# ================================================================
+# services/auth/migrations/alembic.ini
+# ================================================================
+[alembic]
+# path to migration scripts
+script_location = migrations
+
+# template used to generate migration file names
+file_template = %%(year)d%%(month).2d%%(day).2d_%%(hour).2d%%(minute).2d_%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+timezone = Europe/Madrid
+
+# max length of characters to apply to the
+# "slug" field
+truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+sourceless = false
+
+# version of a migration file's filename format
+version_num_format = %s
+
+# version path separator
+version_path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+output_encoding = utf-8
+
+sqlalchemy.url = postgresql+asyncpg://auth_user:auth_pass123@auth-db:5432/auth_db
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.
+
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
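
For reference, a hedged sketch of applying this configuration programmatically instead of via the alembic CLI. The helper name and invocation are illustrative, and an async-aware migrations/env.py is assumed, since the sqlalchemy.url above uses the asyncpg driver.

# Illustrative only -- assumes migrations/env.py handles the
# postgresql+asyncpg URL (e.g. by running migrations via AsyncEngine.run_sync).
from alembic import command
from alembic.config import Config


def upgrade_to_head(ini_path: str = "services/auth/migrations/alembic.ini") -> None:
    # Load alembic.ini and apply all pending revisions.
    cfg = Config(ini_path)
    command.upgrade(cfg, "head")


if __name__ == "__main__":
    upgrade_to_head()
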
@@ -1,12 +1,14 @@
+# shared/monitoring/metrics.py
 """
 Metrics collection for microservices
 """
 
 import time
 import logging
-from typing import Dict, Any
+from typing import Dict, Any, List  # Added List import
 from prometheus_client import Counter, Histogram, Gauge, start_http_server
 from functools import wraps
+from prometheus_client import generate_latest  # Moved this import here for consistency
 
 logger = logging.getLogger(__name__)
 
@@ -47,6 +49,9 @@ class MetricsCollector:
     def __init__(self, service_name: str):
         self.service_name = service_name
         self.start_time = time.time()
+        # Initialize dictionaries to hold custom counters and histograms
+        self._counters: Dict[str, Counter] = {}
+        self._histograms: Dict[str, Histogram] = {}
 
     def start_metrics_server(self, port: int = 8080):
         """Start Prometheus metrics server"""
@@ -90,16 +95,77 @@ class MetricsCollector:
             service=self.service_name
         ).set(count)
 
+    def register_counter(self, name: str, documentation: str, labels: List[str] = None):
+        """Register a custom Counter metric."""
+        if name not in self._counters:
+            if labels is None:
+                labels = ['service']
+            elif 'service' not in labels:
+                labels.append('service')
+            # Pass labelnames as a keyword argument
+            self._counters[name] = Counter(name, documentation, labelnames=labels)
+            logger.info(f"Registered counter: {name}")
+        else:
+            logger.warning(f"Counter '{name}' already registered.")
+        return self._counters[name]  # Return the counter for direct use if needed
+
+    def increment_counter(self, name: str, value: int = 1, labels: Dict[str, str] = None):
+        """Increment a custom Counter metric."""
+        if name not in self._counters:
+            logger.error(f"Counter '{name}' not registered. Cannot increment.")
+            return
+
+        # Ensure the 'service' label is always present
+        if labels is None:
+            labels = {'service': self.service_name}
+        elif 'service' not in labels:
+            labels['service'] = self.service_name
+
+        self._counters[name].labels(**labels).inc(value)
+
+    def register_histogram(self, name: str, documentation: str, labels: List[str] = None, buckets: tuple = Histogram.DEFAULT_BUCKETS):
+        """Register a custom Histogram metric."""
+        if name not in self._histograms:
+            if labels is None:
+                labels = ['service']
+            elif 'service' not in labels:
+                labels.append('service')
+            # Pass labelnames and buckets as keyword arguments
+            self._histograms[name] = Histogram(name, documentation, labelnames=labels, buckets=buckets)
+            logger.info(f"Registered histogram: {name}")
+        else:
+            logger.warning(f"Histogram '{name}' already registered.")
+        return self._histograms[name]  # Return the histogram for direct use if needed
+
+    def observe_histogram(self, name: str, value: float, labels: Dict[str, str] = None):
+        """Observe a custom Histogram metric."""
+        if name not in self._histograms:
+            logger.error(f"Histogram '{name}' not registered. Cannot observe.")
+            return
+
+        if labels is None:
+            labels = {'service': self.service_name}
+        elif 'service' not in labels:
+            labels['service'] = self.service_name
+
+        self._histograms[name].labels(**labels).observe(value)
+
+    def get_metrics(self) -> str:
+        """Return Prometheus metrics in exposition format."""
+        return generate_latest().decode('utf-8')
+
 
 def metrics_middleware(metrics_collector: MetricsCollector):
     """Middleware to collect metrics"""
 
-    def middleware(request, call_next):
+    async def middleware(request, call_next):
        start_time = time.time()
 
-        response = call_next(request)
+        response = await call_next(request)
 
         duration = time.time() - start_time
 
+        # Use the specific record_request for HTTP requests
         metrics_collector.record_request(
             method=request.method,
             endpoint=request.url.path,
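
Taken together with the first hunk, the new MetricsCollector API is used roughly as follows. This is a usage sketch, not part of the diff: the registration calls and the metrics instance name are assumptions about how the auth service sets things up, while the metric names mirror the middleware hunk above.

# Usage sketch -- registration is assumed to happen once at service startup.
import time

from shared.monitoring.metrics import MetricsCollector

metrics = MetricsCollector("auth")

# register_* adds a 'service' label automatically when none is supplied.
metrics.register_counter("auth_requests_total", "Total auth requests handled")
metrics.register_histogram("auth_request_duration_seconds", "Auth request latency in seconds")

# Per request (as in the middleware), record duration and count:
start_time = time.time()
# ... handle the request ...
duration = time.time() - start_time
metrics.observe_histogram("auth_request_duration_seconds", duration)
metrics.increment_counter("auth_requests_total")
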