- apps/web: test updates (Vitest/setup), playbackAnalyticsService, TrackGrid, serviceErrorHandler - veza-common: logging, metrics, traits, validation, random - veza-stream-server: audio pipeline, codecs, cache, monitoring, routes - apps/web/dist_verification: refresh build assets (content-hashed filenames) Co-authored-by: Cursor <cursoragent@cursor.com>
582 lines
18 KiB
Rust
582 lines
18 KiB
Rust
//! Logging utilities for Veza Rust services
//!
//! This module provides centralized logging configuration and utilities.
//! FIX #14: Supports log rotation via tracing-appender.
|
|
|
|
use tracing::Level;
|
|
use tracing_subscriber::{
|
|
fmt::{self, format::FmtSpan},
|
|
layer::SubscriberExt,
|
|
util::SubscriberInitExt,
|
|
filter::LevelFilter,
|
|
EnvFilter, Registry, Layer,
|
|
};
|
|
use tracing_appender::rolling::{RollingFileAppender, Rotation};
|
|
|
|
use crate::error::server::{VezaError, VezaResult};
|
|
use std::path::PathBuf;
|
|
|
|
/// FIX #24: Normalize the log level from LOG_LEVEL or RUST_LOG.
///
/// Reads `LOG_LEVEL` first (the standardized variable), falls back to
/// `RUST_LOG`, and defaults to `"INFO"` when neither is set. Go-style
/// upper-case levels (DEBUG, INFO, WARN, ERROR, TRACE) map to the Rust
/// convention simply by lower-casing, so the previous per-level match was
/// redundant; unknown values are likewise lower-cased and passed through.
fn normalize_log_level() -> String {
    std::env::var("LOG_LEVEL")
        .or_else(|_| std::env::var("RUST_LOG"))
        .unwrap_or_else(|_| "INFO".to_string())
        .to_lowercase()
}
|
|
|
|
/// Initialize logging for Veza services
|
|
pub fn init() -> VezaResult<()> {
|
|
// FIX #24: Utiliser LOG_LEVEL standardisé
|
|
let log_level = normalize_log_level();
|
|
std::env::set_var("RUST_LOG", &log_level);
|
|
|
|
// Create environment filter
|
|
let env_filter = EnvFilter::try_from_default_env()
|
|
.unwrap_or_else(|_| EnvFilter::new(&log_level));
|
|
|
|
// Create formatting layer
|
|
let fmt_layer = fmt::layer()
|
|
.with_target(true)
|
|
.with_thread_ids(true)
|
|
.with_thread_names(true)
|
|
.with_span_events(FmtSpan::CLOSE)
|
|
.with_file(true)
|
|
.with_line_number(true)
|
|
.with_ansi(true);
|
|
|
|
// Initialize the subscriber
|
|
Registry::default()
|
|
.with(env_filter)
|
|
.with(fmt_layer)
|
|
.init();
|
|
|
|
tracing::info!("Logging initialized");
|
|
Ok(())
|
|
}
|
|
|
|
/// Initialize logging with a custom [`LoggingConfig`].
///
/// FIX #14: supports log rotation when `config.file` is set.
/// Note: the `WorkerGuard`s must stay alive for the non-blocking writers to
/// keep flushing; for now they are deliberately leaked (acceptable because
/// they must live for the whole lifetime of the application).
///
/// # Errors
/// Returns `VezaError::Config` when the log file path has no parent
/// directory or no valid file stem, or when `config.format` is neither
/// `"json"` nor `"text"`.
pub fn init_with_config(config: LoggingConfig) -> VezaResult<()> {
    // FIX #24: normalize the log level from the config or from LOG_LEVEL.
    let log_level = if !config.level.is_empty() && config.level != "info" {
        // A level was provided in the config: map Go-style upper-case
        // levels to Rust-style lower-case ones.
        match config.level.to_uppercase().as_str() {
            "DEBUG" => "debug".to_string(),
            "INFO" => "info".to_string(),
            "WARN" => "warn".to_string(),
            "ERROR" => "error".to_string(),
            "TRACE" => "trace".to_string(),
            // Already in Rust format: use as-is (lower-cased).
            _ => config.level.to_lowercase(),
        }
    } else {
        // config.level is empty or "info" (the default value): fall back to
        // the LOG_LEVEL / RUST_LOG environment variables.
        normalize_log_level()
    };

    // Export the normalized level so anything reading RUST_LOG agrees.
    std::env::set_var("RUST_LOG", &log_level);

    // Create environment filter (RUST_LOG directives win when parseable).
    let env_filter = EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::new(&log_level));

    // FIX #14: configure log rotation when a file is specified.
    // Two files are produced: module.log (all logs) and module-error.log
    // (errors only). `guard` only records whether file logging was enabled,
    // for the final info message below.
    let (file_writer, error_file_writer, guard) = if let Some(file_path) = &config.file {
        let path = PathBuf::from(file_path);
        let log_dir = path.parent()
            .ok_or_else(|| VezaError::Config("Invalid log file path".to_string()))?;
        let file_prefix = path.file_stem()
            .and_then(|s| s.to_str())
            .ok_or_else(|| VezaError::Config("Invalid log file name".to_string()))?;

        // Choose the rotation period based on max_size.
        let rotation = if config.max_size > 100 * 1024 * 1024 {
            // > 100 MB -> daily rotation
            Rotation::DAILY
        } else {
            // <= 100 MB -> hourly rotation
            Rotation::HOURLY
        };

        // Appender for all logs.
        let file_appender = RollingFileAppender::new(rotation.clone(), log_dir, file_prefix);
        let (non_blocking, worker_guard) = tracing_appender::non_blocking(file_appender);

        // Appender for errors only.
        let error_file_prefix = format!("{}-error", file_prefix);
        let error_file_appender = RollingFileAppender::new(rotation, log_dir, &error_file_prefix);
        let (error_non_blocking, error_worker_guard) = tracing_appender::non_blocking(error_file_appender);

        // FIX #14: the WorkerGuards must be kept alive to keep the rotation
        // active; Box::leak keeps them alive for the whole lifetime of the
        // application (a deliberate, bounded leak).
        Box::leak(Box::new(worker_guard));
        Box::leak(Box::new(error_worker_guard));

        (Some(non_blocking), Some(error_non_blocking), true)
    } else {
        (None, None, false)
    };

    // FIX #25: standardize on JSON in production/staging, text in development.
    // Create the formatting layer based on the configured format.
    // NOTE(review): the "json" arm never calls `.json()` on the layer, so its
    // output is actually the default text format — presumably because
    // `.json()` would change the layer's type and break this match; confirm
    // whether real JSON output is intended here.
    let base_fmt_layer = match config.format.as_str() {
        "json" => {
            // Use JSON formatter with json feature enabled
            fmt::layer()
                .with_target(true)
                .with_thread_ids(true)
                .with_thread_names(true)
                .with_span_events(FmtSpan::CLOSE)
                .with_file(true)
                .with_line_number(true)
        },
        "text" => {
            fmt::layer()
                .with_target(true)
                .with_thread_ids(true)
                .with_thread_names(true)
                .with_span_events(FmtSpan::CLOSE)
                .with_file(true)
                .with_line_number(true)
                .with_ansi(true)
        },
        _ => return Err(VezaError::Config(format!("Invalid log format: {}", config.format))),
    };

    // FIX #14: add file layers when rotation is configured.
    // The registry is built separately in each branch because every `.with()`
    // produces a new concrete subscriber type, so the combinations cannot be
    // unified behind one variable. file_writer / error_file_writer are moved
    // into their branch.
    match (file_writer, error_file_writer) {
        (Some(writer), Some(error_writer)) => {
            // Both files are configured.
            let file_layer = fmt::layer()
                .with_writer(writer)
                .with_target(true)
                .with_thread_ids(true)
                .with_thread_names(true)
                .with_span_events(FmtSpan::CLOSE)
                .with_file(true)
                .with_line_number(true);

            // The error file only receives ERROR-level events.
            let error_file_layer = fmt::layer()
                .with_writer(error_writer)
                .with_target(true)
                .with_thread_ids(true)
                .with_thread_names(true)
                .with_span_events(FmtSpan::CLOSE)
                .with_file(true)
                .with_line_number(true)
                .with_filter(LevelFilter::ERROR);

            Registry::default()
                .with(env_filter)
                .with(base_fmt_layer)
                .with(file_layer)
                .with(error_file_layer)
                .init();
        },
        (Some(writer), None) => {
            // Only the main log file is configured.
            let file_layer = fmt::layer()
                .with_writer(writer)
                .with_target(true)
                .with_thread_ids(true)
                .with_thread_names(true)
                .with_span_events(FmtSpan::CLOSE)
                .with_file(true)
                .with_line_number(true);

            Registry::default()
                .with(env_filter)
                .with(base_fmt_layer)
                .with(file_layer)
                .init();
        },
        (None, Some(error_writer)) => {
            // Only the error log file is configured.
            let error_file_layer = fmt::layer()
                .with_writer(error_writer)
                .with_target(true)
                .with_thread_ids(true)
                .with_thread_names(true)
                .with_span_events(FmtSpan::CLOSE)
                .with_file(true)
                .with_line_number(true)
                .with_filter(LevelFilter::ERROR);

            Registry::default()
                .with(env_filter)
                .with(base_fmt_layer)
                .with(error_file_layer)
                .init();
        },
        (None, None) => {
            // No file configured: console output only.
            Registry::default()
                .with(env_filter)
                .with(base_fmt_layer)
                .init();
        },
    }

    if guard {
        tracing::info!(
            file = ?config.file,
            max_size = config.max_size,
            max_files = config.max_files,
            "Logging initialized with file rotation"
        );
    } else {
        tracing::info!("Logging initialized with custom config");
    }

    Ok(())
}
|
|
|
|
/// Logging configuration
#[derive(Debug, Clone)]
pub struct LoggingConfig {
    /// Log level ("debug", "info", ...). Empty or "info" falls back to the
    /// LOG_LEVEL / RUST_LOG environment variables in `init_with_config`.
    pub level: String,
    /// Output format: "json" or "text"; any other value is rejected by
    /// `init_with_config` with a `VezaError::Config`.
    pub format: String,
    /// Optional log file path; when set, rotating file output is enabled.
    pub file: Option<String>,
    /// Size threshold in bytes used to pick the rotation period
    /// (> 100 MB -> daily, otherwise hourly).
    pub max_size: u64,
    /// Retention count. NOTE(review): only logged by `init_with_config`,
    /// not actually enforced anywhere in this module — confirm intent.
    pub max_files: u32,
    /// Compression flag. NOTE(review): not read anywhere in this module.
    pub compress: bool,
}
|
|
|
|
impl Default for LoggingConfig {
    /// Default configuration: info level, JSON format, console-only output
    /// (no file), 100 MB rotation threshold, 5 retained files, compression on.
    fn default() -> Self {
        Self {
            level: "info".to_string(),
            format: "json".to_string(),
            file: None,
            max_size: 100 * 1024 * 1024, // 100MB
            max_files: 5,
            compress: true,
        }
    }
}
|
|
|
|
/// Structured logging macros
///
/// Thin project-local wrappers that forward all arguments unchanged to the
/// corresponding `tracing` macro at the matching level.
/// NOTE(review): the expansions reference `tracing::` at the *call site*, so
/// callers must have the `tracing` crate in scope; consider a `$crate`
/// re-export if that ever becomes a problem.
#[macro_export]
macro_rules! log_info {
    ($($arg:tt)*) => {
        tracing::info!($($arg)*)
    };
}

#[macro_export]
macro_rules! log_warn {
    ($($arg:tt)*) => {
        tracing::warn!($($arg)*)
    };
}

#[macro_export]
macro_rules! log_error {
    ($($arg:tt)*) => {
        tracing::error!($($arg)*)
    };
}

#[macro_export]
macro_rules! log_debug {
    ($($arg:tt)*) => {
        tracing::debug!($($arg)*)
    };
}

#[macro_export]
macro_rules! log_trace {
    ($($arg:tt)*) => {
        tracing::trace!($($arg)*)
    };
}
|
|
|
|
/// Log performance metrics
|
|
pub fn log_performance(operation: &str, duration: std::time::Duration, metadata: &[(&str, String)]) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info!(
|
|
operation = operation,
|
|
duration_ms = duration.as_millis(),
|
|
metadata = ?fields,
|
|
"Performance metric"
|
|
);
|
|
}
|
|
|
|
/// Log security events
|
|
pub fn log_security_event(event: &str, user_id: Option<uuid::Uuid>, ip_address: Option<&str>, metadata: &[(&str, String)]) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::warn!(
|
|
event = event,
|
|
user_id = ?user_id,
|
|
ip_address = ip_address,
|
|
metadata = ?fields,
|
|
"Security event"
|
|
);
|
|
}
|
|
|
|
/// Log business events
|
|
pub fn log_business_event(event: &str, user_id: Option<uuid::Uuid>, resource_id: Option<uuid::Uuid>, metadata: &[(&str, String)]) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info!(
|
|
event = event,
|
|
user_id = ?user_id,
|
|
resource_id = ?resource_id,
|
|
metadata = ?fields,
|
|
"Business event"
|
|
);
|
|
}
|
|
|
|
/// Log system events
|
|
pub fn log_system_event(event: &str, component: &str, metadata: &[(&str, String)]) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info!(
|
|
event = event,
|
|
component = component,
|
|
metadata = ?fields,
|
|
"System event"
|
|
);
|
|
}
|
|
|
|
/// Log error with context
|
|
pub fn log_error_with_context(error: &VezaError, context: &[(&str, String)]) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in context {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
if error.should_log() {
|
|
tracing::error!(
|
|
error = %error,
|
|
context = ?fields,
|
|
"Error occurred"
|
|
);
|
|
} else {
|
|
tracing::debug!(
|
|
error = %error,
|
|
context = ?fields,
|
|
"Error occurred"
|
|
);
|
|
}
|
|
}
|
|
|
|
/// Log request/response
|
|
pub fn log_request_response(
|
|
method: &str,
|
|
path: &str,
|
|
status_code: u16,
|
|
duration: std::time::Duration,
|
|
user_id: Option<uuid::Uuid>,
|
|
request_id: Option<&str>,
|
|
) {
|
|
let level = if status_code >= 500 {
|
|
Level::ERROR
|
|
} else if status_code >= 400 {
|
|
Level::WARN
|
|
} else {
|
|
Level::INFO
|
|
};
|
|
|
|
match level {
|
|
Level::ERROR => tracing::error!(
|
|
method = method,
|
|
path = path,
|
|
status_code = status_code,
|
|
duration_ms = duration.as_millis(),
|
|
user_id = ?user_id,
|
|
request_id = request_id,
|
|
"HTTP request"
|
|
),
|
|
Level::WARN => tracing::warn!(
|
|
method = method,
|
|
path = path,
|
|
status_code = status_code,
|
|
duration_ms = duration.as_millis(),
|
|
user_id = ?user_id,
|
|
request_id = request_id,
|
|
"HTTP request"
|
|
),
|
|
Level::INFO => tracing::info!(
|
|
method = method,
|
|
path = path,
|
|
status_code = status_code,
|
|
duration_ms = duration.as_millis(),
|
|
user_id = ?user_id,
|
|
request_id = request_id,
|
|
"HTTP request"
|
|
),
|
|
Level::DEBUG => tracing::debug!(
|
|
method = method,
|
|
path = path,
|
|
status_code = status_code,
|
|
duration_ms = duration.as_millis(),
|
|
user_id = ?user_id,
|
|
request_id = request_id,
|
|
"HTTP request"
|
|
),
|
|
Level::TRACE => tracing::trace!(
|
|
method = method,
|
|
path = path,
|
|
status_code = status_code,
|
|
duration_ms = duration.as_millis(),
|
|
user_id = ?user_id,
|
|
request_id = request_id,
|
|
"HTTP request"
|
|
),
|
|
}
|
|
}
|
|
|
|
/// Log database query
|
|
pub fn log_db_query(
|
|
query: &str,
|
|
duration: std::time::Duration,
|
|
rows_affected: Option<u64>,
|
|
error: Option<&str>,
|
|
) {
|
|
if let Some(error) = error {
|
|
tracing::error!(
|
|
query = query,
|
|
duration_ms = duration.as_millis(),
|
|
rows_affected = ?rows_affected,
|
|
error = error,
|
|
"Database query failed"
|
|
);
|
|
} else {
|
|
tracing::debug!(
|
|
query = query,
|
|
duration_ms = duration.as_millis(),
|
|
rows_affected = ?rows_affected,
|
|
"Database query"
|
|
);
|
|
}
|
|
}
|
|
|
|
/// Log cache operations
|
|
pub fn log_cache_operation(
|
|
operation: &str,
|
|
key: &str,
|
|
hit: bool,
|
|
duration: std::time::Duration,
|
|
error: Option<&str>,
|
|
) {
|
|
if let Some(error) = error {
|
|
tracing::warn!(
|
|
operation = operation,
|
|
key = key,
|
|
hit = hit,
|
|
duration_ms = duration.as_millis(),
|
|
error = error,
|
|
"Cache operation failed"
|
|
);
|
|
} else {
|
|
tracing::debug!(
|
|
operation = operation,
|
|
key = key,
|
|
hit = hit,
|
|
duration_ms = duration.as_millis(),
|
|
"Cache operation"
|
|
);
|
|
}
|
|
}
|
|
|
|
/// Log WebSocket events
|
|
pub fn log_websocket_event(
|
|
event: &str,
|
|
user_id: Option<uuid::Uuid>,
|
|
connection_id: Option<uuid::Uuid>,
|
|
metadata: &[(&str, String)],
|
|
) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info!(
|
|
event = event,
|
|
user_id = ?user_id,
|
|
connection_id = ?connection_id,
|
|
metadata = ?fields,
|
|
"WebSocket event"
|
|
);
|
|
}
|
|
|
|
/// Log streaming events
|
|
pub fn log_streaming_event(
|
|
event: &str,
|
|
track_id: Option<uuid::Uuid>,
|
|
user_id: Option<uuid::Uuid>,
|
|
metadata: &[(&str, String)],
|
|
) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info!(
|
|
event = event,
|
|
track_id = ?track_id,
|
|
user_id = ?user_id,
|
|
metadata = ?fields,
|
|
"Streaming event"
|
|
);
|
|
}
|
|
|
|
/// Create a span for tracing
|
|
pub fn create_span(_name: &str, metadata: &[(&str, String)]) -> tracing::Span {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in metadata {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info_span!("span", metadata = ?fields)
|
|
}
|
|
|
|
/// Log startup information
|
|
pub fn log_startup(service_name: &str, version: &str, config: &[(&str, String)]) {
|
|
let mut fields = std::collections::HashMap::new();
|
|
for (key, value) in config {
|
|
fields.insert(key.to_string(), value.clone());
|
|
}
|
|
tracing::info!(
|
|
service = service_name,
|
|
version = version,
|
|
config = ?fields,
|
|
"Service starting"
|
|
);
|
|
}
|
|
|
|
/// Log shutdown information
|
|
pub fn log_shutdown(service_name: &str, uptime: std::time::Duration) {
|
|
tracing::info!(
|
|
service = service_name,
|
|
uptime_seconds = uptime.as_secs(),
|
|
"Service shutting down"
|
|
);
|
|
}
|