veza/veza-backend-api/internal/logging/config.go
senke 73eca4f6ad feat: backend, stream server & infra improvements
Backend (Go):
- Config: CORS, RabbitMQ, rate limit, main config updates
- Routes: core, distribution, tracks routing changes
- Middleware: rate limiter, endpoint limiter, response cache hardening
- Handlers: distribution, search handler fixes
- Workers: job worker improvements
- Upload validator and logging config additions
- New migrations: products, orders, performance indexes
- Seed tooling and data

Stream Server (Rust):
- Audio processing, config, routes, simple stream server updates
- Dockerfile improvements

Infrastructure:
- docker-compose.yml updates
- nginx-rtmp config changes
- Makefile improvements (config, dev, high, infra)
- Root package.json and lock file updates
- .env.example updates

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 11:36:06 +01:00

227 lines
6 KiB
Go

package logging
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/BurntSushi/toml"
)
// LogConfig holds the centralized logging configuration loaded from config/logging.toml.
// Environment variables override file values (highest priority).
// See LoadConfig for the lookup order and defaultConfig for built-in defaults.
type LogConfig struct {
	Global      GlobalConfig         `toml:"global"`      // level, directory, and output format shared by all components
	Rotation    RotationConfig       `toml:"rotation"`    // file rotation policy (size/backups/age, plus Rust-side settings)
	Backend     BackendConfig        `toml:"backend"`     // Go backend logger tuning (sampling, buffering, slow-request threshold)
	Stream      StreamConfig         `toml:"stream"`      // stream-server logger settings (presumably the Rust service — see Rust* rotation fields)
	Frontend    FrontendConfig       `toml:"frontend"`    // browser/frontend log forwarding
	Aggregation LogAggregationConfig `toml:"aggregation"` // batched log shipping to an external aggregation endpoint
	Permissions PermissionsConfig    `toml:"permissions"` // filesystem modes for created log dirs/files
}
// GlobalConfig holds settings shared by every logging component.
type GlobalConfig struct {
	Level  string `toml:"level"`  // minimum log level, e.g. "INFO" (default); override via LOG_LEVEL
	Dir    string `toml:"dir"`    // log directory (default "/var/log/veza"); override via LOG_DIR; see ResolveLogDir
	Format string `toml:"format"` // "json", "text", or "auto" (resolved from APP_ENV in applyEnvOverrides); override via LOG_FORMAT
}
// RotationConfig controls log file rotation. The Max*/Compress fields apply
// to the Go backend; the Rust* fields are passed to the stream server, which
// uses its own time-based rotation scheme.
type RotationConfig struct {
	MaxSizeMB    int    `toml:"max_size_mb"`    // rotate when a file exceeds this size (default 100)
	MaxBackups   int    `toml:"max_backups"`    // rotated files to retain (default 10)
	MaxAgeDays   int    `toml:"max_age_days"`   // delete rotated files older than this (default 30)
	Compress     bool   `toml:"compress"`       // gzip rotated files (default true)
	RustRotation string `toml:"rust_rotation"`  // rotation period for the stream server, e.g. "hourly" (default)
	RustMaxFiles int    `toml:"rust_max_files"` // rotated files the stream server retains (default 5)
}
// BackendConfig tunes the Go backend API logger.
type BackendConfig struct {
	Module                 string   `toml:"module"`                    // logger/module name (default "backend-api")
	Modules                []string `toml:"modules"`                   // additional sub-module loggers (default "db", "rabbitmq")
	SlowRequestThresholdMs int      `toml:"slow_request_threshold_ms"` // requests slower than this are flagged (default 1000); override via SLOW_REQUEST_THRESHOLD_MS
	SamplingInitial        int      `toml:"sampling_initial"`          // sampling: entries logged before throttling kicks in (default 100)
	SamplingThereafter     int      `toml:"sampling_thereafter"`       // sampling: every Nth entry logged afterwards (default 100)
	BufferSizeKB           int      `toml:"buffer_size_kb"`            // write buffer size in KiB (default 256)
	FlushIntervalMs        int      `toml:"flush_interval_ms"`         // buffer flush interval in ms (default 100)
}
// StreamConfig holds logger settings for the stream server.
type StreamConfig struct {
	Module           string `toml:"module"`             // logger/module name (default "stream")
	IncludeSource    bool   `toml:"include_source"`     // include source file/line in log entries (default true)
	IncludeThreadIDs bool   `toml:"include_thread_ids"` // include thread IDs in log entries (default true)
}
// FrontendConfig controls how frontend (browser) logs are collected.
type FrontendConfig struct {
	Level         string `toml:"level"`          // frontend log level; "auto" by default (resolution happens client-side — not handled in this file)
	Endpoint      string `toml:"endpoint"`       // backend route receiving frontend logs (default "/api/v1/logs/frontend")
	SentryEnabled bool   `toml:"sentry_enabled"` // forward frontend errors to Sentry (default false)
}
// LogAggregationConfig controls batched shipping of logs to an external
// aggregation endpoint. Every field can be overridden via the matching
// LOG_AGGREGATION_* environment variable (see applyEnvOverrides).
type LogAggregationConfig struct {
	Enabled       bool   `toml:"enabled"`          // ship logs when true (default false)
	Endpoint      string `toml:"endpoint"`         // aggregator URL (empty by default)
	BatchSize     int    `toml:"batch_size"`       // entries per shipped batch (default 100)
	FlushInterval int    `toml:"flush_interval_s"` // flush period in seconds (default 5)
	Timeout       int    `toml:"timeout_s"`        // ship request timeout in seconds (default 10)
	Labels        string `toml:"labels"`           // extra labels attached to shipped logs; format is consumer-defined — TODO confirm (e.g. "k=v,k2=v2")
}
// PermissionsConfig holds filesystem modes for log directories and files,
// expressed as octal strings (e.g. "0755"). Note: ResolveLogDir currently
// hard-codes 0755 and does not consult these values.
type PermissionsConfig struct {
	DirMode  string `toml:"dir_mode"`  // mode for created log directories (default "0755")
	FileMode string `toml:"file_mode"` // mode for created log files (default "0640")
}
// LoadConfig loads logging configuration from config/logging.toml.
// Candidate locations are tried in order: ./config, ../config, then
// $VEZA_ROOT/config (only when VEZA_ROOT is set). Environment variables
// override file values (highest priority). Returns the built-in defaults
// if the file is missing or cannot be parsed.
func LoadConfig() *LogConfig {
	cfg := defaultConfig()

	// Candidate locations for the TOML file; the first that exists wins.
	paths := []string{
		"config/logging.toml",
		"../config/logging.toml",
	}
	if root := os.Getenv("VEZA_ROOT"); root != "" {
		// Only add this candidate when VEZA_ROOT is set; otherwise
		// filepath.Join("", ...) would just duplicate the first path.
		paths = append(paths, filepath.Join(root, "config/logging.toml"))
	}

	for _, p := range paths {
		if _, err := os.Stat(p); err != nil {
			continue
		}
		if _, err := toml.DecodeFile(p, cfg); err != nil {
			fmt.Fprintf(os.Stderr, "warning: failed to parse %s: %v (using defaults)\n", p, err)
			// DecodeFile may have partially populated cfg before failing;
			// reset so the promised "using defaults" actually holds.
			cfg = defaultConfig()
		}
		break
	}

	// Environment variable overrides (highest priority).
	applyEnvOverrides(cfg)
	return cfg
}
// defaultConfig returns the built-in configuration used when no
// config/logging.toml can be located. Callers receive a fresh value
// on every invocation, so mutating the result is always safe.
func defaultConfig() *LogConfig {
	cfg := &LogConfig{}

	cfg.Global = GlobalConfig{
		Level:  "INFO",
		Dir:    "/var/log/veza",
		Format: "auto",
	}
	cfg.Rotation = RotationConfig{
		MaxSizeMB:    100,
		MaxBackups:   10,
		MaxAgeDays:   30,
		Compress:     true,
		RustRotation: "hourly",
		RustMaxFiles: 5,
	}
	cfg.Backend = BackendConfig{
		Module:                 "backend-api",
		Modules:                []string{"db", "rabbitmq"},
		SlowRequestThresholdMs: 1000,
		SamplingInitial:        100,
		SamplingThereafter:     100,
		BufferSizeKB:           256,
		FlushIntervalMs:        100,
	}
	cfg.Stream = StreamConfig{
		Module:           "stream",
		IncludeSource:    true,
		IncludeThreadIDs: true,
	}
	cfg.Frontend = FrontendConfig{
		Level:    "auto",
		Endpoint: "/api/v1/logs/frontend",
	}
	cfg.Aggregation = LogAggregationConfig{
		BatchSize:     100,
		FlushInterval: 5,
		Timeout:       10,
	}
	cfg.Permissions = PermissionsConfig{
		DirMode:  "0755",
		FileMode: "0640",
	}

	return cfg
}
// applyEnvOverrides applies environment-variable overrides to cfg
// (highest priority) and then resolves the "auto" format based on
// APP_ENV: json for production/staging, text otherwise.
func applyEnvOverrides(cfg *LogConfig) {
	// Small helpers: copy the env value into dst only when it is set
	// (and, for ints, only when it parses).
	setString := func(key string, dst *string) {
		if v := os.Getenv(key); v != "" {
			*dst = v
		}
	}
	setInt := func(key string, dst *int) {
		if v := os.Getenv(key); v != "" {
			if n, err := strconv.Atoi(v); err == nil {
				*dst = n
			}
		}
	}

	setString("LOG_LEVEL", &cfg.Global.Level)
	setString("LOG_DIR", &cfg.Global.Dir)
	setString("LOG_FORMAT", &cfg.Global.Format)
	setInt("SLOW_REQUEST_THRESHOLD_MS", &cfg.Backend.SlowRequestThresholdMs)

	// Aggregation overrides. Enabled accepts "true" (any case) or "1";
	// every other non-empty value disables aggregation.
	if v := os.Getenv("LOG_AGGREGATION_ENABLED"); v != "" {
		cfg.Aggregation.Enabled = strings.EqualFold(v, "true") || v == "1"
	}
	setString("LOG_AGGREGATION_ENDPOINT", &cfg.Aggregation.Endpoint)
	setInt("LOG_AGGREGATION_BATCH_SIZE", &cfg.Aggregation.BatchSize)
	setInt("LOG_AGGREGATION_FLUSH_INTERVAL", &cfg.Aggregation.FlushInterval)
	setInt("LOG_AGGREGATION_TIMEOUT", &cfg.Aggregation.Timeout)
	setString("LOG_AGGREGATION_LABELS", &cfg.Aggregation.Labels)

	// Resolve "auto" format after overrides, so LOG_FORMAT=auto is
	// resolved the same way as a file-configured "auto".
	if cfg.Global.Format == "auto" {
		switch strings.ToLower(os.Getenv("APP_ENV")) {
		case "production", "staging":
			cfg.Global.Format = "json"
		default:
			cfg.Global.Format = "text"
		}
	}
}
// ResolveLogDir ensures the configured log directory exists and is writable,
// returning the directory that should actually be used. If the configured
// directory cannot be created or written to, it falls back to ./logs and
// warns on stderr.
//
// The env parameter is kept for interface compatibility; the fallback now
// applies uniformly. (Previously a production-env MkdirAll failure fell
// through to a write test inside the nonexistent directory, which failed
// and produced the same ./logs fallback anyway — just without a warning.)
func (c *LogConfig) ResolveLogDir(env string) string {
	dir := c.Global.Dir

	if err := os.MkdirAll(dir, 0o755); err != nil {
		fmt.Fprintf(os.Stderr, "warning: cannot create log dir %s: %v (falling back to ./logs)\n", dir, err)
		return localLogFallback()
	}

	// Verify the directory is actually writable by creating a scratch file.
	// os.CreateTemp picks a unique name, so concurrent processes probing the
	// same directory cannot clobber each other's test file.
	f, err := os.CreateTemp(dir, ".write_test")
	if err != nil {
		fmt.Fprintf(os.Stderr, "warning: log dir %s not writable: %v (falling back to ./logs)\n", dir, err)
		return localLogFallback()
	}
	name := f.Name()
	f.Close()
	os.Remove(name)

	return dir
}

// localLogFallback creates (best-effort) and returns the ./logs fallback directory.
func localLogFallback() string {
	const fallback = "./logs"
	_ = os.MkdirAll(fallback, 0o755)
	return fallback
}