# nginx_proxy_cache defaults — phase-1 edge cache (self-hosted) in
# front of the distributed MinIO cluster.
#
# Why Nginx and not Varnish: VCL is overkill for HLS in front of S3.
# Segments are content-addressed (immutable), playlists rotate every
# 60s; a plain HTTP cache with proper Cache-Control fences is
# sufficient. Nginx integrates trivially with TLS, structured logs,
# and the existing Prometheus stack via stub_status.
#
# Phase-1 scope: single cache node colocated on the R720 host
# (Incus container `nginx-cache`). Phase-2 (W3+) adds a second
# geographically-distinct cache node + GeoDNS; phase-3 only if the
# self-hosted edges aren't enough.
---
nginx_cache_root: /var/cache/nginx/veza
nginx_cache_max_size: "20g"  # disk cap; the R720 has plenty of space
nginx_cache_inactive: "7d"  # purge entries unused for > 7d
nginx_cache_levels: "1:2"  # 16 × 256 dir fan-out, plenty for 100k objects

# Origin pool — points at the MinIO cluster. The role reads the
# groups['minio_nodes'] inventory to populate the upstream block
# automatically; override here if testing against an external bucket.
nginx_cache_minio_port: 9000

# Cache TTLs by file extension. Segments are content-addressed
# (immutable), so 7 days is safe and matches the backend's
# "Cache-Control: max-age=86400, immutable" header (the TTL here is
# only an additional upper bound on top — the cache cannot keep an
# entry beyond what the origin's headers allow).
nginx_cache_ttl_segment: "7d"  # .ts, .m4s, .mp4, .aac
nginx_cache_ttl_playlist: "60s"  # .m3u8 (live streams may regenerate)
nginx_cache_ttl_other: "1h"  # cover art, generic objects

# Stale-on-error: if the origin times out or returns a 5xx, serve the
# stale cached version. Bounded so we don't lock viewers into a
# permanently stale view if MinIO is genuinely gone.
nginx_cache_stale_error_window: "1h"

# Listener config. v1.0 = HTTP only on the Incus bridge; TLS
# termination lives at the public LB (HAProxy/Caddy in prod). When
# we add direct internet exposure (phase-2), tls_cert_path /
# tls_key_path go here.
nginx_cache_listen_port: 80
nginx_cache_server_name: "cache.veza.lxd"

# Worker tuning. nginx defaults to ~1 worker per core; the
# stub_status exporter parses these, so set them explicitly for
# graphability.
nginx_cache_worker_processes: "auto"
nginx_cache_worker_connections: 4096

# Stub-status endpoint for the Prometheus nginx exporter. Bound to
# loopback only — the exporter sidecar reads it via 127.0.0.1.
nginx_cache_stub_status_path: "/__nginx_status"