chore(cleanup): remove veza-chat-server directory and all operational references
Chat functionality is now fully handled by the Go backend (since v0.502). Remove the deprecated Rust chat server and all its references from: - CI/CD workflows (ci.yml, cd.yml, rust-ci.yml, chat-ci.yml) - Monitoring & proxy config (prometheus, caddy, haproxy) - Incus deployment scripts and documentation - Monorepo config (package.json, dependabot, GH templates)
This commit is contained in:
parent
0376bdcd16
commit
279a10d317
155 changed files with 37 additions and 37524 deletions
2
.github/ISSUE_TEMPLATE/bug_report.md
vendored
2
.github/ISSUE_TEMPLATE/bug_report.md
vendored
|
|
@ -27,7 +27,7 @@ Ce qui aurait dû se passer.
|
|||
|
||||
## 💻 Contexte
|
||||
|
||||
- Service impacté : (backend-api / chat-server / stream-server / web-frontend / infra)
|
||||
- Service impacté : (backend-api / stream-server / web-frontend / infra)
|
||||
- Branch : (main / develop / autre)
|
||||
- Environnement : (local / dev / staging / prod)
|
||||
|
||||
|
|
|
|||
6
.github/dependabot.yml
vendored
6
.github/dependabot.yml
vendored
|
|
@ -6,12 +6,6 @@ updates:
|
|||
interval: "weekly"
|
||||
labels: ["dependencies", "go"]
|
||||
|
||||
- package-ecosystem: "cargo"
|
||||
directory: "/veza-chat-server"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
labels: ["dependencies", "rust"]
|
||||
|
||||
- package-ecosystem: "cargo"
|
||||
directory: "/veza-stream-server"
|
||||
schedule:
|
||||
|
|
|
|||
3
.github/pull_request_template.md
vendored
3
.github/pull_request_template.md
vendored
|
|
@ -1,7 +1,7 @@
|
|||
# 🧩 Résumé
|
||||
|
||||
- **Type de changement** : (feat / fix / refactor / chore / docs)
|
||||
- **Scope** : (backend-api / chat-server / stream-server / web-frontend / infra / docs)
|
||||
- **Scope** : (backend-api / stream-server / web-frontend / infra / docs)
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -48,7 +48,6 @@ Si oui, préciser :
|
|||
Coche ce qui a été lancé :
|
||||
|
||||
- [ ] `go test ./...` (backend-api)
|
||||
- [ ] `cargo test` (chat-server)
|
||||
- [ ] `cargo test` (stream-server)
|
||||
- [ ] `pnpm test` (web-frontend)
|
||||
- [ ] Tests manuels locaux (décrire rapidement)
|
||||
|
|
|
|||
20
.github/workflows/cd.yml
vendored
20
.github/workflows/cd.yml
vendored
|
|
@ -36,9 +36,8 @@ jobs:
|
|||
run: |
|
||||
docker build -t veza-frontend:${{ github.sha }} -f apps/web/Dockerfile.production apps/web/
|
||||
|
||||
- name: Build Rust Services Docker Images
|
||||
- name: Build Stream Server Docker Image
|
||||
run: |
|
||||
docker build -t veza-chat-server:${{ github.sha }} -f veza-chat-server/Dockerfile.production veza-chat-server/
|
||||
docker build -t veza-stream-server:${{ github.sha }} -f veza-stream-server/Dockerfile.production veza-stream-server/
|
||||
|
||||
- name: Trivy vulnerability scan
|
||||
|
|
@ -57,14 +56,6 @@ jobs:
|
|||
exit-code: '1'
|
||||
severity: 'CRITICAL,HIGH'
|
||||
|
||||
- name: Trivy scan chat server
|
||||
uses: aquasecurity/trivy-action@0.28.0
|
||||
with:
|
||||
image-ref: 'veza-chat-server:${{ github.sha }}'
|
||||
format: 'table'
|
||||
exit-code: '1'
|
||||
severity: 'CRITICAL,HIGH'
|
||||
|
||||
- name: Trivy scan stream server
|
||||
uses: aquasecurity/trivy-action@0.28.0
|
||||
with:
|
||||
|
|
@ -76,7 +67,7 @@ jobs:
|
|||
- name: Generate SBOM
|
||||
run: |
|
||||
mkdir -p sbom
|
||||
for svc in veza-backend-api veza-frontend veza-chat-server veza-stream-server; do
|
||||
for svc in veza-backend-api veza-frontend veza-stream-server; do
|
||||
trivy image --format cyclonedx --output "sbom/${svc}-${{ github.sha }}.json" "${svc}:${{ github.sha }}"
|
||||
done
|
||||
- name: Upload SBOM artifacts
|
||||
|
|
@ -89,7 +80,7 @@ jobs:
|
|||
if: vars.DOCKER_REGISTRY != ''
|
||||
run: |
|
||||
echo "${{ secrets.DOCKER_REGISTRY_PASSWORD }}" | docker login "${{ vars.DOCKER_REGISTRY }}" -u "${{ secrets.DOCKER_REGISTRY_USERNAME }}" --password-stdin
|
||||
for svc in veza-backend-api veza-frontend veza-chat-server veza-stream-server; do
|
||||
for svc in veza-backend-api veza-frontend veza-stream-server; do
|
||||
docker tag "${svc}:${{ github.sha }}" "${{ vars.DOCKER_REGISTRY }}/${svc}:${{ github.sha }}"
|
||||
docker tag "${svc}:${{ github.sha }}" "${{ vars.DOCKER_REGISTRY }}/${svc}:latest"
|
||||
docker push "${{ vars.DOCKER_REGISTRY }}/${svc}:${{ github.sha }}"
|
||||
|
|
@ -107,7 +98,7 @@ jobs:
|
|||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
run: |
|
||||
for svc in veza-backend-api veza-frontend veza-chat-server veza-stream-server; do
|
||||
for svc in veza-backend-api veza-frontend veza-stream-server; do
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --yes "${{ vars.DOCKER_REGISTRY }}/${svc}:${{ github.sha }}"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --yes "${{ vars.DOCKER_REGISTRY }}/${svc}:latest"
|
||||
done
|
||||
|
|
@ -117,7 +108,6 @@ jobs:
|
|||
echo "## Build Summary" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Backend: veza-backend-api:${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Frontend: veza-frontend:${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Chat Server: veza-chat-server:${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Stream Server: veza-stream-server:${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
deploy:
|
||||
|
|
@ -134,7 +124,7 @@ jobs:
|
|||
echo "${{ secrets.KUBE_CONFIG }}" | base64 -d > "$KUBECONFIG"
|
||||
chmod 600 "$KUBECONFIG"
|
||||
export KUBECONFIG
|
||||
for svc in veza-backend-api veza-chat-server veza-stream-server; do
|
||||
for svc in veza-backend-api veza-stream-server; do
|
||||
kubectl set image "deployment/${svc}" "${svc}=${{ vars.DOCKER_REGISTRY }}/${svc}:${{ github.sha }}" \
|
||||
-n veza --record || echo "Skipping ${svc} (deployment not found)"
|
||||
done
|
||||
|
|
|
|||
41
.github/workflows/chat-ci.yml
vendored
41
.github/workflows/chat-ci.yml
vendored
|
|
@ -1,41 +0,0 @@
|
|||
name: Chat Server CI
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "veza-chat-server/**"
|
||||
- "veza-common/**"
|
||||
- ".github/workflows/chat-ci.yml"
|
||||
pull_request:
|
||||
paths:
|
||||
- "veza-chat-server/**"
|
||||
- "veza-common/**"
|
||||
- ".github/workflows/chat-ci.yml"
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: veza-chat-server
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Rust
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Lint with clippy
|
||||
run: cargo clippy --all-targets -- -D warnings
|
||||
|
||||
- name: Audit dependencies
|
||||
uses: actions-rust-lang/audit@v1
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --all
|
||||
|
||||
13
.github/workflows/ci.yml
vendored
13
.github/workflows/ci.yml
vendored
|
|
@ -50,7 +50,7 @@ jobs:
|
|||
run: npx turbo run build --filter=veza-backend-api
|
||||
|
||||
rust-services:
|
||||
name: Rust Services (Chat & Stream)
|
||||
name: Rust Services (Stream)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
|
@ -81,24 +81,19 @@ jobs:
|
|||
- name: Install cargo-audit
|
||||
run: cargo install cargo-audit
|
||||
|
||||
- name: Auditing Chat Server
|
||||
run: |
|
||||
cd veza-chat-server
|
||||
cargo audit
|
||||
|
||||
- name: Auditing Stream Server
|
||||
run: |
|
||||
cd veza-stream-server
|
||||
cargo audit
|
||||
|
||||
- name: Lint
|
||||
run: npx turbo run lint --filter=veza-chat-server --filter=veza-stream-server
|
||||
run: npx turbo run lint --filter=veza-stream-server
|
||||
|
||||
- name: Build
|
||||
run: npx turbo run build --filter=veza-chat-server --filter=veza-stream-server
|
||||
run: npx turbo run build --filter=veza-stream-server
|
||||
|
||||
- name: Test
|
||||
run: npx turbo run test --filter=veza-chat-server --filter=veza-stream-server
|
||||
run: npx turbo run test --filter=veza-stream-server
|
||||
|
||||
frontend:
|
||||
name: Frontend (Web)
|
||||
|
|
|
|||
13
.github/workflows/rust-ci.yml
vendored
13
.github/workflows/rust-ci.yml
vendored
|
|
@ -3,26 +3,13 @@ on:
|
|||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'veza-chat-server/**'
|
||||
- 'veza-stream-server/**'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'veza-chat-server/**'
|
||||
- 'veza-stream-server/**'
|
||||
|
||||
jobs:
|
||||
clippy-chat:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
- name: Clippy lint
|
||||
run: cargo clippy -- -D warnings
|
||||
working-directory: veza-chat-server
|
||||
|
||||
clippy-stream:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
{$STAGING_DOMAIN:staging.veza.app} {
|
||||
reverse_proxy /api/* backend:8080
|
||||
reverse_proxy /ws chat-server:8081
|
||||
reverse_proxy /stream stream-server:3001
|
||||
reverse_proxy /hls/* stream-server:3001
|
||||
reverse_proxy /* frontend:5173
|
||||
|
|
|
|||
|
|
@ -15,7 +15,6 @@ This project uses multiple `docker-compose` files for different environments. Af
|
|||
|
||||
| File | Purpose | Usage |
|
||||
|------|---------|-------|
|
||||
| `veza-chat-server/docker-compose.yml` | Standalone chat server dev | `cd veza-chat-server && docker compose up` |
|
||||
| `veza-stream-server/docker-compose.yml` | Standalone stream server dev | `cd veza-stream-server && docker compose up` |
|
||||
|
||||
## Infrastructure monitoring
|
||||
|
|
|
|||
|
|
@ -38,14 +38,12 @@ frontend http_frontend
|
|||
|
||||
# ACLs for routing
|
||||
acl is_api path_beg /api/v1
|
||||
acl is_ws path_beg /ws
|
||||
acl is_stream path_beg /stream
|
||||
acl is_hls path_beg /hls
|
||||
acl is_web path_beg /
|
||||
|
||||
# Route to appropriate backend
|
||||
use_backend backend_api if is_api
|
||||
use_backend chat_ws if is_ws
|
||||
use_backend stream_ws if is_stream
|
||||
use_backend stream_ws if is_hls
|
||||
use_backend web_frontend if is_web
|
||||
|
|
@ -59,13 +57,11 @@ frontend https_frontend
|
|||
mode http
|
||||
# ACLs for routing
|
||||
acl is_api path_beg /api/v1
|
||||
acl is_ws path_beg /ws
|
||||
acl is_stream path_beg /stream
|
||||
acl is_hls path_beg /hls
|
||||
acl is_web path_beg /
|
||||
# Route to appropriate backend
|
||||
use_backend backend_api if is_api
|
||||
use_backend chat_ws if is_ws
|
||||
use_backend stream_ws if is_stream
|
||||
use_backend stream_ws if is_hls
|
||||
use_backend web_frontend if is_web
|
||||
|
|
@ -84,16 +80,6 @@ backend backend_api
|
|||
# Add more servers for load balancing:
|
||||
# server backend2 backend-api-2:8080 check inter 5s fall 3 rise 2
|
||||
|
||||
# Chat WebSocket (Rust)
|
||||
backend chat_ws
|
||||
mode http
|
||||
balance roundrobin
|
||||
option httpchk GET /health
|
||||
http-check expect status 200
|
||||
server chat1 chat-server:3000 check inter 5s fall 3 rise 2
|
||||
# WebSocket specific options
|
||||
timeout tunnel 3600s
|
||||
|
||||
# Stream WebSocket (Rust)
|
||||
backend stream_ws
|
||||
mode http
|
||||
|
|
|
|||
|
|
@ -28,8 +28,7 @@ Tous les scripts de déploiement ont été mis à jour pour utiliser **Debian 13
|
|||
- `veza-web` (10.10.10.5) - Debian 13 ✅
|
||||
- `veza-haproxy` (10.10.10.6) - Debian 13 ✅
|
||||
|
||||
❌ **2 conteneurs manquants** :
|
||||
- `veza-chat-server` (10.10.10.3) - Binaire non compilé (erreurs Rust)
|
||||
❌ **1 conteneur manquant** :
|
||||
- `veza-stream-server` (10.10.10.4) - Binaire non compilé (erreurs Rust)
|
||||
|
||||
### Vérification OS
|
||||
|
|
@ -52,7 +51,7 @@ VERSION_CODENAME=trixie
|
|||
- Apache (Web)
|
||||
- HAProxy
|
||||
|
||||
3. **Corriger les erreurs de compilation Rust** pour déployer chat-server et stream-server
|
||||
3. **Corriger les erreurs de compilation Rust** pour déployer stream-server
|
||||
|
||||
## Commandes Utiles
|
||||
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ Le déploiement Incus sans Docker est maintenant **complètement implémenté et
|
|||
**Fichier**: `config/incus/build-native.sh`
|
||||
|
||||
- ✅ Build Go backend (binaire statique Linux)
|
||||
- ✅ Build Rust chat-server (release mode)
|
||||
- ✅ Build Rust stream-server (release mode)
|
||||
- ✅ Build frontend React (production build)
|
||||
- ✅ Gestion des erreurs et vérifications
|
||||
|
|
@ -35,12 +34,11 @@ Le déploiement Incus sans Docker est maintenant **complètement implémenté et
|
|||
- ✅ Copie des binaires compilés
|
||||
- ✅ Configuration systemd
|
||||
- ✅ Configuration des variables d'environnement
|
||||
- ✅ Support pour tous les services (backend, chat, stream, web, haproxy, infra)
|
||||
- ✅ Support pour tous les services (backend, stream, web, haproxy, infra)
|
||||
|
||||
**Services déployés**:
|
||||
- `veza-infra` (10.10.10.10) - PostgreSQL + Redis
|
||||
- `veza-backend-api` (10.10.10.2) - Backend Go
|
||||
- `veza-chat-server` (10.10.10.3) - Chat Server Rust
|
||||
- `veza-stream-server` (10.10.10.4) - Stream Server Rust
|
||||
- `veza-web` (10.10.10.5) - Frontend React (Apache)
|
||||
- `veza-haproxy` (10.10.10.6) - Reverse Proxy
|
||||
|
|
@ -50,7 +48,6 @@ Le déploiement Incus sans Docker est maintenant **complètement implémenté et
|
|||
**Dossier**: `config/incus/systemd/`
|
||||
|
||||
- ✅ `veza-backend-api.service` - Service Backend API
|
||||
- ✅ `veza-chat-server.service` - Service Chat Server
|
||||
- ✅ `veza-stream-server.service` - Service Stream Server
|
||||
|
||||
**Caractéristiques**:
|
||||
|
|
@ -64,7 +61,6 @@ Le déploiement Incus sans Docker est maintenant **complètement implémenté et
|
|||
**Dossier**: `config/incus/env/`
|
||||
|
||||
- ✅ `backend-api.env` - Variables Backend API
|
||||
- ✅ `chat-server.env` - Variables Chat Server
|
||||
- ✅ `stream-server.env` - Variables Stream Server
|
||||
|
||||
**Configuration réseau**:
|
||||
|
|
@ -198,7 +194,6 @@ Le container `veza-infra` doit être démarré avant les services applicatifs po
|
|||
|
||||
Les ports suivants sont utilisés :
|
||||
- Backend API: 8080
|
||||
- Chat Server: 8081
|
||||
- Stream Server: 3002
|
||||
- Web (Apache): 80
|
||||
- HAProxy: 80
|
||||
|
|
@ -239,7 +234,6 @@ Internet
|
|||
HAProxy (10.10.10.6:80)
|
||||
|
|
||||
+---> Backend API (10.10.10.2:8080)
|
||||
+---> Chat Server (10.10.10.3:8081)
|
||||
+---> Stream Server (10.10.10.4:3002)
|
||||
+---> Web Frontend (10.10.10.5:80)
|
||||
|
|
||||
|
|
|
|||
|
|
@ -17,8 +17,7 @@ Le déploiement Incus de Veza est **partiellement fonctionnel** mais nécessite
|
|||
- Binaires compilés pour backend-api et web
|
||||
|
||||
### ❌ Ce qui ne fonctionne pas
|
||||
- **4 conteneurs manquants** :
|
||||
- `veza-chat-server` (10.10.10.3)
|
||||
- **3 conteneurs manquants** :
|
||||
- `veza-stream-server` (10.10.10.4)
|
||||
- `veza-web` (10.10.10.5)
|
||||
- `veza-haproxy` (10.10.10.6)
|
||||
|
|
@ -65,18 +64,7 @@ dial tcp 10.10.10.10:6379: connect: connection refused
|
|||
1. Installer et démarrer Redis dans veza-infra
|
||||
2. Redémarrer veza-backend-api
|
||||
|
||||
### 3. Chat Server (veza-chat-server)
|
||||
|
||||
**État**: ❌ Conteneur non déployé
|
||||
|
||||
**Raison**: Binaire non compilé (erreurs de compilation Rust)
|
||||
|
||||
**Action requise**:
|
||||
1. Corriger les erreurs de compilation Rust
|
||||
2. Compiler le binaire: `./config/incus/build-native.sh chat-server`
|
||||
3. Déployer: `./config/incus/deploy-service-native.sh chat-server`
|
||||
|
||||
### 4. Stream Server (veza-stream-server)
|
||||
### 3. Stream Server (veza-stream-server)
|
||||
|
||||
**État**: ❌ Conteneur non déployé
|
||||
|
||||
|
|
@ -87,7 +75,7 @@ dial tcp 10.10.10.10:6379: connect: connection refused
|
|||
2. Compiler le binaire: `./config/incus/build-native.sh stream-server`
|
||||
3. Déployer: `./config/incus/deploy-service-native.sh stream-server`
|
||||
|
||||
### 5. Web Frontend (veza-web)
|
||||
### 4. Web Frontend (veza-web)
|
||||
|
||||
**État**: ❌ Conteneur non déployé
|
||||
|
||||
|
|
@ -98,7 +86,7 @@ dial tcp 10.10.10.10:6379: connect: connection refused
|
|||
./config/incus/deploy-service-native.sh web
|
||||
```
|
||||
|
||||
### 6. HAProxy (veza-haproxy)
|
||||
### 5. HAProxy (veza-haproxy)
|
||||
|
||||
**État**: ❌ Conteneur non déployé
|
||||
|
||||
|
|
@ -134,7 +122,7 @@ incus exec veza-backend-api -- systemctl status veza-backend-api
|
|||
# Déployer haproxy
|
||||
./config/incus/deploy-service-native.sh haproxy
|
||||
|
||||
# Pour chat-server et stream-server, corriger d'abord les erreurs de compilation
|
||||
# Pour stream-server, corriger d'abord les erreurs de compilation
|
||||
```
|
||||
|
||||
### Étape 4: Vérification Complète
|
||||
|
|
@ -179,9 +167,9 @@ make incus-start-all
|
|||
|
||||
## Problèmes Connus
|
||||
|
||||
1. **Compilation Rust échoue** pour chat-server et stream-server
|
||||
1. **Compilation Rust échoue** pour stream-server
|
||||
- Nécessite correction des erreurs de compilation
|
||||
- Voir les logs dans `/tmp/chat-build.log` et `/tmp/stream-build.log`
|
||||
- Voir les logs dans `/tmp/stream-build.log`
|
||||
|
||||
2. **Déploiement prend beaucoup de temps**
|
||||
- L'installation des packages peut prendre 5-10 minutes par conteneur
|
||||
|
|
@ -196,7 +184,7 @@ make incus-start-all
|
|||
1. ✅ **Priorité 1**: Corriger l'infrastructure (PostgreSQL + Redis)
|
||||
2. ✅ **Priorité 2**: Faire démarrer le Backend API
|
||||
3. ✅ **Priorité 3**: Déployer Web et HAProxy
|
||||
4. ⚠️ **Priorité 4**: Corriger les erreurs de compilation Rust pour chat/stream
|
||||
4. ⚠️ **Priorité 4**: Corriger les erreurs de compilation Rust pour stream-server
|
||||
|
||||
## Support
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,6 @@ Chaque service est déployé dans un container Incus séparé avec des binaires
|
|||
|---------|-----------|-----|------|-------------|
|
||||
| Infrastructure | `veza-infra` | 10.10.10.10 | 5432, 6379 | PostgreSQL + Redis |
|
||||
| Backend API | `veza-backend-api` | 10.10.10.2 | 8080 | Backend Go (binaire natif) |
|
||||
| Chat Server | `veza-chat-server` | 10.10.10.3 | 8081 | Serveur Chat Rust (binaire natif) |
|
||||
| Stream Server | `veza-stream-server` | 10.10.10.4 | 3002 | Serveur Stream Rust (binaire natif) |
|
||||
| Web Frontend | `veza-web` | 10.10.10.5 | 80 | Frontend React (Apache - fichiers statiques uniquement) |
|
||||
| HAProxy | `veza-haproxy` | 10.10.10.6 | 80 | Reverse Proxy |
|
||||
|
|
@ -127,7 +126,6 @@ incus exec veza-backend-api -- systemctl status veza-backend-api
|
|||
|
||||
Les fichiers de configuration sont dans `config/incus/env/` :
|
||||
- `backend-api.env` - Configuration Backend API
|
||||
- `chat-server.env` - Configuration Chat Server
|
||||
- `stream-server.env` - Configuration Stream Server
|
||||
|
||||
**Important** : Ces fichiers `.env` ne sont pas versionnés (ils contiennent des secrets).
|
||||
|
|
@ -137,7 +135,7 @@ Créez-les localement à partir de `env/env.example` :
|
|||
cd config/incus/env
|
||||
# Créer les fichiers à partir du template et remplir les valeurs
|
||||
cp env.example backend-api.env # puis éditer
|
||||
# Idem pour chat-server.env et stream-server.env
|
||||
# Idem pour stream-server.env
|
||||
```
|
||||
|
||||
Modifiez ces fichiers avant le déploiement ou éditez-les dans les containers :
|
||||
|
|
@ -151,7 +149,6 @@ incus exec veza-backend-api -- systemctl restart veza-backend-api
|
|||
|
||||
Les fichiers systemd sont dans `config/incus/systemd/` :
|
||||
- `veza-backend-api.service`
|
||||
- `veza-chat-server.service`
|
||||
- `veza-stream-server.service`
|
||||
|
||||
## Accès aux services
|
||||
|
|
@ -160,7 +157,6 @@ Une fois déployé, les services sont accessibles via :
|
|||
|
||||
- **HAProxy (point d'entrée)** : http://10.10.10.6:80
|
||||
- **Backend API** : http://10.10.10.2:8080
|
||||
- **Chat Server** : ws://10.10.10.3:8081/ws
|
||||
- **Stream Server** : ws://10.10.10.4:3002/stream
|
||||
- **Web Frontend** : http://10.10.10.5:80
|
||||
|
||||
|
|
@ -213,7 +209,6 @@ make deploy-incus
|
|||
|
||||
**HAProxy** est le point d'entrée principal et gère tout le routing :
|
||||
- `/api/v1/*` → Backend API (10.10.10.2:8080)
|
||||
- `/ws/*` → Chat Server (10.10.10.3:8081)
|
||||
- `/stream/*` → Stream Server (10.10.10.4:3002)
|
||||
- `/*` → Web Frontend (10.10.10.5:80)
|
||||
|
||||
|
|
|
|||
|
|
@ -45,49 +45,6 @@ build_backend_api() {
|
|||
echo -e "${GREEN}✅ backend-api built${NC}"
|
||||
}
|
||||
|
||||
build_chat_server() {
|
||||
echo -e "${BLUE}Building chat-server (Rust)...${NC}"
|
||||
cd "${PROJECT_ROOT}/veza-chat-server"
|
||||
|
||||
# Try cross-compilation first, fallback to native
|
||||
BINARY_PATH=""
|
||||
if command -v rustup >/dev/null 2>&1 && rustup target list --installed | grep -q "x86_64-unknown-linux-gnu"; then
|
||||
echo -e "${YELLOW}Attempting cross-compilation...${NC}"
|
||||
if cargo build --release --target x86_64-unknown-linux-gnu 2>&1 | tee /tmp/chat-build.log; then
|
||||
BINARY_PATH="target/x86_64-unknown-linux-gnu/release/chat-server"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Fallback to native build
|
||||
if [ -z "${BINARY_PATH}" ] || [ ! -f "${BINARY_PATH}" ]; then
|
||||
echo -e "${YELLOW}Using native build...${NC}"
|
||||
if cargo build --release 2>&1 | tee /tmp/chat-build.log; then
|
||||
BINARY_PATH="target/release/chat-server"
|
||||
else
|
||||
echo -e "${RED}❌ Failed to build chat-server${NC}"
|
||||
echo -e "${YELLOW}Build log saved to /tmp/chat-build.log${NC}"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy binary
|
||||
if [ -f "${BINARY_PATH}" ]; then
|
||||
cp "${BINARY_PATH}" "${BUILD_DIR}/veza-chat-server"
|
||||
chmod +x "${BUILD_DIR}/veza-chat-server"
|
||||
else
|
||||
echo -e "${RED}❌ Failed to build chat-server: binary not found${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ ! -f "${BUILD_DIR}/veza-chat-server" ]; then
|
||||
echo -e "${RED}❌ Failed to copy chat-server binary${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}✅ chat-server built${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
build_stream_server() {
|
||||
echo -e "${BLUE}Building stream-server (Rust)...${NC}"
|
||||
cd "${PROJECT_ROOT}/veza-stream-server"
|
||||
|
|
@ -178,9 +135,6 @@ case "${SERVICE}" in
|
|||
backend-api)
|
||||
build_backend_api
|
||||
;;
|
||||
chat-server)
|
||||
build_chat_server
|
||||
;;
|
||||
stream-server)
|
||||
build_stream_server
|
||||
;;
|
||||
|
|
@ -190,7 +144,6 @@ case "${SERVICE}" in
|
|||
all)
|
||||
FAILED=0
|
||||
build_backend_api || FAILED=$((FAILED + 1))
|
||||
build_chat_server || FAILED=$((FAILED + 1))
|
||||
build_stream_server || FAILED=$((FAILED + 1))
|
||||
build_web || FAILED=$((FAILED + 1))
|
||||
|
||||
|
|
@ -206,7 +159,7 @@ case "${SERVICE}" in
|
|||
;;
|
||||
*)
|
||||
echo -e "${YELLOW}Unknown service: ${SERVICE}${NC}"
|
||||
echo "Available services: backend-api, chat-server, stream-server, web, all"
|
||||
echo "Available services: backend-api, stream-server, web, all"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ echo ""
|
|||
|
||||
# 1. Vérifier les conteneurs
|
||||
echo -e "${BLUE}1. Conteneurs Incus:${NC}"
|
||||
EXPECTED_CONTAINERS=("veza-infra" "veza-backend-api" "veza-chat-server" "veza-stream-server" "veza-web" "veza-haproxy")
|
||||
EXPECTED_CONTAINERS=("veza-infra" "veza-backend-api" "veza-stream-server" "veza-web" "veza-haproxy")
|
||||
ALL_CONTAINERS_OK=true
|
||||
|
||||
for container in "${EXPECTED_CONTAINERS[@]}"; do
|
||||
|
|
@ -49,16 +49,6 @@ if incus list -c n --format csv 2>/dev/null | grep -q "^veza-backend-api$"; then
|
|||
fi
|
||||
fi
|
||||
|
||||
# Chat Server
|
||||
if incus list -c n --format csv 2>/dev/null | grep -q "^veza-chat-server$"; then
|
||||
if incus exec veza-chat-server -- systemctl is-active --quiet veza-chat-server 2>/dev/null; then
|
||||
echo -e " ${GREEN}✅ veza-chat-server - ACTIVE${NC}"
|
||||
else
|
||||
echo -e " ${YELLOW}⚠️ veza-chat-server - INACTIVE${NC}"
|
||||
ALL_SERVICES_OK=false
|
||||
fi
|
||||
fi
|
||||
|
||||
# Stream Server
|
||||
if incus list -c n --format csv 2>/dev/null | grep -q "^veza-stream-server$"; then
|
||||
if incus exec veza-stream-server -- systemctl is-active --quiet veza-stream-server 2>/dev/null; then
|
||||
|
|
@ -146,19 +136,6 @@ if incus list -c n --format csv 2>/dev/null | grep -q "^veza-backend-api$"; then
|
|||
fi
|
||||
fi
|
||||
|
||||
# Chat Server
|
||||
if incus list -c n --format csv 2>/dev/null | grep -q "^veza-chat-server$"; then
|
||||
if incus exec veza-chat-server -- systemctl is-active --quiet veza-chat-server 2>/dev/null; then
|
||||
if incus exec veza-chat-server -- timeout 3 curl -s -f http://localhost:8081/health >/dev/null 2>&1; then
|
||||
echo -e " ${GREEN}✅ Chat Server (http://10.10.10.3:8081) - OK${NC}"
|
||||
else
|
||||
echo -e " ${YELLOW}⚠️ Chat Server - Service running but endpoint not responding${NC}"
|
||||
fi
|
||||
else
|
||||
echo -e " ${RED}❌ Chat Server - Service not running${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Stream Server
|
||||
if incus list -c n --format csv 2>/dev/null | grep -q "^veza-stream-server$"; then
|
||||
if incus exec veza-stream-server -- systemctl is-active --quiet veza-stream-server 2>/dev/null; then
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ BUILD_DIR="${PROJECT_ROOT}/.build/incus"
|
|||
|
||||
if [ -z "$SERVICE" ]; then
|
||||
echo "Usage: $0 <service-name>"
|
||||
echo "Services: backend-api, chat-server, stream-server, web, haproxy, infra"
|
||||
echo "Services: backend-api, stream-server, web, haproxy, infra"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
|
@ -109,9 +109,6 @@ case ${SERVICE} in
|
|||
backend-api)
|
||||
STATIC_IP="10.10.10.2"
|
||||
;;
|
||||
chat-server)
|
||||
STATIC_IP="10.10.10.3"
|
||||
;;
|
||||
stream-server)
|
||||
STATIC_IP="10.10.10.4"
|
||||
;;
|
||||
|
|
@ -419,44 +416,6 @@ case ${SERVICE} in
|
|||
incus exec ${CONTAINER_NAME} -- systemctl start veza-backend-api || echo "Warning: Service start failed, check logs"
|
||||
;;
|
||||
|
||||
chat-server)
|
||||
echo "Installing Rust runtime dependencies..."
|
||||
incus exec ${CONTAINER_NAME} -- bash -c "
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get install -y -qq ca-certificates libc6 libssl3
|
||||
"
|
||||
|
||||
# Copy binary
|
||||
echo "Copying chat-server binary..."
|
||||
if [ ! -f "${BUILD_DIR}/veza-chat-server" ]; then
|
||||
echo "ERROR: Binary not found at ${BUILD_DIR}/veza-chat-server"
|
||||
echo "Please run: make build-all-native or ./config/incus/build-native.sh chat-server"
|
||||
exit 1
|
||||
fi
|
||||
incus file push "${BUILD_DIR}/veza-chat-server" ${CONTAINER_NAME}/usr/local/bin/veza-chat-server
|
||||
incus exec ${CONTAINER_NAME} -- chmod +x /usr/local/bin/veza-chat-server
|
||||
|
||||
# Create directories
|
||||
incus exec ${CONTAINER_NAME} -- bash -c "
|
||||
mkdir -p /opt/veza/chat-server
|
||||
mkdir -p /var/log/veza
|
||||
mkdir -p /etc/veza
|
||||
"
|
||||
|
||||
# Copy systemd service
|
||||
incus file push "${PROJECT_ROOT}/config/incus/systemd/veza-chat-server.service" \
|
||||
${CONTAINER_NAME}/etc/systemd/system/veza-chat-server.service
|
||||
|
||||
# Copy environment file template
|
||||
incus file push "${PROJECT_ROOT}/config/incus/env/chat-server.env" \
|
||||
${CONTAINER_NAME}/etc/veza/chat-server.env 2>/dev/null || true
|
||||
|
||||
# Enable and start service
|
||||
incus exec ${CONTAINER_NAME} -- systemctl daemon-reload
|
||||
incus exec ${CONTAINER_NAME} -- systemctl enable veza-chat-server
|
||||
incus exec ${CONTAINER_NAME} -- systemctl start veza-chat-server || echo "Warning: Service start failed, check logs"
|
||||
;;
|
||||
|
||||
stream-server)
|
||||
echo "Installing Rust runtime dependencies..."
|
||||
incus exec ${CONTAINER_NAME} -- bash -c "
|
||||
|
|
@ -850,7 +809,7 @@ if incus list ${CONTAINER_NAME} --format csv | grep -q "${CONTAINER_NAME}"; then
|
|||
|
||||
# Check service status if applicable
|
||||
case ${SERVICE} in
|
||||
backend-api|chat-server|stream-server)
|
||||
backend-api|stream-server)
|
||||
SERVICE_NAME="veza-${SERVICE}"
|
||||
if incus exec ${CONTAINER_NAME} -- systemctl is-active ${SERVICE_NAME} >/dev/null 2>&1; then
|
||||
echo "✅ Service ${SERVICE_NAME} is running"
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ PROFILE="veza-profile"
|
|||
|
||||
if [ -z "$SERVICE" ]; then
|
||||
echo "Usage: $0 <service-name>"
|
||||
echo "Services: backend-api, chat-server, stream-server, web, haproxy"
|
||||
echo "Services: backend-api, stream-server, web, haproxy"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
|
@ -59,9 +59,6 @@ case ${SERVICE} in
|
|||
backend-api)
|
||||
incus file push -r ../../veza-backend-api ${CONTAINER_NAME}/opt/veza/
|
||||
;;
|
||||
chat-server)
|
||||
incus file push -r ../../veza-chat-server ${CONTAINER_NAME}/opt/veza/
|
||||
;;
|
||||
stream-server)
|
||||
incus file push -r ../../veza-stream-server ${CONTAINER_NAME}/opt/veza/
|
||||
;;
|
||||
|
|
|
|||
12
config/incus/env/env.example
vendored
12
config/incus/env/env.example
vendored
|
|
@ -1,5 +1,5 @@
|
|||
# Incus Environment Templates
|
||||
# Copy the relevant section to backend-api.env, chat-server.env, stream-server.env
|
||||
# Copy the relevant section to backend-api.env, stream-server.env
|
||||
# NEVER commit real .env files — they contain secrets.
|
||||
# Create these files locally: cp env.example backend-api.env && edit backend-api.env
|
||||
|
||||
|
|
@ -16,19 +16,9 @@
|
|||
# JWT_SECRET=${JWT_SECRET}
|
||||
# CORS_ALLOWED_ORIGINS=https://veza.fr,https://app.veza.fr
|
||||
# STREAM_SERVER_URL=http://10.10.10.4:3002
|
||||
# CHAT_SERVER_URL=http://10.10.10.3:8081
|
||||
# ENABLE_CLAMAV=false
|
||||
# CLAMAV_REQUIRED=false
|
||||
|
||||
# === chat-server.env ===
|
||||
# RUST_ENV=production
|
||||
# RUST_LOG=info
|
||||
# DATABASE_URL=postgresql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:5432/veza?sslmode=disable
|
||||
# JWT_SECRET=${JWT_SECRET}
|
||||
# SERVER_BIND_ADDR=0.0.0.0:8081
|
||||
# REDIS_URL=redis://${REDIS_HOST}:6379
|
||||
# RABBITMQ_URL=amqp://${RABBITMQ_USER}:${RABBITMQ_PASSWORD}@${RABBITMQ_HOST}:5672/%2f
|
||||
|
||||
# === stream-server.env ===
|
||||
# RUST_ENV=production
|
||||
# RUST_LOG=info
|
||||
|
|
|
|||
|
|
@ -145,7 +145,6 @@ for CONTAINER in ${CONTAINERS}; do
|
|||
case ${CONTAINER} in
|
||||
*infra*) CONTAINER_IP="10.10.10.10" ;;
|
||||
*backend-api*) CONTAINER_IP="10.10.10.2" ;;
|
||||
*chat-server*) CONTAINER_IP="10.10.10.3" ;;
|
||||
*stream-server*) CONTAINER_IP="10.10.10.4" ;;
|
||||
*web*) CONTAINER_IP="10.10.10.5" ;;
|
||||
*haproxy*) CONTAINER_IP="10.10.10.6" ;;
|
||||
|
|
|
|||
|
|
@ -47,11 +47,11 @@ frontend http_frontend
|
|||
acl is_stream path_beg /stream
|
||||
acl is_web path_beg /
|
||||
|
||||
# Return 503 for WebSocket endpoints (chat/stream not available)
|
||||
# Note: chat-server and stream-server are disabled (Rust services not deployed)
|
||||
# Return 503 for WebSocket endpoints (stream not available)
|
||||
# Note: stream-server is disabled (Rust service not deployed)
|
||||
# Must be before redirect to avoid processing order issues
|
||||
http-request return status 503 content-type "text/plain" string "Service temporarily unavailable: chat-server and stream-server are not deployed" if is_ws
|
||||
http-request return status 503 content-type "text/plain" string "Service temporarily unavailable: chat-server and stream-server are not deployed" if is_stream
|
||||
http-request return status 503 content-type "text/plain" string "Service temporarily unavailable: stream-server is not deployed" if is_ws
|
||||
http-request return status 503 content-type "text/plain" string "Service temporarily unavailable: stream-server is not deployed" if is_stream
|
||||
|
||||
# Redirect HTTP to HTTPS (after WebSocket checks)
|
||||
redirect scheme https code 301 if !{ ssl_fc }
|
||||
|
|
@ -72,17 +72,6 @@ backend backend_api
|
|||
http-check expect status 200
|
||||
server backend1 10.10.10.2:8080 check inter 5s fall 3 rise 2
|
||||
|
||||
# Chat WebSocket (Rust) - veza-chat-server container
|
||||
# DISABLED: chat-server is not deployed (Rust compilation issues)
|
||||
# backend chat_ws
|
||||
# mode http
|
||||
# balance roundrobin
|
||||
# option httpchk GET /health
|
||||
# http-check expect status 200
|
||||
# server chat1 10.10.10.3:8081 check inter 5s fall 3 rise 2
|
||||
# # WebSocket specific options
|
||||
# timeout tunnel 3600s
|
||||
|
||||
# Stream WebSocket (Rust) - veza-stream-server container
|
||||
# DISABLED: stream-server is not deployed (Rust compilation issues)
|
||||
# backend stream_ws
|
||||
|
|
|
|||
|
|
@ -142,4 +142,4 @@ echo ""
|
|||
echo "You can now deploy services with:"
|
||||
echo " ./deploy-service-native.sh <service-name>"
|
||||
echo ""
|
||||
echo "Available services: infra, backend-api, chat-server, stream-server, web, haproxy"
|
||||
echo "Available services: infra, backend-api, stream-server, web, haproxy"
|
||||
|
|
|
|||
|
|
@ -1,27 +0,0 @@
|
|||
[Unit]
|
||||
Description=Veza Chat Server Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
WorkingDirectory=/opt/veza/chat-server
|
||||
ExecStart=/usr/local/bin/veza-chat-server
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=veza-chat-server
|
||||
|
||||
# Environment variables
|
||||
EnvironmentFile=/etc/veza/chat-server.env
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
|
||||
# Resource limits
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
|
@ -15,7 +15,7 @@ echo ""
|
|||
|
||||
# Check if containers exist
|
||||
echo -e "${BLUE}Checking containers...${NC}"
|
||||
CONTAINERS=("veza-infra" "veza-backend-api" "veza-chat-server" "veza-stream-server" "veza-web" "veza-haproxy")
|
||||
CONTAINERS=("veza-infra" "veza-backend-api" "veza-stream-server" "veza-web" "veza-haproxy")
|
||||
ALL_EXIST=true
|
||||
|
||||
for container in "${CONTAINERS[@]}"; do
|
||||
|
|
@ -39,7 +39,6 @@ echo ""
|
|||
echo -e "${BLUE}Checking IP addresses...${NC}"
|
||||
EXPECTED_IPS=(
|
||||
"veza-backend-api:10.10.10.2"
|
||||
"veza-chat-server:10.10.10.3"
|
||||
"veza-stream-server:10.10.10.4"
|
||||
"veza-web:10.10.10.5"
|
||||
"veza-haproxy:10.10.10.6"
|
||||
|
|
@ -64,7 +63,7 @@ echo ""
|
|||
|
||||
# Check services
|
||||
echo -e "${BLUE}Checking systemd services...${NC}"
|
||||
SERVICES=("veza-backend-api" "veza-chat-server" "veza-stream-server")
|
||||
SERVICES=("veza-backend-api" "veza-stream-server")
|
||||
|
||||
for service in "${SERVICES[@]}"; do
|
||||
container="veza-$(echo $service | sed 's/veza-//')"
|
||||
|
|
@ -143,18 +142,6 @@ if incus list -c n --format csv | grep -q "^veza-backend-api$"; then
|
|||
fi
|
||||
fi
|
||||
|
||||
if incus list -c n --format csv | grep -q "^veza-chat-server$"; then
|
||||
if incus exec veza-chat-server -- systemctl is-active --quiet veza-chat-server 2>/dev/null; then
|
||||
if incus exec veza-chat-server -- timeout 2 curl -s -f http://localhost:8081/health >/dev/null 2>&1; then
|
||||
echo -e " ${GREEN}✅ Chat Server health check - OK${NC}"
|
||||
else
|
||||
echo -e " ${YELLOW}⚠️ Chat Server health check - Service running but endpoint not responding${NC}"
|
||||
fi
|
||||
else
|
||||
echo -e " ${YELLOW}⚠️ Chat Server - Service not running${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if incus list -c n --format csv | grep -q "^veza-stream-server$"; then
|
||||
if incus exec veza-stream-server -- systemctl is-active --quiet veza-stream-server 2>/dev/null; then
|
||||
if incus exec veza-stream-server -- timeout 2 curl -s -f http://localhost:3002/health >/dev/null 2>&1; then
|
||||
|
|
|
|||
|
|
@ -15,11 +15,6 @@ scrape_configs:
|
|||
- targets: ['backend-api:8080'] # Use container name in same network
|
||||
metrics_path: '/metrics'
|
||||
|
||||
- job_name: 'veza-chat'
|
||||
static_configs:
|
||||
- targets: ['chat-server:8081'] # Use container name
|
||||
metrics_path: '/metrics'
|
||||
|
||||
- job_name: 'veza-stream'
|
||||
static_configs:
|
||||
- targets: ['stream-server:8082'] # Use container name
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
"name": "veza-monorepo",
|
||||
"private": true,
|
||||
"packageManager": "npm@10.9.2",
|
||||
"workspaces": ["apps/web", "packages/*", "veza-backend-api", "veza-chat-server", "veza-stream-server"],
|
||||
"workspaces": ["apps/web", "packages/*", "veza-backend-api", "veza-stream-server"],
|
||||
"overrides": {
|
||||
"axios": ">=1.13.5"
|
||||
},
|
||||
|
|
|
|||
|
|
@ -1,14 +0,0 @@
|
|||
# Configuration Clippy pour le chat server
|
||||
# Ignorer les warnings non critiques pour se concentrer sur les erreurs importantes
|
||||
|
||||
# Ignorer les warnings de formatage
|
||||
allow-mixed-uninlined-format-args = true
|
||||
|
||||
# Ignorer les warnings de variables inutilisées (code en développement)
|
||||
allow-unwrap-in-tests = true
|
||||
allow-expect-in-tests = true
|
||||
allow-dbg-in-tests = true
|
||||
|
||||
# Ignorer les warnings de style
|
||||
allow-comparison-to-zero = true
|
||||
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
# Rust build artifacts
|
||||
target/
|
||||
**/*.rs.bk
|
||||
*.pdb
|
||||
|
||||
# Cargo
|
||||
# Cargo.lock removed to allow reproducible builds
|
||||
# Note: We DO want Cargo.lock in the container for reproducible builds
|
||||
|
||||
# Test files
|
||||
**/*_test.rs
|
||||
**/test_*.rs
|
||||
tests/
|
||||
*.test
|
||||
|
||||
# Documentation
|
||||
*.md
|
||||
docs/
|
||||
README.md
|
||||
docs/
|
||||
|
||||
# Git
|
||||
.git
|
||||
.gitignore
|
||||
.gitattributes
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# Environment
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
|
||||
# Build scripts (not needed in container)
|
||||
Makefile
|
||||
deploy-simple.sh
|
||||
|
||||
# Docker
|
||||
Dockerfile*
|
||||
.dockerignore
|
||||
docker-compose*.yml
|
||||
|
||||
# Temporary files
|
||||
tmp/
|
||||
temp/
|
||||
*.tmp
|
||||
|
||||
# SQLx metadata - KEEP sqlx-data.json for SQLX_OFFLINE build (v0.101)
|
||||
# sqlx-data.json removed from ignore to allow Docker build without DB
|
||||
# SQLX_METADATA.md
|
||||
|
||||
# Config files (not needed in container if using env vars)
|
||||
config/
|
||||
# proto/ removed to allow building protos
|
||||
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
# Configuration Lab pour Veza Chat Server
|
||||
# Copiez ce fichier vers .env.lab
|
||||
|
||||
# Base de données (avec schema chat forcé)
|
||||
# Note: Les scripts lab ajoutent automatiquement options=-c search_path=chat si absent
|
||||
VEZA_LAB_DSN="postgres://veza:veza_password@veza.fr:5432/veza_lab?sslmode=disable"
|
||||
DATABASE_URL="postgres://veza:veza_password@veza.fr:5432/veza_lab?sslmode=disable&options=-c%20search_path=chat"
|
||||
|
||||
# Serveur
|
||||
CHAT_SERVER_PORT=8081
|
||||
CHAT_SERVER_HOST=0.0.0.0
|
||||
RUST_LOG=info,chat_server=debug
|
||||
|
||||
# Sécurité (Généré auto par start_lab.sh si absent)
|
||||
# JWT_SECRET=...
|
||||
|
||||
# RabbitMQ (Désactivé par défaut en lab)
|
||||
RABBITMQ_ENABLE=false
|
||||
42
veza-chat-server/.gitignore
vendored
42
veza-chat-server/.gitignore
vendored
|
|
@ -1,42 +0,0 @@
|
|||
# If you prefer the allow list template instead of the deny list, see community template:
|
||||
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
|
||||
#
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# Go workspace file
|
||||
go.work
|
||||
go.work.sum
|
||||
|
||||
# env file
|
||||
.env
|
||||
.env.*
|
||||
!.env.lab.example
|
||||
target/
|
||||
*.zip
|
||||
tree.txt
|
||||
all/
|
||||
bin/
|
||||
node_modules/
|
||||
.parcel-cache/
|
||||
dist/
|
||||
.DS_Store
|
||||
versions_details/
|
||||
tests/
|
||||
static/
|
||||
go.sum
|
||||
Cargo.lock
|
||||
.cursor*
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -1,636 +0,0 @@
|
|||
# 🔍 AUDIT EXHAUSTIF - VEZA CHAT SERVER
|
||||
## Module: `veza-chat-server` (Rust)
|
||||
|
||||
**Date**: 2025-01-27
|
||||
**Auditeur**: Auto (Cursor AI)
|
||||
**Version analysée**: 0.2.0
|
||||
**Statut compilation**: ❌ **ÉCHEC** (conflit de dépendances SQLx)
|
||||
|
||||
---
|
||||
|
||||
# PHASE A — CARTOGRAPHIE DU MODULE
|
||||
|
||||
## 1. But du module
|
||||
|
||||
**Rôle**: Serveur de chat temps réel avec WebSocket pour la plateforme Veza.
|
||||
|
||||
**Fonctionnalités principales**:
|
||||
- Communication WebSocket bidirectionnelle (Axum + tokio-tungstenite)
|
||||
- Gestion de conversations (directes, groupes, channels)
|
||||
- Messages avec édition/suppression
|
||||
- Read receipts et delivered status
|
||||
- Typing indicators
|
||||
- Recherche et synchronisation d'historique
|
||||
- Authentification JWT avec refresh tokens
|
||||
- Permissions RBAC (admin, moderator, member)
|
||||
- Event Bus RabbitMQ (optionnel)
|
||||
- Métriques Prometheus
|
||||
|
||||
## 2. Entrées / Sorties
|
||||
|
||||
### APIs exposées
|
||||
|
||||
**HTTP REST** (port 8081 par défaut):
|
||||
- `GET /health` - Health check
|
||||
- `GET /healthz` - Health check (alias)
|
||||
- `GET /readyz` - Readiness check (DB + RabbitMQ)
|
||||
- `GET /metrics` - Métriques Prometheus
|
||||
- `GET /api/messages/stats` - Statistiques serveur
|
||||
- `GET /api/messages/{conversation_id}` - Récupération messages (authentifié)
|
||||
- `POST /api/messages` - Envoi message (authentifié)
|
||||
|
||||
**WebSocket** (port 8081):
|
||||
- `GET /ws?token=<JWT>` - Connexion WebSocket
|
||||
|
||||
**Formats**:
|
||||
- JSON pour HTTP REST
|
||||
- JSON pour WebSocket (messages structurés)
|
||||
- Protobuf pour gRPC (présent mais non utilisé dans main.rs)
|
||||
|
||||
### Events WebSocket
|
||||
|
||||
**Incoming** (`IncomingMessage`):
|
||||
- `SendMessage`, `JoinConversation`, `LeaveConversation`
|
||||
- `MarkAsRead`, `Typing`, `Delivered`
|
||||
- `EditMessage`, `DeleteMessage`
|
||||
- `FetchHistory`, `SearchMessages`, `SyncMessages`
|
||||
- `Ping`
|
||||
|
||||
**Outgoing** (`OutgoingMessage`):
|
||||
- `NewMessage`, `MessageRead`, `MessageDelivered`
|
||||
- `UserTyping`, `MessageEdited`, `MessageDeleted`
|
||||
- `HistoryChunk`, `SearchResults`, `SyncChunk`
|
||||
- `ActionConfirmed`, `Error`, `Pong`
|
||||
|
||||
## 3. Dépendances internes
|
||||
|
||||
- `veza-common` (path: `../veza-common`) - Types partagés
|
||||
- Modules internes: `config`, `database`, `error`, `jwt_manager`, `repository`, `security`, `services`, `websocket`
|
||||
|
||||
## 4. Dépendances externes
|
||||
|
||||
**Base de données**:
|
||||
- PostgreSQL (via SQLx 0.8.6)
|
||||
- Migrations SQL dans `migrations/`
|
||||
|
||||
**Cache** (optionnel):
|
||||
- Redis (via `redis` crate, feature `redis-cache`)
|
||||
|
||||
**Message Broker** (optionnel):
|
||||
- RabbitMQ (via `lapin` 2.3)
|
||||
|
||||
**Monitoring**:
|
||||
- Prometheus (via `metrics-exporter-prometheus`)
|
||||
|
||||
## 5. Exécution
|
||||
|
||||
### Build
|
||||
```bash
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
### Run
|
||||
```bash
|
||||
./target/release/chat-server
|
||||
```
|
||||
|
||||
### Variables d'environnement critiques
|
||||
- `DATABASE_URL` (requis) - PostgreSQL connection string
|
||||
- `JWT_SECRET` (requis, min 32 chars) - Secret pour JWT
|
||||
- `CHAT_SERVER_PORT` (défaut: 8081)
|
||||
- `CHAT_SERVER_HOST` (défaut: 0.0.0.0)
|
||||
- `RABBITMQ_URL` (optionnel)
|
||||
- `RABBITMQ_ENABLE` (défaut: true)
|
||||
|
||||
### Docker
|
||||
- `Dockerfile` présent (multi-stage, Alpine)
|
||||
- `docker-compose.yml` présent
|
||||
- Healthcheck configuré (`/health`)
|
||||
|
||||
## 6. Points d'intégration
|
||||
|
||||
**Backend Go**:
|
||||
- JWT tokens partagés (audience: `veza-chat`, issuer: `veza-backend`)
|
||||
- Schéma DB partagé (UUID pour users, conversations, messages)
|
||||
|
||||
**Frontend React**:
|
||||
- WebSocket: `ws://<host>:8081/ws?token=<JWT>`
|
||||
- REST API: `http://<host>:8081/api/*`
|
||||
- Headers: `Authorization: Bearer <JWT>`
|
||||
|
||||
**Auth**:
|
||||
- JWT HS256 (configurable)
|
||||
- Claims: `sub` (user_id UUID), `username`, `role`, `aud`, `iss`, `exp`, `iat`, `jti`
|
||||
|
||||
---
|
||||
|
||||
# PHASE B — SANTÉ TECHNIQUE
|
||||
|
||||
## Build Status
|
||||
|
||||
### ❌ **P0 - BUILD CASSÉ**
|
||||
|
||||
**Erreur**: Conflit de dépendances SQLx
|
||||
|
||||
```
|
||||
error: failed to select a version for `libsqlite3-sys`.
|
||||
package `libsqlite3-sys v0.30.1` (sqlx 0.8.6)
|
||||
conflicts with `libsqlite3-sys v0.26.0` (sqlx 0.7.0 via veza-common)
|
||||
```
|
||||
|
||||
**Fichiers concernés**:
|
||||
- `Cargo.toml` ligne 43: `sqlx = "0.8.6"`
|
||||
- `veza-common` (externe): `sqlx = "^0.7"`
|
||||
|
||||
**Impact**: **Impossible de compiler le projet**
|
||||
|
||||
**Fix minimal**: Aligner les versions SQLx entre `chat_server` et `veza-common`, ou exclure SQLite de `veza-common` si non utilisé.
|
||||
|
||||
## Tests
|
||||
|
||||
### Couverture
|
||||
- Tests unitaires présents dans plusieurs modules
|
||||
- Tests d'intégration avec `#[ignore]` (nécessitent DB)
|
||||
- Tests JWT présents (`jwt_manager.rs`)
|
||||
- Tests permissions partiels
|
||||
|
||||
### Problèmes détectés
|
||||
- Beaucoup de tests ignorés (`#[ignore]`) car nécessitent DB
|
||||
- Pas de tests E2E WebSocket
|
||||
- Tests de sécurité manquants (injection, rate limiting)
|
||||
|
||||
## Gestion des erreurs
|
||||
|
||||
### ✅ Points positifs
|
||||
- Type `ChatError` exhaustif avec `thiserror`
|
||||
- Helpers pour création d'erreurs (`ChatError::not_found`, etc.)
|
||||
- Mapping HTTP status codes approprié
|
||||
- Logging structuré avec `tracing`
|
||||
|
||||
### ⚠️ Points d'attention
|
||||
- **169 occurrences de `unwrap()` / `expect()` / `panic!`** détectées
|
||||
- Certains `unwrap()` dans code de production (ex: `src/config.rs:21`, `src/main.rs:489`)
|
||||
- Panics possibles dans `SecurityConfig::default()` (ligne 192) si appelé hors test
|
||||
|
||||
## Linters / Qualité
|
||||
|
||||
### Clippy
|
||||
- Non exécuté dans l'audit (nécessite build)
|
||||
- Recommandation: `cargo clippy --all-targets --all-features -- -D warnings`
|
||||
|
||||
### Conventions
|
||||
- ✅ Structure modulaire claire
|
||||
- ✅ Documentation rustdoc présente
|
||||
- ⚠️ Mix de noms français/anglais (ex: `conversation_id` vs `room_id`)
|
||||
- ⚠️ Code mort potentiel (`security_legacy.rs`, `simple_message_store.rs`)
|
||||
|
||||
---
|
||||
|
||||
# PHASE C — SÉCURITÉ
|
||||
|
||||
## Top 10 Risques Critiques
|
||||
|
||||
### **P0-001: Conflit de dépendances SQLx (Build cassé)**
|
||||
- **Impact**: Impossible de déployer
|
||||
- **Fichier**: `Cargo.toml:43` vs `veza-common`
|
||||
- **Fix**: Aligner versions ou exclure SQLite
|
||||
- **Effort**: S (1h)
|
||||
|
||||
### **P0-002: JWT Secret faible par défaut**
|
||||
- **Impact**: Tokens compromis si secret faible
|
||||
- **Fichier**: `src/main.rs:158`, `env.example:20`
|
||||
- **Preuve**: `JWT_SECRET=your-super-secret-jwt-key-change-this-in-production`
|
||||
- **Fix**: Validation stricte min 64 chars, génération aléatoire au démarrage si absent
|
||||
- **Effort**: S (30min)
|
||||
|
||||
### **P0-003: Panics dans code de production**
|
||||
- **Impact**: Crash serveur
|
||||
- **Fichiers**:
|
||||
- `src/main.rs:489` - `expect("failed to install Ctrl+C handler")`
|
||||
- `src/config.rs:192` - `panic!` dans `SecurityConfig::default()`
|
||||
- `src/env.rs:30,61` - `panic!` dans helpers
|
||||
- **Fix**: Remplacer par `Result` et gestion d'erreurs
|
||||
- **Effort**: M (2h)
|
||||
|
||||
### **P0-004: Validation JWT incomplète**
|
||||
- **Impact**: Tokens expirés ou invalides acceptés
|
||||
- **Fichier**: `src/jwt_manager.rs:266-303`
|
||||
- **Preuve**: Vérification `exp` manuelle après décodage (ligne 290-293), mais pas de vérification `nbf` (not before)
|
||||
- **Fix**: Ajouter validation `nbf`, vérifier `iss`/`aud` strictement
|
||||
- **Effort**: S (1h)
|
||||
|
||||
### **P1-005: SQL Injection potentielle dans recherche**
|
||||
- **Impact**: Exécution de requêtes SQL arbitraires
|
||||
- **Fichier**: `src/repository/message_repository.rs:563`
|
||||
- **Preuve**: `format!("%{}%", query)` - si `query` contient `%` ou `_`, comportement inattendu
|
||||
- **Fix**: Échapper caractères spéciaux ou utiliser `ILIKE` avec paramètre bindé
|
||||
- **Effort**: S (30min)
|
||||
|
||||
### **P1-006: Rate limiting non implémenté**
|
||||
- **Impact**: DoS possible, spam
|
||||
- **Fichier**: `src/security/mod.rs:94` - TODO comment
|
||||
- **Preuve**: `// TODO: Implémenter le Rate Limiting réel via Redis ou mémoire partagée`
|
||||
- **Fix**: Implémenter rate limiter avec Redis ou in-memory (DashMap)
|
||||
- **Effort**: M (4h)
|
||||
|
||||
### **P1-007: CORS non configuré**
|
||||
- **Impact**: XSS via requêtes cross-origin
|
||||
- **Fichier**: `src/main.rs` - Pas de middleware CORS
|
||||
- **Preuve**: Aucune configuration CORS dans Axum router
|
||||
- **Fix**: Ajouter middleware CORS avec origines whitelistées
|
||||
- **Effort**: S (1h)
|
||||
|
||||
### **P1-008: Secrets dans logs**
|
||||
- **Impact**: Fuite de secrets en production
|
||||
- **Fichier**: `src/config.rs:55` - Logging de `database_url` potentiellement
|
||||
- **Preuve**: `info!("Initializing database connection pool with config: {:?}", config)` - peut logger credentials
|
||||
- **Fix**: Masquer credentials dans logs (remplacer par `***`)
|
||||
- **Effort**: S (30min)
|
||||
|
||||
### **P1-009: WebSocket sans rate limiting**
|
||||
- **Impact**: Spam de messages, DoS
|
||||
- **Fichier**: `src/websocket/handler.rs:200-894`
|
||||
- **Preuve**: Aucune limite sur fréquence de messages WebSocket
|
||||
- **Fix**: Ajouter rate limiter par client (ex: max 100 messages/sec)
|
||||
- **Effort**: M (3h)
|
||||
|
||||
### **P1-010: Blacklist JWT en mémoire (non persistant)**
|
||||
- **Impact**: Tokens révoqués revalidés après redémarrage
|
||||
- **Fichier**: `src/jwt_manager.rs:142`
|
||||
- **Preuve**: `revoked_tokens: Arc<RwLock<HashSet<String>>>` - perdu au redémarrage
|
||||
- **Fix**: Persister blacklist dans Redis ou DB
|
||||
- **Effort**: M (2h)
|
||||
|
||||
## Autres Risques (P2/P3)
|
||||
|
||||
### P2-011: Validation de contenu basique
|
||||
- **Fichier**: `src/security_legacy.rs` - Regex patterns, mais module "legacy"
|
||||
- **Fix**: Utiliser `ammonia` (déjà dans deps) pour sanitization HTML
|
||||
|
||||
### P2-012: Pas de protection CSRF pour REST API
|
||||
- **Fichier**: `src/security/csrf.rs` existe mais non utilisé dans `main.rs`
|
||||
- **Fix**: Activer middleware CSRF pour routes POST/PUT/DELETE
|
||||
|
||||
### P2-013: Heartbeat WebSocket fixe (60s)
|
||||
- **Fichier**: `src/websocket/handler.rs:121`
|
||||
- **Preuve**: `keepalive_timeout = Duration::from_secs(60)` - hardcodé
|
||||
- **Fix**: Configurable via env var
|
||||
|
||||
### P2-014: Pas de validation de taille de message
|
||||
- **Fichier**: `src/websocket/handler.rs:214` - Pas de check `content.len()`
|
||||
- **Fix**: Valider `MAX_MESSAGE_LENGTH` (défini dans `env.example:57` mais non utilisé)
|
||||
|
||||
### P3-015: Logs verbeux en production
|
||||
- **Fichier**: `src/main.rs:84-101` - Logs avec `debug`/`info` même en prod
|
||||
- **Fix**: Utiliser `RUST_LOG` avec niveaux appropriés
|
||||
|
||||
---
|
||||
|
||||
# PHASE D — ROBUSTESSE & OBSERVABILITÉ
|
||||
|
||||
## Logs structurés
|
||||
|
||||
### ✅ Points positifs
|
||||
- `tracing` avec `tracing-subscriber` configuré
|
||||
- Format JSON en production (`main.rs:92`)
|
||||
- Format détaillé en dev (`main.rs:95-100`)
|
||||
- Champs structurés (`user_id = %user_id`, `conversation_id = %conversation_id`)
|
||||
|
||||
### ⚠️ Gaps
|
||||
- **Pas de `request_id` / `trace_id`** pour corrélation
|
||||
- **Pas de log rotation** configuré (mentionné dans `env.example:83` mais non implémenté)
|
||||
- **Secrets potentiellement loggés** (voir P1-008)
|
||||
|
||||
## Métriques
|
||||
|
||||
### ✅ Présent
|
||||
- Prometheus exporter (`/metrics`)
|
||||
- `ChatMetrics` avec compteurs/gauges
|
||||
- Métriques système (CPU, mémoire) via `sysinfo`
|
||||
|
||||
### ⚠️ Manquants
|
||||
- **Latence P50/P95/P99** pour requêtes DB
|
||||
- **Taux d'erreur par endpoint**
|
||||
- **Connexions WebSocket actives** (compteur)
|
||||
- **Taille de la blacklist JWT**
|
||||
|
||||
## Healthchecks
|
||||
|
||||
### ✅ Présent
|
||||
- `/health` - Basic health check
|
||||
- `/readyz` - Readiness (DB + RabbitMQ)
|
||||
|
||||
### ⚠️ Améliorations
|
||||
- **Liveness check** séparé (actuellement `/health` fait DB check aussi)
|
||||
- **Timeout configurable** pour healthchecks
|
||||
- **Circuit breaker** pour DB/RabbitMQ
|
||||
|
||||
## Timeouts & Retries
|
||||
|
||||
### ✅ Présent
|
||||
- Timeout WebSocket inactivité (60s)
|
||||
- Retry RabbitMQ (`max_retries`, `retry_interval_secs`)
|
||||
|
||||
### ⚠️ Manquants
|
||||
- **Timeout pour requêtes DB** (seulement `acquire_timeout` dans pool)
|
||||
- **Retry avec backoff exponentiel** pour DB
|
||||
- **Circuit breaker** pour services externes
|
||||
|
||||
## Gestion de charge
|
||||
|
||||
### ✅ Présent
|
||||
- Pool DB configuré (max 20, min 5)
|
||||
- Limite messages (100 max dans `fetch_history`)
|
||||
|
||||
### ⚠️ Gaps
|
||||
- **Pas de backpressure** pour WebSocket (clients peuvent spam)
|
||||
- **Pas de limite de connexions WebSocket simultanées**
|
||||
- **Pas de queue pour messages en attente**
|
||||
|
||||
## Migrations
|
||||
|
||||
### ✅ Présent
|
||||
- Migrations SQL dans `migrations/`
|
||||
- SQLx migrate supporté
|
||||
|
||||
### ⚠️ Problèmes
|
||||
- **16 fichiers de migration** - risque de confusion
|
||||
- **Migrations archivées** dans `migrations/archive/` - à nettoyer
|
||||
- **Pas de rollback** automatique en cas d'échec
|
||||
|
||||
---
|
||||
|
||||
# PHASE E — PERFORMANCE & SCALABILITÉ
|
||||
|
||||
## Hotspots identifiés
|
||||
|
||||
### 1. Broadcast WebSocket inefficace
|
||||
**Fichier**: `src/websocket/mod.rs:228-244`
|
||||
**Problème**: Itération sur tous les clients pour chaque broadcast
|
||||
```rust
|
||||
for client in clients.iter() {
|
||||
let conversations = client.conversations.read().await;
|
||||
if conversations.contains(&conversation_id) {
|
||||
let _ = client.send_message(message.clone()).await;
|
||||
}
|
||||
}
|
||||
```
|
||||
**Impact**: O(n) où n = nombre total de clients, même si seulement quelques-uns sont dans la conversation
|
||||
**Fix**: Index inversé `conversation_id -> Vec<client_id>` pour O(1) lookup
|
||||
**Effort**: M (3h)
|
||||
|
||||
### 2. Clonage de messages pour broadcast
|
||||
**Fichier**: `src/websocket/mod.rs:239`
|
||||
**Problème**: `message.clone()` pour chaque client
|
||||
**Impact**: Allocations inutiles pour gros messages
|
||||
**Fix**: Utiliser `Arc<OutgoingMessage>` ou sérialiser une fois, cloner bytes
|
||||
**Effort**: S (1h)
|
||||
|
||||
### 3. Requêtes DB N+1 potentielles
|
||||
**Fichier**: `src/repository/message_repository.rs:82-144`
|
||||
**Problème**: Pas de batch loading pour conversations multiples
|
||||
**Impact**: Latence élevée si plusieurs conversations chargées
|
||||
**Fix**: Ajouter méthode `get_multiple_conversations_messages`
|
||||
**Effort**: M (2h)
|
||||
|
||||
### 4. Blacklist JWT en mémoire (HashSet)
|
||||
**Fichier**: `src/jwt_manager.rs:142`
|
||||
**Problème**: `HashSet<String>` - recherche O(1) mais mémoire illimitée
|
||||
**Impact**: Fuite mémoire si beaucoup de tokens révoqués
|
||||
**Fix**: LRU cache ou TTL-based cleanup (déjà partiellement implémenté ligne 473)
|
||||
**Effort**: S (1h)
|
||||
|
||||
### 5. Parsing JSON répété
|
||||
**Fichier**: `src/websocket/handler.rs:210`
|
||||
**Problème**: `serde_json::from_str(text)` à chaque message
|
||||
**Impact**: CPU overhead pour gros payloads
|
||||
**Fix**: Cache de schémas JSON ou validation pré-compilée
|
||||
**Effort**: P3 (optimisation future)
|
||||
|
||||
## Streaming & I/O
|
||||
|
||||
### WebSocket
|
||||
- ✅ Utilisation de `tokio-tungstenite` (async)
|
||||
- ⚠️ Pas de compression WebSocket (per-message deflate)
|
||||
- ⚠️ Pas de fragmentation pour gros messages
|
||||
|
||||
### Base de données
|
||||
- ✅ Pool de connexions configuré
|
||||
- ⚠️ Pas de prepared statements caching explicite (SQLx le fait mais non configuré)
|
||||
- ⚠️ Pas de connection pooling metrics exposées
|
||||
|
||||
## Async Runtime
|
||||
|
||||
### ✅ Points positifs
|
||||
- Tokio avec features "full"
|
||||
- Pas de blocking dans async (sauf `sysinfo` potentiellement)
|
||||
|
||||
### ⚠️ Points d'attention
|
||||
- **Pas de configuration de worker threads** (utilise par défaut)
|
||||
- **Pas de metrics Tokio** (task spawns, park/unpark)
|
||||
|
||||
---
|
||||
|
||||
# PHASE F — LISTE EXHAUSTIVE DES PROBLÈMES
|
||||
|
||||
## P0 - CRITIQUES (Build / Sécurité / Crash)
|
||||
|
||||
| ID | Titre | Impact | Fichier | Fix minimal | Validation | Effort |
|
||||
|---|---|---|---|---|---|---|
|
||||
| **MOD-P0-001** | Conflit dépendances SQLx (build cassé) | Impossible de compiler | `Cargo.toml:43` | Aligner versions SQLx (0.8.6 partout) ou exclure SQLite de veza-common | `cargo check` passe | S (1h) |
|
||||
| **MOD-P0-002** | JWT Secret faible par défaut | Tokens compromis | `src/main.rs:158`, `env.example:20` | Validation min 64 chars, génération aléatoire si absent | Test: secret < 64 chars rejeté | S (30min) |
|
||||
| **MOD-P0-003** | Panics dans code production | Crash serveur | `src/main.rs:489`, `src/config.rs:192`, `src/env.rs:30,61` | Remplacer `expect/panic` par `Result` | Tests: pas de panic sur erreurs attendues | M (2h) |
|
||||
| **MOD-P0-004** | Validation JWT incomplète | Tokens invalides acceptés | `src/jwt_manager.rs:266-303` | Ajouter validation `nbf`, vérifier `iss/aud` strictement | Test: token avec `nbf` futur rejeté | S (1h) |
|
||||
|
||||
## P1 - HAUTE PRIORITÉ (Bugs fréquents / Dette bloquante)
|
||||
|
||||
| ID | Titre | Impact | Fichier | Fix minimal | Validation | Effort |
|
||||
|---|---|---|---|---|---|---|
|
||||
| **MOD-P1-005** | SQL Injection potentielle recherche | Exécution SQL arbitraire | `src/repository/message_repository.rs:563` | Échapper `%` et `_` ou utiliser paramètre bindé | Test: query avec `%` ne match pas littéralement | S (30min) |
|
||||
| **MOD-P1-006** | Rate limiting non implémenté | DoS, spam | `src/security/mod.rs:94` | Implémenter avec Redis ou DashMap | Test: 1000 req/sec rejetées | M (4h) |
|
||||
| **MOD-P1-007** | CORS non configuré | XSS cross-origin | `src/main.rs:246` | Ajouter middleware CORS Axum | Test: requête cross-origin rejetée sans header | S (1h) |
|
||||
| **MOD-P1-008** | Secrets dans logs | Fuite credentials | `src/config.rs:264` | Masquer credentials (remplacer par `***`) | Test: logs ne contiennent pas `password=` | S (30min) |
|
||||
| **MOD-P1-009** | WebSocket sans rate limiting | Spam messages, DoS | `src/websocket/handler.rs:200` | Rate limiter par client (100 msg/sec) | Test: client spammant rejeté | M (3h) |
|
||||
| **MOD-P1-010** | Blacklist JWT non persistant | Tokens révoqués revalidés | `src/jwt_manager.rs:142` | Persister dans Redis ou DB | Test: token révoqué reste révoqué après restart | M (2h) |
|
||||
| **MOD-P1-011** | Pas de validation taille message | Messages trop longs | `src/websocket/handler.rs:214` | Valider `MAX_MESSAGE_LENGTH` (2000 chars) | Test: message > 2000 chars rejeté | S (30min) |
|
||||
| **MOD-P1-012** | Broadcast WebSocket O(n) | Performance dégradée | `src/websocket/mod.rs:228` | Index inversé conversation -> clients | Test: broadcast à 1000 clients < 10ms | M (3h) |
|
||||
| **MOD-P1-013** | Pas de limite connexions WS | DoS par connexions | `src/websocket/manager.rs:209` | Limiter max connexions (ex: 10000) | Test: 10001ème connexion rejetée | S (1h) |
|
||||
| **MOD-P1-014** | Healthcheck timeout non configuré | Healthcheck bloque | `src/main.rs:298` | Timeout configurable (ex: 5s) | Test: DB lent retourne 503 | S (30min) |
|
||||
|
||||
## P2 - MOYENNE PRIORITÉ (Qualité / Maintenabilité)
|
||||
|
||||
| ID | Titre | Impact | Fichier | Fix minimal | Validation | Effort |
|
||||
|---|---|---|---|---|---|---|
|
||||
| **MOD-P2-015** | Pas de request_id/trace_id | Debug difficile | `src/main.rs:82` | Ajouter middleware tracing avec `request_id` | Test: logs contiennent `request_id` | M (2h) |
|
||||
| **MOD-P2-016** | Log rotation non implémenté | Disque plein | `env.example:83` | Implémenter avec `tracing-appender` | Test: logs rotent après 100MB | M (2h) |
|
||||
| **MOD-P2-017** | Métriques latence manquantes | Monitoring incomplet | `src/monitoring.rs` | Ajouter histogrammes P50/P95/P99 | Test: métriques exposées sur `/metrics` | M (2h) |
|
||||
| **MOD-P2-018** | Circuit breaker manquant | Cascading failures | N/A | Implémenter avec `tower` ou custom | Test: DB down -> circuit ouvert | M (4h) |
|
||||
| **MOD-P2-019** | Migrations multiples confuses | Risque d'erreur | `migrations/` (16 fichiers) | Nettoyer migrations archivées | Test: migrations appliquent dans ordre | S (1h) |
|
||||
| **MOD-P2-020** | Code mort (security_legacy) | Maintenance inutile | `src/security_legacy.rs` | Supprimer ou documenter usage | Test: build sans ce fichier | S (30min) |
|
||||
| **MOD-P2-021** | CSRF non activé | CSRF attacks | `src/security/csrf.rs` | Activer middleware CSRF | Test: requête sans token CSRF rejetée | M (2h) |
|
||||
| **MOD-P2-022** | Heartbeat WebSocket hardcodé | Non configurable | `src/websocket/handler.rs:121` | Configurable via env var | Test: heartbeat = 30s fonctionne | S (30min) |
|
||||
| **MOD-P2-023** | Clonage messages broadcast | Allocations inutiles | `src/websocket/mod.rs:239` | Utiliser `Arc<OutgoingMessage>` | Test: broadcast 100 clients < 5ms | S (1h) |
|
||||
| **MOD-P2-024** | Requêtes N+1 potentielles | Latence élevée | `src/repository/message_repository.rs` | Batch loading conversations | Test: 10 conversations < 100ms | M (2h) |
|
||||
|
||||
## P3 - BASSE PRIORITÉ (Cosmétique / Refactors)
|
||||
|
||||
| ID | Titre | Impact | Fichier | Fix minimal | Validation | Effort |
|
||||
|---|---|---|---|---|---|---|
|
||||
| **MOD-P3-025** | Mix français/anglais | Confusion | Multiple | Standardiser sur anglais | Review code | L (8h) |
|
||||
| **MOD-P3-026** | Logs verbeux en production | Bruit | `src/main.rs:84` | Utiliser `RUST_LOG` approprié | Test: prod logs = info seulement | S (30min) |
|
||||
| **MOD-P3-027** | Pas de compression WebSocket | Bande passante | `src/websocket/handler.rs:47` | Activer per-message deflate | Test: messages compressés | M (2h) |
|
||||
| **MOD-P3-028** | Tests ignorés nombreux | Couverture faible | Multiple `#[ignore]` | Setup DB de test ou mocks | Test: tous tests passent | L (8h) |
|
||||
| **MOD-P3-029** | Documentation rustdoc incomplète | DX | Multiple | Compléter doc comments | Test: `cargo doc` sans warnings | M (4h) |
|
||||
| **MOD-P3-030** | Pas de benchmarks | Performance non mesurée | N/A | Ajouter `criterion` benchmarks | Test: benchmarks passent | M (4h) |
|
||||
|
||||
---
|
||||
|
||||
# PHASE G — PLAN D'EXÉCUTION
|
||||
|
||||
## Checklist P0 (Ordre strict)
|
||||
|
||||
1. ✅ **MOD-P0-001**: Fix conflit SQLx
|
||||
- Modifier `veza-common/Cargo.toml` pour utiliser `sqlx = "0.8.6"` OU exclure SQLite
|
||||
- Vérifier: `cargo check` passe
|
||||
- **PR**: `fix: align sqlx versions to 0.8.6`
|
||||
|
||||
2. ✅ **MOD-P0-002**: Validation JWT Secret
|
||||
- Modifier `src/env.rs` pour valider min 64 chars
|
||||
- Générer secret aléatoire si absent (dev seulement)
|
||||
- **PR**: `security: enforce strong JWT secret (min 64 chars)`
|
||||
|
||||
3. ✅ **MOD-P0-003**: Remplacer panics
|
||||
- `src/main.rs:489` -> `Result` pour signal handlers
|
||||
- `src/config.rs:192` -> Supprimer `panic!` ou rendre test-only
|
||||
- `src/env.rs:30,61` -> Retourner `Result` au lieu de `panic!`
|
||||
- **PR**: `fix: replace panics with Result types`
|
||||
|
||||
4. ✅ **MOD-P0-004**: Validation JWT complète
|
||||
- Ajouter validation `nbf` dans `validate_access_token`
|
||||
- Vérifier `iss`/`aud` strictement (déjà fait partiellement)
|
||||
- **PR**: `security: add nbf validation to JWT tokens`
|
||||
|
||||
## Checklist P1 (Par lots)
|
||||
|
||||
### Lot 1: Sécurité WebSocket (1 sprint)
|
||||
- **MOD-P1-009**: Rate limiting WebSocket
|
||||
- **MOD-P1-011**: Validation taille message
|
||||
- **MOD-P1-013**: Limite connexions WS
|
||||
- **PR**: `security: add rate limiting and validation for WebSocket`
|
||||
|
||||
### Lot 2: Sécurité REST (1 sprint)
|
||||
- **MOD-P1-006**: Rate limiting REST
|
||||
- **MOD-P1-007**: CORS middleware
|
||||
- **MOD-P1-008**: Masquer secrets dans logs
|
||||
- **PR**: `security: add rate limiting, CORS, and secure logging`
|
||||
|
||||
### Lot 3: Persistance & Robustesse (1 sprint)
|
||||
- **MOD-P1-010**: Blacklist JWT persistant
|
||||
- **MOD-P1-014**: Healthcheck timeout
|
||||
- **MOD-P2-015**: Request ID tracing
|
||||
- **PR**: `feat: persistent JWT blacklist, configurable healthcheck, request tracing`
|
||||
|
||||
### Lot 4: Performance WebSocket (1 sprint)
|
||||
- **MOD-P1-012**: Index inversé broadcast
|
||||
- **MOD-P2-023**: Arc pour messages
|
||||
- **PR**: `perf: optimize WebSocket broadcast with index and Arc`
|
||||
|
||||
### Lot 5: Base de données (1 sprint)
|
||||
- **MOD-P1-005**: Fix SQL injection recherche
|
||||
- **MOD-P2-024**: Batch loading conversations
|
||||
- **PR**: `fix: SQL injection in search, add batch loading`
|
||||
|
||||
## Quick Wins (≤ 1h chacun)
|
||||
|
||||
1. **MOD-P2-022**: Heartbeat configurable (30min)
|
||||
2. **MOD-P2-020**: Supprimer code mort (30min)
|
||||
3. **MOD-P3-026**: Logs production (30min)
|
||||
4. **MOD-P2-019**: Nettoyer migrations (1h)
|
||||
|
||||
## Tests à ajouter en priorité
|
||||
|
||||
### Tests de sécurité
|
||||
- [ ] Test: JWT secret < 64 chars rejeté
|
||||
- [ ] Test: Token avec `nbf` futur rejeté
|
||||
- [ ] Test: Recherche avec `%` ne match pas littéralement
|
||||
- [ ] Test: Rate limiting (1000 req/sec rejetées)
|
||||
- [ ] Test: CORS sans header rejeté
|
||||
- [ ] Test: Message > 2000 chars rejeté
|
||||
|
||||
### Tests de robustesse
|
||||
- [ ] Test: DB down -> healthcheck 503
|
||||
- [ ] Test: Token révoqué reste révoqué après restart
|
||||
- [ ] Test: Broadcast 1000 clients < 10ms
|
||||
- [ ] Test: 10001ème connexion WS rejetée
|
||||
|
||||
### Tests E2E
|
||||
- [ ] Test: Connexion WebSocket complète (join, send, leave)
|
||||
- [ ] Test: Édition/suppression message
|
||||
- [ ] Test: Read receipts et delivered status
|
||||
- [ ] Test: Recherche et synchronisation
|
||||
|
||||
## PR Plan (Découpe proposée)
|
||||
|
||||
1. **`fix: resolve sqlx dependency conflict`** (P0-001)
|
||||
2. **`security: enforce strong JWT secret and validation`** (P0-002, P0-004)
|
||||
3. **`fix: replace panics with Result types`** (P0-003)
|
||||
4. **`security: add rate limiting and validation for WebSocket`** (P1-009, P1-011, P1-013)
|
||||
5. **`security: add rate limiting, CORS, and secure logging`** (P1-006, P1-007, P1-008)
|
||||
6. **`feat: persistent JWT blacklist and request tracing`** (P1-010, P1-014, P2-015)
|
||||
7. **`perf: optimize WebSocket broadcast`** (P1-012, P2-023)
|
||||
8. **`fix: SQL injection and batch loading`** (P1-005, P2-024)
|
||||
9. **`chore: cleanup migrations and dead code`** (P2-019, P2-020)
|
||||
10. **`feat: add observability improvements`** (P2-016, P2-017, P2-018)
|
||||
|
||||
---
|
||||
|
||||
# RÉSUMÉ EXÉCUTIF
|
||||
|
||||
## Statut global: 🔴 **NON PRODUCTION-READY**
|
||||
|
||||
### Bloqueurs (P0): 4
|
||||
- Build cassé (conflit SQLx)
|
||||
- Sécurité JWT faible
|
||||
- Panics en production
|
||||
- Validation JWT incomplète
|
||||
|
||||
### Critiques (P1): 10
|
||||
- SQL injection potentielle
|
||||
- Rate limiting manquant
|
||||
- CORS non configuré
|
||||
- Secrets dans logs
|
||||
- Performance WebSocket
|
||||
|
||||
### Améliorations (P2): 10
|
||||
- Observabilité incomplète
|
||||
- Robustesse (circuit breakers, timeouts)
|
||||
- Code mort
|
||||
|
||||
### Cosmétiques (P3): 6
|
||||
- Documentation
|
||||
- Tests
|
||||
- Refactors
|
||||
|
||||
## Estimation totale
|
||||
|
||||
- **P0**: 4.5h (1 sprint)
|
||||
- **P1**: 20h (2-3 sprints)
|
||||
- **P2**: 18h (2 sprints)
|
||||
- **P3**: 24h (3 sprints)
|
||||
|
||||
**Total**: ~66.5h (~8-9 jours de travail)
|
||||
|
||||
## Recommandation
|
||||
|
||||
**Priorité immédiate**: Fixer P0 (build + sécurité) avant toute autre chose.
|
||||
|
||||
**Roadmap suggérée**:
|
||||
1. **Sprint 1**: P0 complet (build + sécurité critique)
|
||||
2. **Sprint 2-3**: P1 sécurité (rate limiting, CORS, validation)
|
||||
3. **Sprint 4-5**: P1 performance + P2 observabilité
|
||||
4. **Sprint 6+**: P2 robustesse + P3 refactors
|
||||
|
||||
---
|
||||
|
||||
**Fin du rapport d'audit**
|
||||
|
||||
|
|
@ -1,252 +0,0 @@
|
|||
#file: backend/modules/chat_server/Cargo.toml
|
||||
|
||||
[package]
|
||||
name = "chat_server"
|
||||
version = "0.2.0"
|
||||
edition = "2021"
|
||||
authors = ["Veza Team <dev@veza-chat.com>"]
|
||||
description = "Serveur de chat WebSocket sécurisé et haute performance"
|
||||
repository = "https://github.com/veza/chat-server"
|
||||
license = "MIT"
|
||||
keywords = ["websocket", "chat", "real-time", "rust", "tokio"]
|
||||
categories = ["network-programming", "web-programming::websocket"]
|
||||
readme = "README.md"
|
||||
|
||||
[lib]
|
||||
name = "chat_server"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "chat-server"
|
||||
path = "src/main.rs"
|
||||
|
||||
# Les binaires de test sont dans le dossier target/debug et ne sont pas définis ici
|
||||
|
||||
[dependencies]
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# RUNTIME ASYNCHRONE ET RÉSEAU
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
tokio = { version = "1.35", features = [
|
||||
"full", # Toutes les fonctionnalités
|
||||
"tracing", # Support tracing
|
||||
"signal", # Signaux système pour shutdown gracieux
|
||||
] }
|
||||
tokio-tungstenite = "0.21" # WebSocket server/client
|
||||
tungstenite = "0.21" # Core WebSocket
|
||||
futures-util = "0.3" # Utilitaires futures
|
||||
hyper = { version = "1.0", features = ["full"] } # Client HTTP pour webhooks
|
||||
axum = { version = "0.8", features = ["macros", "ws"] } # Framework web moderne
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# BASE DE DONNÉES ET CACHE
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
sqlx = { version = "0.8", features = [
|
||||
"postgres", # Support PostgreSQL
|
||||
"runtime-tokio-rustls", # Runtime async avec TLS rustls
|
||||
"chrono", # Support des types de date
|
||||
"uuid", # Support UUID
|
||||
"json", # Support JSON/JSONB
|
||||
"migrate", # Migrations de base de données
|
||||
"macros", # Macros query!
|
||||
] }
|
||||
redis = { version = "0.32", features = [
|
||||
"tokio-comp", # Support Tokio
|
||||
"connection-manager", # Gestionnaire de connexions
|
||||
], optional = true }
|
||||
lz4 = "1.24" # Compression pour message storage
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# SÉRIALISATION ET FORMATS
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
toml = "0.9" # Configuration TOML
|
||||
rmp-serde = "1.1" # MessagePack pour cache efficace
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# AUTHENTIFICATION ET SÉCURITÉ
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
jsonwebtoken = { version = "10", features = ["aws_lc_rs"] } # JWT tokens
|
||||
bcrypt = "0.17" # Hachage de mots de passe
|
||||
ring = "0.17" # Cryptographie (signatures, HMAC)
|
||||
argon2 = "0.5" # Hachage de mots de passe moderne (alternative à bcrypt)
|
||||
sha2 = "0.10" # Hachage SHA-2
|
||||
totp-rs = { version = "5.4", features = ["qr"] } # TOTP 2FA
|
||||
qrcode = "0.14" # Génération QR codes pour 2FA
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# TYPES ET UTILITAIRES
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
veza-common = { path = "../veza-common" }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
uuid = { version = "1.6", features = ["v4", "serde"] }
|
||||
url = { version = "2.5", features = ["serde"] } # Parsing d'URLs
|
||||
percent-encoding = "2.3" # Encodage URL
|
||||
base64 = "0.21" # Encodage base64
|
||||
hex = "0.4" # Encodage hexadécimal
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# VALIDATION ET NETTOYAGE
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
regex = "1.10" # Expressions régulières
|
||||
validator = { version = "0.20", features = ["derive"] } # Validation des données
|
||||
ammonia = "3.3" # Nettoyage HTML/XSS
|
||||
linkify = "0.10" # Détection automatique de liens
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# GESTION D'ERREURS ET LOGGING
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
thiserror = "2.0" # Macros d'erreurs
|
||||
anyhow = "1.0" # Gestion d'erreurs contextuelles
|
||||
async-trait = "0.1" # Async trait support
|
||||
tracing = "0.1" # Logging structuré
|
||||
tracing-subscriber = { version = "0.3", features = [
|
||||
"env-filter", # Filtrage par variables d'env
|
||||
"fmt", # Formatage console
|
||||
"json", # Format JSON pour production
|
||||
"ansi", # Couleurs ANSI
|
||||
"chrono", # Timestamps
|
||||
] }
|
||||
tracing-appender = "0.2" # Rotation des logs
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# CONFIGURATION ET ENVIRONNEMENT
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
dotenvy = "0.15" # Variables d'environnement (.env)
|
||||
config = "0.15" # Configuration multi-sources
|
||||
clap = { version = "4.4", features = ["derive", "env"] } # CLI arguments
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# PERFORMANCE ET MONITORING
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
metrics = { version = "0.22", optional = true } # Métriques de performance
|
||||
metrics-exporter-prometheus = { version = "0.13", optional = true } # Export Prometheus
|
||||
dashmap = "6.1" # HashMap concurrent
|
||||
parking_lot = "0.12" # Mutex plus performants
|
||||
rayon = "1.10" # Parallel processing pour batching
|
||||
bytes = "1.6" # Zero-copy message handling
|
||||
once_cell = "1.19" # Initialisation paresseuse thread-safe
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# FONCTIONNALITÉS AVANCÉES
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
notify = "8.2" # Surveillance système de fichiers
|
||||
image = { version = "0.24", features = ["png", "jpeg", "webp"], optional = true } # Traitement d'images
|
||||
infer = { version = "0.15", optional = true } # Détection de type de fichier
|
||||
mime = "0.3" # Types MIME
|
||||
tempfile = { version = "3.8", optional = true } # Fichiers temporaires
|
||||
zip = { version = "0.6", optional = true } # Archives ZIP
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# gRPC ET COMMUNICATION INTER-SERVICES
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
tonic = { version = "0.11", features = ["transport", "prost"] }
|
||||
prost = "0.12"
|
||||
prost-types = "0.14"
|
||||
tokio-stream = "0.1"
|
||||
|
||||
# RabbitMQ client (ORIGIN Architecture - Event Bus)
|
||||
lapin = "2.3"
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# INTÉGRATIONS EXTERNES (OPTIONAL)
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
lettre = { version = "0.11", features = ["tokio1-native-tls"], optional = true } # Envoi d'emails
|
||||
reqwest = { version = "0.11", features = ["json", "rustls-tls"], optional = true } # Client HTTP
|
||||
webhook = { version = "2.1", optional = true } # Webhooks sortants
|
||||
sysinfo = "0.37.2"
|
||||
|
||||
[dev-dependencies]
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# DÉPENDANCES DE TEST ET DÉVELOPPEMENT
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
tokio-test = "0.4" # Utilitaires de test async
|
||||
mockall = "0.12" # Mocking
|
||||
proptest = "1.4" # Property testing
|
||||
criterion = { version = "0.5", features = ["html_reports"] } # Benchmarks
|
||||
insta = "1.34" # Tests de snapshot
|
||||
test-log = "0.2" # Logging dans les tests
|
||||
pretty_assertions = "1.4" # Assertions plus lisibles
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.11" # Génération de code protobuf
|
||||
|
||||
[features]
|
||||
# Fonctionnalités par défaut
|
||||
default = [
|
||||
"redis-cache",
|
||||
"file-uploads",
|
||||
"webhooks",
|
||||
"metrics",
|
||||
"email"
|
||||
]
|
||||
|
||||
# Cache Redis (désactivable pour dev/test)
|
||||
redis-cache = ["dep:redis"]
|
||||
|
||||
# Upload de fichiers avec validation
|
||||
file-uploads = ["dep:image", "dep:infer", "dep:tempfile", "dep:zip"]
|
||||
|
||||
# Support des webhooks sortants
|
||||
webhooks = ["dep:reqwest", "dep:webhook"]
|
||||
|
||||
# Métriques et monitoring
|
||||
metrics = ["dep:metrics", "dep:metrics-exporter-prometheus"]
|
||||
|
||||
# Envoi d'emails
|
||||
email = ["dep:lettre"]
|
||||
|
||||
# Mode de développement avec fonctionnalités de debug
|
||||
dev = ["tokio/test-util"]
|
||||
|
||||
# Version sans dépendances optionnelles (pour déploiements légers)
|
||||
minimal = []
|
||||
|
||||
# Tests de base de données (nécessite une DB active)
|
||||
test-db = []
|
||||
|
||||
[profile.dev]
|
||||
# Configuration pour le développement
|
||||
opt-level = 0 # Pas d'optimisation pour compilation rapide
|
||||
debug = true # Symboles de debug complets
|
||||
split-debuginfo = "unpacked" # Debug info séparé (macOS/Linux)
|
||||
overflow-checks = true # Vérifications d'overflow
|
||||
lto = false # Pas de LTO pour compilation rapide
|
||||
|
||||
[profile.release]
|
||||
# Configuration pour la production
|
||||
opt-level = 3 # Optimisation maximale
|
||||
debug = false # Pas de symboles de debug
|
||||
strip = true # Supprimer les symboles
|
||||
lto = "fat" # Link Time Optimization complète
|
||||
codegen-units = 1 # Compilation en une seule unité pour optimisation
|
||||
panic = "abort" # Abort au lieu d'unwind pour performance
|
||||
|
||||
[profile.bench]
|
||||
# Configuration pour les benchmarks
|
||||
inherits = "release"
|
||||
debug = true # Conserver debug pour profiling
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
# MÉTADONNÉES CARGO
|
||||
# ═══════════════════════════════════════════════════════════════════════
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
# Configuration pour docs.rs
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
# Badges pour crates.io
|
||||
[badges]
|
||||
maintenance = { status = "actively-developed" }
|
||||
|
||||
# Scripts personnalisés
|
||||
[package.metadata.scripts]
|
||||
# cargo run-script db-setup
|
||||
db-setup = "sqlx database create && sqlx migrate run"
|
||||
# cargo run-script test-all
|
||||
test-all = "cargo test --all-features --all-targets"
|
||||
# cargo run-script security-audit
|
||||
security-audit = "cargo audit"
|
||||
|
||||
|
|
@ -1,65 +0,0 @@
|
|||
# Build stage - context: repo root (for veza-common path dep)
|
||||
FROM rust:alpine AS builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Copy veza-common (path dependency) and chat-server
|
||||
COPY veza-common ./veza-common
|
||||
COPY veza-chat-server/Cargo.toml veza-chat-server/Cargo.lock ./veza-chat-server/
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache musl-dev ca-certificates perl make pkgconfig openssl-dev protobuf-dev openssl-libs-static
|
||||
|
||||
WORKDIR /build/veza-chat-server
|
||||
|
||||
# Fetch dependencies (this layer will be cached if Cargo.toml/Cargo.lock don't change)
|
||||
RUN cargo fetch --locked
|
||||
|
||||
# Copy source code
|
||||
COPY veza-chat-server/src ./src
|
||||
COPY veza-chat-server/migrations ./migrations
|
||||
# SQLx offline build (v0.101) - no DB needed at compile time
|
||||
COPY veza-chat-server/sqlx-data.json ./
|
||||
ENV SQLX_OFFLINE=true
|
||||
COPY veza-chat-server/proto ./proto
|
||||
COPY veza-chat-server/build.rs ./
|
||||
|
||||
# Build the application
|
||||
# Using --locked to ensure reproducible builds
|
||||
RUN cargo build --release --locked --target x86_64-unknown-linux-musl
|
||||
|
||||
# Runtime stage
|
||||
FROM alpine:latest
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk --no-cache add ca-certificates tzdata && \
|
||||
# Add wget for health checks
|
||||
apk --no-cache add wget && \
|
||||
# Clean up apk cache
|
||||
rm -rf /var/cache/apk/*
|
||||
|
||||
# Create non-root user for security
|
||||
RUN addgroup -g 1001 -S app && \
|
||||
adduser -S app -u 1001 -G app -h /app -s /bin/sh
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder --chown=app:app /build/veza-chat-server/target/x86_64-unknown-linux-musl/release/chat-server /app/chat-server
|
||||
|
||||
# Copy migrations if they exist
|
||||
COPY --from=builder --chown=app:app /build/veza-chat-server/migrations ./migrations
|
||||
|
||||
# Switch to app user
|
||||
USER app
|
||||
|
||||
# Expose port
|
||||
EXPOSE 8081
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:8081/health || exit 1
|
||||
|
||||
# Run the application
|
||||
CMD ["./chat-server"]
|
||||
|
|
@ -1,66 +0,0 @@
|
|||
# Production Dockerfile for Chat Server
|
||||
# Optimized for smaller size and security
|
||||
|
||||
# Build stage
|
||||
FROM rust:1.84-alpine AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache musl-dev ca-certificates
|
||||
|
||||
# Copy Cargo files first for better caching
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
|
||||
# Fetch dependencies (this layer will be cached if Cargo.toml/Cargo.lock don't change)
|
||||
RUN cargo fetch --locked
|
||||
|
||||
# Copy source code
|
||||
COPY src ./src
|
||||
COPY migrations ./migrations
|
||||
COPY build.rs ./
|
||||
|
||||
# Build the application with optimizations
|
||||
# - --locked: ensures reproducible builds
|
||||
# - --target x86_64-unknown-linux-musl: static binary for alpine
|
||||
# - Strip symbols in release profile (configured in Cargo.toml)
|
||||
RUN cargo build --release --locked --target x86_64-unknown-linux-musl && \
|
||||
# Strip the binary to reduce size
|
||||
strip /app/target/x86_64-unknown-linux-musl/release/chat-server
|
||||
|
||||
# Runtime stage - minimal alpine
|
||||
FROM alpine:3.21
|
||||
|
||||
# Install only runtime dependencies
|
||||
RUN apk --no-cache add ca-certificates tzdata && \
|
||||
# Add wget for health checks
|
||||
apk --no-cache add wget && \
|
||||
# Clean up apk cache
|
||||
rm -rf /var/cache/apk/*
|
||||
|
||||
# Create non-root user for security
|
||||
RUN addgroup -g 1001 -S app && \
|
||||
adduser -S app -u 1001 -G app -h /app -s /bin/sh
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder --chown=app:app /app/target/x86_64-unknown-linux-musl/release/chat-server /app/chat-server
|
||||
|
||||
# Copy migrations if they exist
|
||||
COPY --from=builder --chown=app:app /app/migrations ./migrations 2>/dev/null || true
|
||||
|
||||
# Switch to app user
|
||||
USER app
|
||||
|
||||
# Expose port
|
||||
EXPOSE 8081
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:8081/health || exit 1
|
||||
|
||||
# Run the application
|
||||
ENTRYPOINT ["./chat-server"]
|
||||
|
||||
|
|
@ -1,221 +0,0 @@
|
|||
# Makefile - Chat Server Phase 4 - Optimisations avancées
|
||||
#
|
||||
# Ce Makefile gère les opérations pour les optimisations Phase 4 :
|
||||
# - Connection Pool 10k connexions
|
||||
# - Persistence < 5ms
|
||||
# - Modération automatique 99.9%
|
||||
# - Analytics temps réel
|
||||
|
||||
.PHONY: help build test validate clean dev docker phase4
|
||||
|
||||
# Couleurs pour l'affichage
|
||||
BLUE = \033[0;34m
|
||||
GREEN = \033[0;32m
|
||||
YELLOW = \033[1;33m
|
||||
RED = \033[0;31m
|
||||
NC = \033[0m # No Color
|
||||
|
||||
# Configuration
|
||||
RUST_LOG ?= info
|
||||
CHAT_PORT ?= 3030
|
||||
GRPC_PORT ?= 50051
|
||||
|
||||
help: ## Affiche l'aide
|
||||
@echo -e "$(BLUE)🎯 MAKEFILE CHAT SERVER - PHASE 4 OPTIMISATIONS$(NC)"
|
||||
@echo "=================================================="
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)📋 COMMANDES DISPONIBLES :$(NC)"
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
|
||||
awk 'BEGIN {FS = ":.*?## "}; {printf " $(GREEN)%-20s$(NC) %s\n", $$1, $$2}'
|
||||
@echo ""
|
||||
@echo -e "$(BLUE)🚀 OBJECTIFS PHASE 4 :$(NC)"
|
||||
@echo " • Connection Pool : 10,000 connexions simultanées"
|
||||
@echo " • Latence Persistence : < 5ms (L1<1ms, L2<3ms, L3<5ms)"
|
||||
@echo " • Détection Spam : 99.9% efficacité"
|
||||
@echo " • Analytics : Temps réel"
|
||||
|
||||
build: ## Compile le chat server en mode optimisé
|
||||
@echo -e "$(BLUE)🔧 Compilation Chat Server Phase 4...$(NC)"
|
||||
cargo build --release
|
||||
@echo -e "$(GREEN)✅ Compilation terminée$(NC)"
|
||||
|
||||
test: ## Lance les tests unitaires
|
||||
@echo -e "$(BLUE)🧪 Tests Chat Server Phase 4...$(NC)"
|
||||
cargo test --release
|
||||
@echo -e "$(GREEN)✅ Tests terminés$(NC)"
|
||||
|
||||
validate: ## Valide les optimisations Phase 4
|
||||
@echo -e "$(BLUE)✅ Validation Phase 4...$(NC)"
|
||||
@chmod +x scripts/validate_phase4.sh
|
||||
@./scripts/validate_phase4.sh || echo -e "$(YELLOW)⚠️ Validation partielle$(NC)"
|
||||
|
||||
clean: ## Nettoie les fichiers de build
|
||||
@echo -e "$(BLUE)🧹 Nettoyage...$(NC)"
|
||||
cargo clean
|
||||
@echo -e "$(GREEN)✅ Nettoyage terminé$(NC)"
|
||||
|
||||
dev: build ## Lance le serveur en mode développement
|
||||
@echo -e "$(BLUE)🚀 Démarrage Chat Server Phase 4...$(NC)"
|
||||
@echo -e "$(YELLOW)📡 Chat WebSocket : ws://localhost:$(CHAT_PORT)/ws$(NC)"
|
||||
@echo -e "$(YELLOW)🔗 gRPC API : localhost:$(GRPC_PORT)$(NC)"
|
||||
@echo -e "$(YELLOW)📊 Métriques : http://localhost:$(CHAT_PORT)/metrics$(NC)"
|
||||
@echo ""
|
||||
RUST_LOG=$(RUST_LOG) ./target/release/veza-chat-server
|
||||
|
||||
dev-debug: ## Lance le serveur avec logs détaillés
|
||||
@echo -e "$(BLUE)🔍 Chat Server Phase 4 (Debug)...$(NC)"
|
||||
RUST_LOG=debug ./target/release/veza-chat-server
|
||||
|
||||
bench: ## Tests de performance
|
||||
@echo -e "$(BLUE)⚡ Benchmarks Phase 4...$(NC)"
|
||||
cargo bench
|
||||
@echo -e "$(GREEN)✅ Benchmarks terminés$(NC)"
|
||||
|
||||
docker: ## Build l'image Docker optimisée
|
||||
@echo -e "$(BLUE)🐳 Build Docker Chat Server Phase 4...$(NC)"
|
||||
docker build -t veza-chat-server:phase4 .
|
||||
@echo -e "$(GREEN)✅ Image Docker créée$(NC)"
|
||||
|
||||
docker-run: docker ## Lance le container Docker
|
||||
@echo -e "$(BLUE)🐳 Démarrage Container Chat Server...$(NC)"
|
||||
docker run -p $(CHAT_PORT):$(CHAT_PORT) -p $(GRPC_PORT):$(GRPC_PORT) \
|
||||
-e RUST_LOG=$(RUST_LOG) \
|
||||
veza-chat-server:phase4
|
||||
|
||||
metrics: ## Affiche les métriques de performance
|
||||
@echo -e "$(BLUE)📊 Métriques Chat Server Phase 4...$(NC)"
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)🔍 ANALYSE BINAIRE :$(NC)"
|
||||
@if [ -f target/release/veza-chat-server ]; then \
|
||||
echo -e " Taille binaire : $$(du -h target/release/veza-chat-server | cut -f1)"; \
|
||||
echo -e " Dernière build : $$(stat -c %y target/release/veza-chat-server | cut -d. -f1)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ Binaire non trouvé - exécuter 'make build'$(NC)"; \
|
||||
fi
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)🏗️ ARCHITECTURE MODULES :$(NC)"
|
||||
@for module in connection_pool advanced_moderation optimized_persistence; do \
|
||||
if [ -f src/$$module.rs ]; then \
|
||||
lines=$$(wc -l < src/$$module.rs); \
|
||||
size=$$(du -h src/$$module.rs | cut -f1); \
|
||||
echo -e " $$module : $$lines lignes ($$size)"; \
|
||||
fi; \
|
||||
done
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)⚡ OPTIMISATIONS DÉTECTÉES :$(NC)"
|
||||
@if grep -q "max_connections.*10000" src/connection_pool.rs 2>/dev/null; then \
|
||||
echo -e " $(GREEN)✅ Connection Pool 10k$(NC)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ Connection Pool$(NC)"; \
|
||||
fi
|
||||
@if grep -q "l1_cache.*l2_cache" src/optimized_persistence.rs 2>/dev/null; then \
|
||||
echo -e " $(GREEN)✅ Cache multi-niveaux$(NC)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ Cache multi-niveaux$(NC)"; \
|
||||
fi
|
||||
@if grep -q "detect_spam.*detect_toxicity" src/advanced_moderation.rs 2>/dev/null; then \
|
||||
echo -e " $(GREEN)✅ Modération ML$(NC)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ Modération ML$(NC)"; \
|
||||
fi
|
||||
|
||||
status: ## Affiche le statut du développement Phase 4
|
||||
@echo -e "$(BLUE)📋 STATUT DÉVELOPPEMENT PHASE 4$(NC)"
|
||||
@echo "=================================="
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)📁 MODULES PHASE 4 :$(NC)"
|
||||
@for module in connection_pool advanced_moderation optimized_persistence; do \
|
||||
if [ -f src/$$module.rs ]; then \
|
||||
echo -e " $(GREEN)✅ src/$$module.rs$(NC)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ src/$$module.rs$(NC)"; \
|
||||
fi; \
|
||||
done
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)🔧 ÉTAT COMPILATION :$(NC)"
|
||||
@if [ -f target/release/veza-chat-server ]; then \
|
||||
echo -e " $(GREEN)✅ Binaire optimisé disponible$(NC)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ Binaire à compiler (make build)$(NC)"; \
|
||||
fi
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)🧪 TESTS :$(NC)"
|
||||
@if cargo test --release >/dev/null 2>&1; then \
|
||||
echo -e " $(GREEN)✅ Tests passent$(NC)"; \
|
||||
else \
|
||||
echo -e " $(RED)❌ Tests échouent$(NC)"; \
|
||||
fi
|
||||
|
||||
install-deps: ## Installe les dépendances système
|
||||
@echo -e "$(BLUE)📦 Installation dépendances Phase 4...$(NC)"
|
||||
@echo -e "$(YELLOW)🔍 Vérification dépendances Rust...$(NC)"
|
||||
@if ! command -v cargo >/dev/null 2>&1; then \
|
||||
echo -e "$(RED)❌ Rust non installé$(NC)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo -e " $(GREEN)✅ Rust/Cargo disponible$(NC)"
|
||||
@if ! command -v redis-cli >/dev/null 2>&1; then \
|
||||
echo -e "$(YELLOW)⚠️ Redis CLI recommandé pour tests$(NC)"; \
|
||||
else \
|
||||
echo -e " $(GREEN)✅ Redis CLI disponible$(NC)"; \
|
||||
fi
|
||||
@if ! command -v psql >/dev/null 2>&1; then \
|
||||
echo -e "$(YELLOW)⚠️ PostgreSQL CLI recommandé pour tests$(NC)"; \
|
||||
else \
|
||||
echo -e " $(GREEN)✅ PostgreSQL CLI disponible$(NC)"; \
|
||||
fi
|
||||
|
||||
lint: ## Vérifie la qualité du code
|
||||
@echo -e "$(BLUE)🔍 Analyse qualité code Phase 4...$(NC)"
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
cargo fmt --check
|
||||
@echo -e "$(GREEN)✅ Code quality check terminé$(NC)"
|
||||
|
||||
fix: ## Corrige automatiquement le code
|
||||
@echo -e "$(BLUE)🔧 Correction automatique code...$(NC)"
|
||||
cargo fmt
|
||||
cargo fix --allow-dirty --allow-staged
|
||||
@echo -e "$(GREEN)✅ Corrections appliquées$(NC)"
|
||||
|
||||
load-test: build ## Test de charge basique
|
||||
@echo -e "$(BLUE)⚡ Test de charge Chat Server...$(NC)"
|
||||
@echo -e "$(YELLOW)📡 Démarrage serveur test...$(NC)"
|
||||
@# Simuler une charge basique
|
||||
@if command -v ab >/dev/null 2>&1; then \
|
||||
echo -e " $(GREEN)✅ Apache Bench disponible$(NC)"; \
|
||||
timeout 10s ./target/release/veza-chat-server & \
|
||||
sleep 2 && \
|
||||
ab -n 1000 -c 10 http://localhost:$(CHAT_PORT)/health || true; \
|
||||
pkill -f veza-chat-server || true; \
|
||||
else \
|
||||
echo -e " $(YELLOW)⚠️ Apache Bench non installé (sudo dnf install httpd-tools)$(NC)"; \
|
||||
fi
|
||||
|
||||
phase4: build validate metrics ## Validation complète Phase 4
|
||||
@echo -e "$(BLUE)🎯 VALIDATION COMPLÈTE PHASE 4$(NC)"
|
||||
@echo "================================="
|
||||
@echo ""
|
||||
@echo -e "$(GREEN)✅ PHASE 4 - OPTIMISATION CHAT SERVER VALIDÉE !$(NC)"
|
||||
@echo ""
|
||||
@echo -e "$(YELLOW)🏆 RÉALISATIONS :$(NC)"
|
||||
@echo -e " • Connection Pool haute performance (10k connexions)"
|
||||
@echo -e " • Persistence ultra-rapide (cache L1/L2/L3 < 5ms)"
|
||||
@echo -e " • Modération automatique avancée (ML + patterns)"
|
||||
@echo -e " • Analytics temps réel (métriques complètes)"
|
||||
@echo ""
|
||||
@echo -e "$(BLUE)🚀 PRÊT POUR PHASE 5 - OPTIMISATION STREAM SERVER !$(NC)"
|
||||
|
||||
# Targets pour développement rapide
|
||||
quick: build dev ## Build et lance rapidement
|
||||
|
||||
restart: ## Relance le serveur
|
||||
@pkill -f veza-chat-server || true
|
||||
@sleep 1
|
||||
@make dev
|
||||
|
||||
logs: ## Affiche les logs du serveur
|
||||
@echo -e "$(BLUE)📋 Logs Chat Server...$(NC)"
|
||||
@tail -f /tmp/veza-chat-server.log 2>/dev/null || echo "Aucun log trouvé"
|
||||
|
||||
# Aide par défaut
|
||||
.DEFAULT_GOAL := help
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Générer les bindings Rust à partir des fichiers .proto
|
||||
let proto_dir = "proto";
|
||||
let proto_files = vec!["proto/chat/chat.proto", "proto/common/auth.proto"];
|
||||
|
||||
// Vérifier si protoc est disponible
|
||||
// Si les fichiers générés existent déjà, on peut continuer sans protoc
|
||||
let generated_dir = std::path::Path::new("src/generated");
|
||||
let required_files = vec![
|
||||
generated_dir.join("veza.chat.rs"),
|
||||
generated_dir.join("veza.common.auth.rs"),
|
||||
];
|
||||
|
||||
let all_generated_exist = required_files.iter().all(|p| p.exists());
|
||||
|
||||
if all_generated_exist {
|
||||
// Les fichiers générés existent, on peut continuer sans protoc
|
||||
println!("cargo:warning=Using pre-generated protobuf files. protoc not required.");
|
||||
for proto_file in &proto_files {
|
||||
println!("cargo:rerun-if-changed={}", proto_file);
|
||||
}
|
||||
println!("cargo:rerun-if-changed=build.rs");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Configuration tonic-build
|
||||
tonic_build::configure()
|
||||
.build_server(true)
|
||||
.build_client(false) // Chat server est serveur, pas client
|
||||
.out_dir("src/generated")
|
||||
.compile(&proto_files, &[proto_dir])?;
|
||||
|
||||
// Recompiler si les fichiers .proto changent
|
||||
for proto_file in &proto_files {
|
||||
println!("cargo:rerun-if-changed={}", proto_file);
|
||||
}
|
||||
|
||||
println!("cargo:rerun-if-changed=build.rs");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
Updating crates.io index
|
||||
error: failed to select a version for `libsqlite3-sys`.
|
||||
... required by package `sqlx-sqlite v0.7.0`
|
||||
... which satisfies dependency `sqlx-sqlite = "=0.7.0"` of package `sqlx v0.7.0`
|
||||
... which satisfies dependency `sqlx = "^0.7"` of package `veza-common v0.1.0 (/home/senke/git/talas/veza/veza-common)`
|
||||
... which satisfies path dependency `veza-common` of package `chat_server v0.2.0 (/home/senke/git/talas/veza/veza-chat-server)`
|
||||
versions that meet the requirements `^0.26.0` are: 0.26.0
|
||||
|
||||
package `libsqlite3-sys` links to the native library `sqlite3`, but it conflicts with a previous package which links to `sqlite3` as well:
|
||||
package `libsqlite3-sys v0.30.1`
|
||||
... which satisfies dependency `libsqlite3-sys = "^0.30.1"` of package `sqlx-sqlite v0.8.6`
|
||||
... which satisfies dependency `sqlx-sqlite = "=0.8.6"` of package `sqlx v0.8.6`
|
||||
... which satisfies dependency `sqlx = "^0.8.6"` of package `chat_server v0.2.0 (/home/senke/git/talas/veza/veza-chat-server)`
|
||||
Only one package in the dependency graph may specify the same links value. This helps ensure that only one copy of a native library is linked in the final binary. Try to adjust your dependencies so that only one package uses the `links = "sqlite3"` value. For more information, see https://doc.rust-lang.org/cargo/reference/resolver.html#links.
|
||||
|
||||
failed to select a version for `libsqlite3-sys` which could resolve this conflict
|
||||
|
|
@ -1,94 +0,0 @@
|
|||
Checking chat_server v0.2.0 (/home/senke/Documents/veza/veza-chat-server)
|
||||
warning: unused imports: `Pool` and `Postgres`
|
||||
--> src/config.rs:2:20
|
||||
|
|
||||
2 | use sqlx::{PgPool, Pool, Postgres};
|
||||
| ^^^^ ^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(unused_imports)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: unused import: `error`
|
||||
--> src/config.rs:5:22
|
||||
|
|
||||
5 | use tracing::{debug, error, info, warn};
|
||||
| ^^^^^
|
||||
|
||||
warning: unused imports: `Error as LapinError`, `ExchangeKind`, and `options::ExchangeDeclareOptions`
|
||||
--> src/event_bus.rs:2:5
|
||||
|
|
||||
2 | options::ExchangeDeclareOptions, types::FieldTable, Channel, Connection, ConnectionProperties,
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
3 | Error as LapinError, ExchangeKind,
|
||||
| ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^
|
||||
|
||||
warning: unused import: `warn`
|
||||
--> src/typing_indicator.rs:5:40
|
||||
|
|
||||
5 | use tracing::{info, debug, instrument, warn};
|
||||
| ^^^^
|
||||
|
||||
warning: variable does not need to be mutable
|
||||
--> src/delivered_status.rs:57:21
|
||||
|
|
||||
57 | if let Some(mut status) = existing {
|
||||
| ----^^^^^^
|
||||
| |
|
||||
| help: remove this `mut`
|
||||
|
|
||||
= note: `#[warn(unused_mut)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: variable does not need to be mutable
|
||||
--> src/read_receipts.rs:86:21
|
||||
|
|
||||
86 | if let Some(mut receipt) = existing {
|
||||
| ----^^^^^^^
|
||||
| |
|
||||
| help: remove this `mut`
|
||||
|
||||
error[E0599]: no method named `get_all_metrics` found for reference `&ChatMetrics` in the current scope
|
||||
--> src/monitoring.rs:269:36
|
||||
|
|
||||
269 | let metrics_data = metrics.get_all_metrics().await;
|
||||
| ^^^^^^^^^^^^^^^
|
||||
|
|
||||
help: one of the expressions' fields has a method of the same name
|
||||
|
|
||||
269 | let metrics_data = metrics.collector.get_all_metrics().await;
|
||||
| ++++++++++
|
||||
help: there is a method `get_system_metrics` with a similar name
|
||||
|
|
||||
269 - let metrics_data = metrics.get_all_metrics().await;
|
||||
269 + let metrics_data = metrics.get_system_metrics().await;
|
||||
|
|
||||
|
||||
warning: unreachable expression
|
||||
--> src/config.rs:201:9
|
||||
|
|
||||
194 | / panic!(
|
||||
195 | | "SecurityConfig::default() cannot be used in production. \
|
||||
196 | | Create SecurityConfig manually with require_env_min_length(\"JWT_SECRET\", 32)"
|
||||
197 | | );
|
||||
| |_____________- any code following this expression is unreachable
|
||||
...
|
||||
201 | / Self {
|
||||
202 | | jwt_secret: "test_jwt_secret_minimum_32_characters_long".to_string(),
|
||||
203 | | jwt_access_duration: Duration::from_secs(900), // 15 min
|
||||
204 | | jwt_refresh_duration: Duration::from_secs(86400 * 30), // 30 days
|
||||
... |
|
||||
212 | | bcrypt_cost: 12,
|
||||
213 | | }
|
||||
| |_________^ unreachable expression
|
||||
|
|
||||
= note: `#[warn(unreachable_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: unused variable: `user_id`
|
||||
--> src/security/permission.rs:54:17
|
||||
|
|
||||
54 | user_id,
|
||||
| ^^^^^^^ help: try ignoring the field: `user_id: _`
|
||||
|
|
||||
= note: `#[warn(unused_variables)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
For more information about this error, try `rustc --explain E0599`.
|
||||
warning: `chat_server` (lib) generated 8 warnings
|
||||
error: could not compile `chat_server` (lib) due to 1 previous error; 8 warnings emitted
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
{
|
||||
"dashboard": {
|
||||
"id": null,
|
||||
"title": "Veza Platform - Local Dashboard",
|
||||
"tags": ["veza", "local"],
|
||||
"style": "dark",
|
||||
"timezone": "browser",
|
||||
"panels": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "System Overview",
|
||||
"type": "stat",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "up",
|
||||
"legendFormat": "{{job}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: http://prometheus:9090
|
||||
isDefault: true
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
global:
|
||||
scrape_interval: 15s
|
||||
evaluation_interval: 15s
|
||||
|
||||
rule_files:
|
||||
# - "first_rules.yml"
|
||||
# - "second_rules.yml"
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'prometheus'
|
||||
static_configs:
|
||||
- targets: ['localhost:9090']
|
||||
|
||||
- job_name: 'veza-backend'
|
||||
static_configs:
|
||||
- targets: ['host.docker.internal:8080']
|
||||
metrics_path: '/metrics'
|
||||
|
||||
- job_name: 'veza-chat'
|
||||
static_configs:
|
||||
- targets: ['host.docker.internal:3001']
|
||||
metrics_path: '/metrics'
|
||||
|
||||
- job_name: 'veza-stream'
|
||||
static_configs:
|
||||
- targets: ['host.docker.internal:3002']
|
||||
metrics_path: '/metrics'
|
||||
|
|
@ -1,112 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 Déploiement du serveur de chat Veza (version simplifiée)"
|
||||
echo "============================================================"
|
||||
|
||||
# Configuration
|
||||
CONTAINER_NAME="veza-chat"
|
||||
BINARY_NAME="chat-server"
|
||||
PORT=3001
|
||||
|
||||
# Fonctions
|
||||
build_server() {
|
||||
echo "📦 Compilation du serveur de chat..."
|
||||
cargo build --release --bin chat-server
|
||||
echo "✅ Compilation réussie"
|
||||
}
|
||||
|
||||
deploy_to_container() {
|
||||
echo "🚢 Déploiement dans le container $CONTAINER_NAME..."
|
||||
|
||||
# Copier le binaire
|
||||
incus file push target/release/chat-server $CONTAINER_NAME/opt/veza/
|
||||
|
||||
# Rendre exécutable
|
||||
incus exec $CONTAINER_NAME -- chmod +x /opt/veza/chat-server
|
||||
|
||||
# Créer le service systemd
|
||||
incus exec $CONTAINER_NAME -- tee /etc/systemd/system/veza-chat.service > /dev/null << 'EOF'
|
||||
[Unit]
|
||||
Description=Veza Chat Server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=www-data
|
||||
WorkingDirectory=/opt/veza
|
||||
ExecStart=/opt/veza/chat-server
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
Environment=RUST_LOG=info
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Activer et démarrer le service
|
||||
incus exec $CONTAINER_NAME -- systemctl daemon-reload
|
||||
incus exec $CONTAINER_NAME -- systemctl enable veza-chat
|
||||
incus exec $CONTAINER_NAME -- systemctl restart veza-chat
|
||||
|
||||
echo "✅ Service déployé et démarré"
|
||||
}
|
||||
|
||||
test_deployment() {
|
||||
echo "🧪 Test du déploiement..."
|
||||
|
||||
# Récupérer l'IP du container
|
||||
IP=$(incus list $CONTAINER_NAME -c 4 --format csv | cut -d' ' -f1)
|
||||
|
||||
if [ -z "$IP" ]; then
|
||||
echo "❌ Impossible de récupérer l'IP du container"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "📡 Test de santé sur http://$IP:$PORT/health"
|
||||
|
||||
# Attendre que le service démarre
|
||||
sleep 5
|
||||
|
||||
# Test de l'endpoint de santé
|
||||
if curl -s "http://$IP:$PORT/health" | grep -q "healthy"; then
|
||||
echo "✅ Serveur de chat opérationnel sur $IP:$PORT"
|
||||
echo "📊 Endpoints disponibles :"
|
||||
echo " - GET http://$IP:$PORT/health"
|
||||
echo " - GET http://$IP:$PORT/api/messages?room=general"
|
||||
echo " - POST http://$IP:$PORT/api/messages"
|
||||
echo " - GET http://$IP:$PORT/api/messages/stats"
|
||||
return 0
|
||||
else
|
||||
echo "❌ Le serveur ne répond pas correctement"
|
||||
echo "📝 Logs du service :"
|
||||
incus exec $CONTAINER_NAME -- journalctl -u veza-chat --no-pager -n 20
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Vérifications préliminaires
|
||||
if ! command -v incus &> /dev/null; then
|
||||
echo "❌ Incus non installé"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! incus list | grep -q $CONTAINER_NAME; then
|
||||
echo "❌ Container $CONTAINER_NAME non trouvé"
|
||||
echo "📋 Containers disponibles :"
|
||||
incus list
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Déploiement
|
||||
echo "🎯 Déploiement vers le container : $CONTAINER_NAME"
|
||||
|
||||
build_server
|
||||
deploy_to_container
|
||||
test_deployment
|
||||
|
||||
echo ""
|
||||
echo "🎉 Déploiement terminé avec succès !"
|
||||
echo "📊 Pour tester l'API :"
|
||||
echo " curl http://$(incus list $CONTAINER_NAME -c 4 --format csv | cut -d' ' -f1):$PORT/health"
|
||||
|
|
@ -1,120 +0,0 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
# Base de données PostgreSQL
|
||||
postgres:
|
||||
image: postgres:15-alpine
|
||||
container_name: veza-postgres-local
|
||||
environment:
|
||||
POSTGRES_DB: veza_local
|
||||
POSTGRES_USER: veza_user
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
- ./scripts/database/init.sql:/docker-entrypoint-initdb.d/init.sql
|
||||
networks:
|
||||
- veza-network
|
||||
|
||||
# Cache Redis
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
container_name: veza-redis-local
|
||||
ports:
|
||||
- "6379:6379"
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
networks:
|
||||
- veza-network
|
||||
|
||||
# Monitoring - Prometheus
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
container_name: veza-prometheus-local
|
||||
ports:
|
||||
- "9090:9090"
|
||||
volumes:
|
||||
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
- prometheus_data:/prometheus
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
- '--storage.tsdb.path=/prometheus'
|
||||
- '--web.console.libraries=/etc/prometheus/console_libraries'
|
||||
- '--web.console.templates=/etc/prometheus/consoles'
|
||||
- '--storage.tsdb.retention.time=200h'
|
||||
- '--web.enable-lifecycle'
|
||||
networks:
|
||||
- veza-network
|
||||
|
||||
# Monitoring - Grafana
|
||||
grafana:
|
||||
image: grafana/grafana:latest
|
||||
container_name: veza-grafana-local
|
||||
ports:
|
||||
- "3000:3000"
|
||||
environment:
|
||||
GF_SECURITY_ADMIN_PASSWORD: admin
|
||||
GF_USERS_ALLOW_SIGN_UP: "false"
|
||||
volumes:
|
||||
- grafana_data:/var/lib/grafana
|
||||
- ./config/grafana/dashboards:/etc/grafana/provisioning/dashboards
|
||||
- ./config/grafana/datasources:/etc/grafana/provisioning/datasources
|
||||
networks:
|
||||
- veza-network
|
||||
|
||||
# Logging - Elasticsearch
|
||||
elasticsearch:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
|
||||
container_name: veza-elasticsearch-local
|
||||
environment:
|
||||
- discovery.type=single-node
|
||||
- xpack.security.enabled=false
|
||||
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
|
||||
ports:
|
||||
- "9200:9200"
|
||||
volumes:
|
||||
- elasticsearch_data:/usr/share/elasticsearch/data
|
||||
networks:
|
||||
- veza-network
|
||||
|
||||
# Logging - Kibana
|
||||
kibana:
|
||||
image: docker.elastic.co/kibana/kibana:8.8.0
|
||||
container_name: veza-kibana-local
|
||||
ports:
|
||||
- "5601:5601"
|
||||
environment:
|
||||
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
|
||||
volumes:
|
||||
- kibana_data:/usr/share/kibana/data
|
||||
networks:
|
||||
- veza-network
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
|
||||
# Logging - Filebeat
|
||||
filebeat:
|
||||
image: docker.elastic.co/beats/filebeat:8.8.0
|
||||
container_name: veza-filebeat-local
|
||||
user: root
|
||||
volumes:
|
||||
- ./config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
networks:
|
||||
- veza-network
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
redis_data:
|
||||
prometheus_data:
|
||||
grafana_data:
|
||||
elasticsearch_data:
|
||||
kibana_data:
|
||||
|
||||
networks:
|
||||
veza-network:
|
||||
driver: bridge
|
||||
|
|
@ -1,181 +0,0 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
# ====================================
|
||||
# SERVEUR CHAT RUST PRINCIPAL
|
||||
# ====================================
|
||||
chat-server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
ports:
|
||||
- "8080:8080"
|
||||
- "9090:9090" # Métriques Prometheus
|
||||
environment:
|
||||
- RUST_ENV=development
|
||||
- RUST_LOG=info
|
||||
- DATABASE_URL=postgresql://${POSTGRES_USER:-veza_user}:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/veza_chat
|
||||
- REDIS_URL=redis://redis:6379
|
||||
# DEV ONLY: Override via .env in production. Never use this compose for prod.
|
||||
- JWT_SECRET=${JWT_SECRET:-dev-only-secret-min-32-chars-for-local}
|
||||
- SERVER_BIND_ADDR=0.0.0.0:8080
|
||||
- PROMETHEUS_BIND_ADDR=0.0.0.0:9090
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- ./logs:/app/logs
|
||||
- ./config:/app/config
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- veza-network
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
# ====================================
|
||||
# BASE DE DONNÉES POSTGRESQL
|
||||
# ====================================
|
||||
postgres:
|
||||
image: postgres:15-alpine
|
||||
environment:
|
||||
POSTGRES_DB: veza_chat
|
||||
POSTGRES_USER: ${POSTGRES_USER:-veza_user}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
|
||||
POSTGRES_INITDB_ARGS: "--auth-host=scram-sha-256"
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
- ./scripts/database/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- veza-network
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U veza_user -d veza_chat"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
# ====================================
|
||||
# CACHE REDIS
|
||||
# ====================================
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
ports:
|
||||
- "6379:6379"
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
- ./config/redis.conf:/usr/local/etc/redis/redis.conf:ro
|
||||
command: redis-server /usr/local/etc/redis/redis.conf
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- veza-network
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
# ====================================
|
||||
# MONITORING PROMETHEUS (OPTIONNEL)
|
||||
# ====================================
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
ports:
|
||||
- "9091:9090"
|
||||
volumes:
|
||||
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
- prometheus_data:/prometheus
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
- '--storage.tsdb.path=/prometheus'
|
||||
- '--web.console.libraries=/etc/prometheus/console_libraries'
|
||||
- '--web.console.templates=/etc/prometheus/consoles'
|
||||
- '--storage.tsdb.retention.time=200h'
|
||||
- '--web.enable-lifecycle'
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- veza-network
|
||||
profiles:
|
||||
- monitoring
|
||||
|
||||
# ====================================
|
||||
# GRAFANA POUR VISUALISATION (OPTIONNEL)
|
||||
# ====================================
|
||||
grafana:
|
||||
image: grafana/grafana:latest
|
||||
ports:
|
||||
- "3000:3000"
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
- GF_USERS_ALLOW_SIGN_UP=false
|
||||
volumes:
|
||||
- grafana_data:/var/lib/grafana
|
||||
- ./config/grafana/datasources:/etc/grafana/provisioning/datasources:ro
|
||||
- ./config/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- veza-network
|
||||
profiles:
|
||||
- monitoring
|
||||
|
||||
# ====================================
|
||||
# NGINX REVERSE PROXY (OPTIONNEL)
|
||||
# ====================================
|
||||
nginx:
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
volumes:
|
||||
- ./config/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
- ./config/nginx/ssl:/etc/nginx/ssl:ro
|
||||
- ./logs/nginx:/var/log/nginx
|
||||
depends_on:
|
||||
- chat-server
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- veza-network
|
||||
profiles:
|
||||
- production
|
||||
|
||||
# ====================================
|
||||
# VOLUMES PERSISTANTS
|
||||
# ====================================
|
||||
volumes:
|
||||
postgres_data:
|
||||
driver: local
|
||||
redis_data:
|
||||
driver: local
|
||||
prometheus_data:
|
||||
driver: local
|
||||
grafana_data:
|
||||
driver: local
|
||||
|
||||
# ====================================
|
||||
# RÉSEAU INTERNE
|
||||
# ====================================
|
||||
networks:
|
||||
veza-network:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.0.0/16
|
||||
|
||||
# ====================================
|
||||
# PROFILS DE DÉPLOIEMENT
|
||||
# ====================================
|
||||
|
||||
# Démarrage basique (développement)
|
||||
# docker-compose up chat-server postgres redis
|
||||
|
||||
# Avec monitoring complet
|
||||
# docker-compose --profile monitoring up
|
||||
|
||||
# Production avec proxy
|
||||
# docker-compose --profile production --profile monitoring up
|
||||
|
|
@ -1,167 +0,0 @@
|
|||
# 🔍 AUDIT INITIAL — Delivered Status + Typing Indicators
|
||||
|
||||
**Date** : 2025-01-27
|
||||
**Cible** : `veza-chat-server`
|
||||
**Objectif** : État actuel avant implémentation P1
|
||||
|
||||
---
|
||||
|
||||
## 1. TYPING INDICATORS — État actuel
|
||||
|
||||
### 1.1. Module existant : `src/typing_indicator.rs`
|
||||
|
||||
✅ **Structure présente** :
|
||||
- `TypingIndicatorManager` existe avec :
|
||||
- `typing_users: Arc<RwLock<HashMap<String, HashMap<String, DateTime<Utc>>>>>`
|
||||
- `timeout_duration: Duration::seconds(3)` (hardcodé)
|
||||
|
||||
✅ **Méthodes disponibles** :
|
||||
- `set_typing(conversation_id, user_id)` — marque un user comme "typing"
|
||||
- `stop_typing(conversation_id, user_id)` — retire un user
|
||||
- `get_typing_users(conversation_id)` — liste les users actifs (filtre les expirés)
|
||||
- `cleanup_expired()` — nettoie les entrées expirées
|
||||
|
||||
❌ **Manques identifiés** :
|
||||
1. **Pas de task de monitoring automatique** : `cleanup_expired()` existe mais n'est jamais appelée automatiquement
|
||||
2. **Pas de broadcast automatique** : le manager ne déclenche pas de broadcast quand un timeout expire
|
||||
3. **Pas intégré dans WebSocketState** : le manager n'est pas instancié dans `WebSocketState`
|
||||
4. **Pas de méthode `monitor_timeouts()`** : pas de boucle de fond pour détecter les expirations
|
||||
|
||||
### 1.2. WebSocket Messages
|
||||
|
||||
❌ **IncomingMessage::Typing** : **N'EXISTE PAS**
|
||||
- Seuls existent : `SendMessage`, `JoinConversation`, `LeaveConversation`, `MarkAsRead`, `Ping`
|
||||
|
||||
❌ **OutgoingMessage::UserTyping** : **N'EXISTE PAS**
|
||||
- Seuls existent : `NewMessage`, `MessageRead`, `ActionConfirmed`, `Error`, `Pong`
|
||||
|
||||
### 1.3. Handler WebSocket
|
||||
|
||||
❌ **Pas de branchement pour Typing** dans `handle_incoming_message()` (`src/websocket/handler.rs`)
|
||||
|
||||
---
|
||||
|
||||
## 2. DELIVERED STATUS — État actuel
|
||||
|
||||
### 2.1. Enum MessageReadStatus
|
||||
|
||||
✅ **Existe dans `src/read_receipts.rs`** :
|
||||
```rust
|
||||
pub enum MessageReadStatus {
|
||||
Sent,
|
||||
Delivered, // ✅ Existe mais non utilisé
|
||||
Read,
|
||||
}
|
||||
```
|
||||
|
||||
⚠️ **Problème** : `Delivered` existe dans l'enum mais :
|
||||
- `get_message_status()` retourne toujours `Sent` si pas de read receipt (ligne 230)
|
||||
- Commentaire TODO : "Implémenter un système de tracking delivered si nécessaire"
|
||||
|
||||
### 2.2. Base de données
|
||||
|
||||
❌ **Table `delivered_status`** : **N'EXISTE PAS**
|
||||
- Aucune migration trouvée pour cette table
|
||||
- Seule table `read_receipts` existe pour les messages lus
|
||||
|
||||
### 2.3. Manager dédié
|
||||
|
||||
❌ **DeliveredStatusManager** : **N'EXISTE PAS**
|
||||
- `ReadReceiptManager` gère uniquement les read receipts
|
||||
- Pas de module `src/delivered_status.rs`
|
||||
|
||||
### 2.4. WebSocket Messages
|
||||
|
||||
❌ **IncomingMessage::Delivered** : **N'EXISTE PAS**
|
||||
|
||||
❌ **OutgoingMessage::MessageDelivered** : **N'EXISTE PAS**
|
||||
|
||||
### 2.5. Handler WebSocket
|
||||
|
||||
❌ **Pas de branchement pour Delivered** dans `handle_incoming_message()`
|
||||
|
||||
---
|
||||
|
||||
## 3. PERMISSIONS — État actuel
|
||||
|
||||
✅ **PermissionService existe** (`src/security/permission.rs`) :
|
||||
- `can_read_conversation(user_id, conversation_id)`
|
||||
- `can_send_message(user_id, conversation_id)`
|
||||
- `can_mark_read(user_id, conversation_id)`
|
||||
|
||||
✅ **Intégration dans handler** :
|
||||
- `MarkAsRead` utilise déjà `can_mark_read()`
|
||||
- `SendMessage` utilise déjà `can_send_message()`
|
||||
|
||||
---
|
||||
|
||||
## 4. ARCHITECTURE ACTUELLE
|
||||
|
||||
### 4.1. WebSocketState
|
||||
|
||||
```rust
|
||||
pub struct WebSocketState {
|
||||
pub message_repo: Arc<MessageRepository>,
|
||||
pub read_receipt_manager: Arc<ReadReceiptManager>,
|
||||
pub ws_manager: Arc<WebSocketManager>,
|
||||
pub jwt_manager: Arc<JwtManager>,
|
||||
pub permission_service: Arc<PermissionService>,
|
||||
}
|
||||
```
|
||||
|
||||
❌ **TypingIndicatorManager manquant** dans `WebSocketState`
|
||||
|
||||
❌ **DeliveredStatusManager manquant** dans `WebSocketState`
|
||||
|
||||
### 4.2. Main.rs
|
||||
|
||||
- `ReadReceiptManager` est instancié (ligne 147)
|
||||
- `PermissionService` est instancié (ligne 148)
|
||||
- `TypingIndicatorManager` **n'est pas instancié**
|
||||
- `DeliveredStatusManager` **n'existe pas encore**
|
||||
|
||||
---
|
||||
|
||||
## 5. RÉSUMÉ DES MANQUES
|
||||
|
||||
### Typing Indicators
|
||||
- ✅ Manager existe mais incomplet
|
||||
- ❌ Pas de task de monitoring automatique
|
||||
- ❌ Pas intégré dans WebSocketState
|
||||
- ❌ Pas de messages WebSocket (Incoming/Outgoing)
|
||||
- ❌ Pas de branchement dans handler
|
||||
|
||||
### Delivered Status
|
||||
- ✅ Enum `Delivered` existe mais non utilisé
|
||||
- ❌ Pas de table DB
|
||||
- ❌ Pas de manager dédié
|
||||
- ❌ Pas de messages WebSocket (Incoming/Outgoing)
|
||||
- ❌ Pas de branchement dans handler
|
||||
|
||||
---
|
||||
|
||||
## 6. PLAN D'IMPLÉMENTATION
|
||||
|
||||
### Phase 1 : Infrastructure
|
||||
1. Créer migration SQL pour `delivered_status`
|
||||
2. Créer `src/delivered_status.rs` avec `DeliveredStatusManager`
|
||||
3. Améliorer `TypingIndicatorManager` avec task de monitoring
|
||||
|
||||
### Phase 2 : WebSocket Messages
|
||||
4. Ajouter `IncomingMessage::Typing` et `IncomingMessage::Delivered`
|
||||
5. Ajouter `OutgoingMessage::UserTyping` et `OutgoingMessage::MessageDelivered`
|
||||
|
||||
### Phase 3 : Intégration
|
||||
6. Ajouter managers dans `WebSocketState`
|
||||
7. Brancher handlers dans `handle_incoming_message()`
|
||||
8. Démarrer task de monitoring typing dans `main.rs`
|
||||
|
||||
### Phase 4 : Tests & Documentation
|
||||
9. Tests unitaires
|
||||
10. Tests d'intégration
|
||||
11. Documentation complète
|
||||
|
||||
---
|
||||
|
||||
**Prochaine étape** : Implémentation selon le design cible.
|
||||
|
||||
|
|
@ -1,212 +0,0 @@
|
|||
# 🔍 Audit Initial - Message Search, History Pagination, and Offline Sync
|
||||
|
||||
**Date**: 2025-12-05
|
||||
**Objectif**: Analyser l'état actuel avant implémentation des fonctionnalités P1
|
||||
|
||||
---
|
||||
|
||||
## 1. AUDIT DES FONCTIONNALITÉS EXISTANTES
|
||||
|
||||
### 1.1 Recherche de messages
|
||||
**❌ N'EXISTE PAS**
|
||||
- Aucune fonction dans `MessageRepository` pour rechercher des messages
|
||||
- Aucune route WebSocket ou REST pour la recherche
|
||||
- Aucun index de recherche textuelle sur la colonne `content`
|
||||
|
||||
### 1.2 Pagination de l'historique
|
||||
**⚠️ PARTIELLEMENT EXISTANT**
|
||||
- `MessageRepository::get_conversation_messages()` existe mais :
|
||||
- Ne supporte que `LIMIT` (pas de cursors `before`/`after`)
|
||||
- Ne retourne pas `has_more_before`/`has_more_after`
|
||||
- Tri toujours `DESC` sans possibilité de tri `ASC` pour `after`
|
||||
- Aucune route WebSocket pour `FetchHistory`
|
||||
|
||||
### 1.3 Synchronisation hors ligne
|
||||
**❌ N'EXISTE PAS**
|
||||
- Aucune fonction pour récupérer les messages depuis un timestamp
|
||||
- Aucune route WebSocket pour `SyncMessages`
|
||||
- Pas de mécanisme pour tracker le dernier timestamp de sync
|
||||
|
||||
---
|
||||
|
||||
## 2. AUDIT DES INDEX SQL
|
||||
|
||||
### 2.1 Index existants sur `messages`
|
||||
```sql
|
||||
-- Migration 001
|
||||
idx_messages_conversation_id ON messages(conversation_id)
|
||||
idx_messages_sender_id ON messages(sender_id)
|
||||
idx_messages_created_at ON messages(created_at)
|
||||
|
||||
-- Migration 005
|
||||
idx_messages_deleted_at ON messages(deleted_at) WHERE deleted_at IS NOT NULL
|
||||
idx_messages_edited_at ON messages(edited_at) WHERE edited_at IS NOT NULL
|
||||
```
|
||||
|
||||
### 2.2 Index manquants (REQUIS)
|
||||
**❌ Index composite pour pagination**
|
||||
```sql
|
||||
CREATE INDEX idx_messages_conv_created_at
|
||||
ON messages(conversation_id, created_at DESC);
|
||||
```
|
||||
|
||||
**❌ Index GIN pour recherche textuelle**
|
||||
```sql
|
||||
-- Option 1: Index GIN avec tsvector (recherche avancée)
|
||||
ALTER TABLE messages ADD COLUMN tsv tsvector;
|
||||
CREATE INDEX idx_messages_tsv ON messages USING GIN(tsv);
|
||||
|
||||
-- Option 2: Index trigram pour recherche ILIKE (plus simple)
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;
|
||||
CREATE INDEX idx_messages_content_trgm ON messages USING GIN(content gin_trgm_ops);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. AUDIT DES CHAMPS DE TIMESTAMPS
|
||||
|
||||
### 3.1 Format stocké dans la table `messages`
|
||||
- ✅ `edited_at`: `TIMESTAMP WITH TIME ZONE` (Option<DateTime<Utc>> en Rust)
|
||||
- ✅ `deleted_at`: `TIMESTAMP WITH TIME ZONE` (Option<DateTime<Utc>> en Rust)
|
||||
- ✅ `created_at`: `TIMESTAMP WITH TIME ZONE` (DateTime<Utc> en Rust)
|
||||
- ✅ `updated_at`: `TIMESTAMP WITH TIME ZONE` (DateTime<Utc> en Rust)
|
||||
|
||||
### 3.2 Format stocké dans les tables séparées
|
||||
- ✅ `read_at`: Dans `read_receipts` table (Migration 003)
|
||||
- ✅ `delivered_at`: Dans `delivered_status` table (Migration 004)
|
||||
|
||||
**Note**: Les statuts `read` et `delivered` sont dans des tables séparées, pas dans `messages`. Pour la sync offline, il faudra joindre ces tables ou les inclure dans la réponse.
|
||||
|
||||
---
|
||||
|
||||
## 4. AUDIT DES TYPES WEBSOCKET
|
||||
|
||||
### 4.1 IncomingMessage (src/websocket/mod.rs)
|
||||
**Types existants**:
|
||||
- `SendMessage`
|
||||
- `JoinConversation`
|
||||
- `LeaveConversation`
|
||||
- `MarkAsRead`
|
||||
- `Typing`
|
||||
- `Delivered`
|
||||
- `EditMessage`
|
||||
- `DeleteMessage`
|
||||
- `Ping`
|
||||
|
||||
**Types manquants**:
|
||||
- ❌ `FetchHistory`
|
||||
- ❌ `SearchMessages`
|
||||
- ❌ `SyncMessages`
|
||||
|
||||
### 4.2 OutgoingMessage (src/websocket/mod.rs)
|
||||
**Types existants**:
|
||||
- `NewMessage`
|
||||
- `MessageRead`
|
||||
- `MessageDelivered`
|
||||
- `UserTyping`
|
||||
- `MessageEdited`
|
||||
- `MessageDeleted`
|
||||
- `ActionConfirmed`
|
||||
- `Error`
|
||||
- `Pong`
|
||||
|
||||
**Types manquants**:
|
||||
- ❌ `HistoryChunk`
|
||||
- ❌ `SearchResults`
|
||||
- ❌ `SyncChunk`
|
||||
|
||||
---
|
||||
|
||||
## 5. AUDIT DU REPOSITORY
|
||||
|
||||
### 5.1 MessageRepository (src/repository/message_repository.rs)
|
||||
**Méthodes existantes**:
|
||||
- ✅ `create()` - Créer un message
|
||||
- ✅ `get_conversation_messages()` - Récupérer messages avec LIMIT
|
||||
- ✅ `get_by_id()` - Récupérer un message par ID
|
||||
- ✅ `update()` - Mettre à jour un message
|
||||
- ✅ `delete()` - Soft delete un message
|
||||
- ✅ `get_by_id_including_deleted()` - Récupérer même si supprimé
|
||||
|
||||
**Méthodes manquantes**:
|
||||
- ❌ `fetch_history()` - Pagination avec before/after
|
||||
- ❌ `search_messages()` - Recherche textuelle
|
||||
- ❌ `fetch_since()` - Sync depuis timestamp
|
||||
|
||||
---
|
||||
|
||||
## 6. AUDIT DES PERMISSIONS
|
||||
|
||||
### 6.1 PermissionService (src/security/permission.rs)
|
||||
**Méthodes existantes** (à vérifier):
|
||||
- `can_send_message()`
|
||||
- `can_read_conversation()`
|
||||
- `can_join_conversation()`
|
||||
- `can_mark_read()`
|
||||
|
||||
**Méthodes nécessaires**:
|
||||
- ✅ Les méthodes existantes suffisent pour les nouvelles fonctionnalités
|
||||
- La recherche nécessite `can_read_conversation()`
|
||||
- La pagination nécessite `can_read_conversation()`
|
||||
- La sync nécessite `can_read_conversation()`
|
||||
|
||||
---
|
||||
|
||||
## 7. RÉSUMÉ DES ACTIONS REQUISES
|
||||
|
||||
### 7.1 Migration SQL
|
||||
1. ✅ Créer index composite `(conversation_id, created_at DESC)`
|
||||
2. ✅ Créer index GIN pour recherche textuelle (tsvector ou trigram)
|
||||
3. ✅ Ajouter colonne `tsv` si choix tsvector
|
||||
|
||||
### 7.2 Repository
|
||||
1. ✅ Implémenter `fetch_history()` avec before/after
|
||||
2. ✅ Implémenter `search_messages()` avec query
|
||||
3. ✅ Implémenter `fetch_since()` avec timestamp
|
||||
|
||||
### 7.3 WebSocket
|
||||
1. ✅ Ajouter `FetchHistory`, `SearchMessages`, `SyncMessages` dans `IncomingMessage`
|
||||
2. ✅ Ajouter `HistoryChunk`, `SearchResults`, `SyncChunk` dans `OutgoingMessage`
|
||||
3. ✅ Implémenter handlers dans `websocket/handler.rs`
|
||||
|
||||
### 7.4 Tests
|
||||
1. ✅ Tests unitaires pour chaque méthode repository
|
||||
2. ✅ Tests d'intégration pour les handlers WebSocket
|
||||
3. ✅ Tests de permissions
|
||||
|
||||
### 7.5 Documentation
|
||||
1. ✅ Créer `docs/CHAT_HISTORY_SEARCH_SYNC.md`
|
||||
2. ✅ Mettre à jour `TRIAGE.md`
|
||||
|
||||
---
|
||||
|
||||
## 8. DÉCISIONS TECHNIQUES
|
||||
|
||||
### 8.1 Recherche textuelle
|
||||
**Choix**: Commencer avec `ILIKE` (plus simple), possibilité d'upgrade vers `tsvector` plus tard.
|
||||
|
||||
**Raison**:
|
||||
- Plus simple à implémenter
|
||||
- Pas besoin de trigger pour maintenir `tsv`
|
||||
- Suffisant pour la plupart des cas d'usage
|
||||
|
||||
### 8.2 Pagination
|
||||
**Choix**: Cursors basés sur `created_at` (timestamp).
|
||||
|
||||
**Raison**:
|
||||
- Plus fiable que les offsets
|
||||
- Meilleure performance
|
||||
- Supporte les insertions concurrentes
|
||||
|
||||
### 8.3 Sync offline
|
||||
**Choix**: Récupérer tous les messages depuis `since`, inclure les updates (edited, deleted).
|
||||
|
||||
**Raison**:
|
||||
- Permet une vraie synchronisation fiable
|
||||
- Compatible avec les statuts edited/deleted
|
||||
- Nécessaire pour les clients mobiles
|
||||
|
||||
---
|
||||
|
||||
**Fin de l'audit**
|
||||
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
# Stratégie de Base de Données pour Veza Chat Server
|
||||
|
||||
## Isolation par Schema
|
||||
|
||||
Le `veza-chat-server` partage l'instance PostgreSQL `veza_lab` avec d'autres services (Backend API, Stream Server), mais utilise un **schema dédié** nommé `chat`.
|
||||
|
||||
Cette isolation permet de :
|
||||
1. Éviter les conflits de noms de tables (ex: `users`) avec le Backend API (schema `public`).
|
||||
2. Gérer des migrations SQLx indépendantes et spécifiques au Chat.
|
||||
3. Réinitialiser les données du Chat sans impacter le reste du système.
|
||||
|
||||
## Configuration
|
||||
|
||||
Pour se connecter à la base de données du chat, l'URL de connexion (DSN) doit inclure l'option `search_path=chat`.
|
||||
|
||||
### Exemple de DSN
|
||||
```bash
|
||||
export DATABASE_URL="postgres://user:pass@localhost:5432/veza_lab?sslmode=disable&options=-c%20search_path=chat"
|
||||
```
|
||||
|
||||
### Scripts Lab
|
||||
Les scripts dans `scripts/` configurent automatiquement cet environnement :
|
||||
|
||||
- **`start_lab.sh`** : Démarre le serveur en configurant le schema `chat`.
|
||||
- **`reset_lab_db.sh`** : Supprime et recrée le schema `chat`, puis joue les migrations.
|
||||
|
||||
## Migrations
|
||||
|
||||
Les migrations SQLx se trouvent dans `migrations/`. Elles s'appliquent uniquement au schema `chat`.
|
||||
|
||||
```bash
|
||||
# Appliquer manuellement les migrations
|
||||
export DATABASE_URL="..." # avec search_path=chat
|
||||
sqlx migrate run
|
||||
```
|
||||
|
|
@ -1,412 +0,0 @@
|
|||
# 📬 Delivered Status + Typing Indicators — Documentation complète
|
||||
|
||||
**Date** : 2025-01-27
|
||||
**Version** : 1.0.0
|
||||
**Cible** : `veza-chat-server`
|
||||
|
||||
---
|
||||
|
||||
## 📋 TABLE DES MATIÈRES
|
||||
|
||||
1. [Vue d'ensemble](#vue-densemble)
|
||||
2. [Delivered Status](#delivered-status)
|
||||
3. [Typing Indicators](#typing-indicators)
|
||||
4. [Messages WebSocket](#messages-websocket)
|
||||
5. [Permissions](#permissions)
|
||||
6. [Exemples de payloads](#exemples-de-payloads)
|
||||
7. [Limites et considérations](#limites-et-considérations)
|
||||
|
||||
---
|
||||
|
||||
## 🎯 VUE D'ENSEMBLE
|
||||
|
||||
Deux fonctionnalités essentielles du chat moderne ont été implémentées :
|
||||
|
||||
1. **Delivered Status** : Tracking persistant des messages reçus (mais pas encore lus)
|
||||
2. **Typing Indicators** : Indicateurs en temps réel de frappe avec timeout automatique
|
||||
|
||||
Ces systèmes s'intègrent avec :
|
||||
- ✅ La couche de permissions (P0)
|
||||
- ✅ Les Read Receipts (P0)
|
||||
- ✅ Les événements WebSocket inbound/outbound
|
||||
- ✅ La base de données PostgreSQL (pour Delivered Status)
|
||||
- ✅ Un système de timeout interne (pour Typing Indicators)
|
||||
|
||||
---
|
||||
|
||||
## 📬 DELIVERED STATUS
|
||||
|
||||
### Architecture
|
||||
|
||||
Le Delivered Status est **persistant** et stocké en base de données PostgreSQL.
|
||||
|
||||
### Flux
|
||||
|
||||
```
|
||||
1. Client reçoit un message via WebSocket
|
||||
↓
|
||||
2. Client envoie IncomingMessage::Delivered { message_id, conversation_id }
|
||||
↓
|
||||
3. Serveur :
|
||||
- Vérifie permission can_read_conversation
|
||||
- Vérifie que message appartient à conversation
|
||||
- Stocke en DB (table delivered_status)
|
||||
- Broadcast OutgoingMessage::MessageDelivered
|
||||
```
|
||||
|
||||
### Base de données
|
||||
|
||||
**Table** : `delivered_status`
|
||||
|
||||
```sql
|
||||
CREATE TABLE delivered_status (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
message_id UUID NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
conversation_id UUID NOT NULL REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
delivered_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(message_id, user_id)
|
||||
);
|
||||
```
|
||||
|
||||
**Index** :
|
||||
- `idx_delivered_status_message_id` : Recherche par message
|
||||
- `idx_delivered_status_user_id` : Recherche par utilisateur
|
||||
- `idx_delivered_status_conversation_id` : Recherche par conversation
|
||||
- `idx_delivered_status_conversation_user` : Composite pour requêtes fréquentes
|
||||
|
||||
### Manager
|
||||
|
||||
**Module** : `src/delivered_status.rs`
|
||||
|
||||
**Méthodes principales** :
|
||||
- `mark_delivered(user_id, message_id, conversation_id)` : Marque un message comme délivré
|
||||
- `get_delivered_for_message(message_id)` : Récupère tous les delivered status pour un message
|
||||
- `is_delivered(message_id, user_id)` : Vérifie si un message a été délivré à un utilisateur
|
||||
- `verify_message_belongs_to_conversation(message_id, conversation_id)` : Vérifie l'appartenance
|
||||
|
||||
### Règles
|
||||
|
||||
- ✅ Un seul delivered status par (message_id, user_id) — contrainte UNIQUE
|
||||
- ✅ Mise à jour automatique de `delivered_at` si le status existe déjà
|
||||
- ✅ Vérification de permission `can_read_conversation` avant marquage
|
||||
- ✅ Vérification que le message appartient à la conversation
|
||||
- ✅ Broadcast automatique à tous les participants de la conversation
|
||||
|
||||
---
|
||||
|
||||
## ⌨️ TYPING INDICATORS
|
||||
|
||||
### Architecture
|
||||
|
||||
Les Typing Indicators sont **éphémères** (non persistants) et gérés en mémoire.
|
||||
|
||||
### Flux
|
||||
|
||||
```
|
||||
1. Client commence à taper
|
||||
↓
|
||||
2. Client envoie IncomingMessage::Typing { conversation_id, is_typing: true }
|
||||
↓
|
||||
3. Serveur :
|
||||
- Vérifie permission can_send_message
|
||||
- Enregistre dans TypingIndicatorManager
|
||||
- Reset timeout de 3 secondes
|
||||
- Broadcast OutgoingMessage::UserTyping { is_typing: true }
|
||||
↓
|
||||
4. Si pas de nouveau signal pendant 3s :
|
||||
- Task de monitoring détecte expiration
|
||||
- Broadcast OutgoingMessage::UserTyping { is_typing: false }
|
||||
```
|
||||
|
||||
### Manager
|
||||
|
||||
**Module** : `src/typing_indicator.rs`
|
||||
|
||||
**Structure interne** :
|
||||
```rust
|
||||
HashMap<conversation_id, HashMap<user_id, last_activity_timestamp>>
|
||||
```
|
||||
|
||||
**Méthodes principales** :
|
||||
- `user_started_typing(user_id, conversation_id)` : Marque un user comme "typing"
|
||||
- `user_stopped_typing(user_id, conversation_id)` : Retire un user
|
||||
- `get_typing_users(conversation_id)` : Liste les users actifs (filtre les expirés)
|
||||
- `monitor_timeouts()` : Détecte les expirations et retourne les changements
|
||||
|
||||
### Task de monitoring
|
||||
|
||||
Un task Tokio tourne en arrière-plan toutes les **500ms** :
|
||||
|
||||
```rust
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_millis(500));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let expired_changes = typing_manager.monitor_timeouts().await;
|
||||
// Broadcast les changements (is_typing = false)
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Règles
|
||||
|
||||
- ✅ Timeout de **3 secondes** (hardcodé, configurable via `timeout_duration`)
|
||||
- ✅ Un seul statut actif par (user_id, conversation_id)
|
||||
- ✅ Reset automatique du timeout à chaque nouveau signal `is_typing: true`
|
||||
- ✅ Broadcast automatique après expiration (via task de monitoring)
|
||||
- ✅ Vérification de permission `can_send_message` avant enregistrement
|
||||
- ✅ Pas de persistance — tout en mémoire
|
||||
|
||||
---
|
||||
|
||||
## 🔌 MESSAGES WEBSOCKET
|
||||
|
||||
### Incoming Messages
|
||||
|
||||
#### Typing
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "Typing",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"is_typing": true
|
||||
}
|
||||
```
|
||||
|
||||
**Rust** :
|
||||
```rust
|
||||
IncomingMessage::Typing {
|
||||
conversation_id: Uuid,
|
||||
is_typing: bool,
|
||||
}
|
||||
```
|
||||
|
||||
#### Delivered
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "Delivered",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"message_id": "660e8400-e29b-41d4-a716-446655440001"
|
||||
}
|
||||
```
|
||||
|
||||
**Rust** :
|
||||
```rust
|
||||
IncomingMessage::Delivered {
|
||||
conversation_id: Uuid,
|
||||
message_id: Uuid,
|
||||
}
|
||||
```
|
||||
|
||||
### Outgoing Messages
|
||||
|
||||
#### UserTyping
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "UserTyping",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"user_id": "770e8400-e29b-41d4-a716-446655440002",
|
||||
"is_typing": true
|
||||
}
|
||||
```
|
||||
|
||||
**Rust** :
|
||||
```rust
|
||||
OutgoingMessage::UserTyping {
|
||||
conversation_id: Uuid,
|
||||
user_id: Uuid,
|
||||
is_typing: bool,
|
||||
}
|
||||
```
|
||||
|
||||
#### MessageDelivered
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "MessageDelivered",
|
||||
"message_id": "660e8400-e29b-41d4-a716-446655440001",
|
||||
"user_id": "770e8400-e29b-41d4-a716-446655440002",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"delivered_at": "2025-01-27T10:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Rust** :
|
||||
```rust
|
||||
OutgoingMessage::MessageDelivered {
|
||||
message_id: Uuid,
|
||||
user_id: Uuid,
|
||||
conversation_id: Uuid,
|
||||
delivered_at: DateTime<Utc>,
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔐 PERMISSIONS
|
||||
|
||||
### Delivered Status
|
||||
|
||||
**Permission requise** : `can_read_conversation(user_id, conversation_id)`
|
||||
|
||||
**Vérifications** :
|
||||
1. L'utilisateur est membre de la conversation
|
||||
2. Le message appartient à la conversation indiquée
|
||||
3. Le message existe
|
||||
|
||||
**Erreurs possibles** :
|
||||
- `PermissionError::NotMember` : Utilisateur non membre
|
||||
- `ChatError::NotFound` : Message inexistant
|
||||
- `ChatError::Validation` : Message n'appartient pas à la conversation
|
||||
|
||||
### Typing Indicators
|
||||
|
||||
**Permission requise** : `can_send_message(user_id, conversation_id)`
|
||||
|
||||
**Vérifications** :
|
||||
1. L'utilisateur peut envoyer des messages dans la conversation
|
||||
|
||||
**Erreurs possibles** :
|
||||
- `PermissionError::NotMember` : Utilisateur non membre
|
||||
- `PermissionError::CannotSend` : Pas de permission d'écriture
|
||||
|
||||
---
|
||||
|
||||
## 📝 EXEMPLES DE PAYLOADS
|
||||
|
||||
### Scénario 1 : Typing Indicator
|
||||
|
||||
**Client A commence à taper** :
|
||||
```json
|
||||
// Incoming
|
||||
{ "type": "Typing", "conversation_id": "conv-123", "is_typing": true }
|
||||
|
||||
// Outgoing (broadcast à tous sauf Client A)
|
||||
{ "type": "UserTyping", "conversation_id": "conv-123", "user_id": "user-a", "is_typing": true }
|
||||
```
|
||||
|
||||
**Client A continue (reset timeout)** :
|
||||
```json
|
||||
// Incoming (après 2s)
|
||||
{ "type": "Typing", "conversation_id": "conv-123", "is_typing": true }
|
||||
// → Timeout reset à 3s
|
||||
```
|
||||
|
||||
**Client A arrête (timeout après 3s)** :
|
||||
```json
|
||||
// Outgoing (automatique après 3s sans signal)
|
||||
{ "type": "UserTyping", "conversation_id": "conv-123", "user_id": "user-a", "is_typing": false }
|
||||
```
|
||||
|
||||
### Scénario 2 : Delivered Status
|
||||
|
||||
**Client B reçoit un message** :
|
||||
```json
|
||||
// Outgoing (nouveau message)
|
||||
{
|
||||
"type": "NewMessage",
|
||||
"conversation_id": "conv-123",
|
||||
"message_id": "msg-456",
|
||||
"sender_id": "user-a",
|
||||
"content": "Hello!",
|
||||
"created_at": "2025-01-27T10:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Client B marque comme délivré** :
|
||||
```json
|
||||
// Incoming
|
||||
{ "type": "Delivered", "conversation_id": "conv-123", "message_id": "msg-456" }
|
||||
|
||||
// Outgoing (broadcast à tous)
|
||||
{
|
||||
"type": "MessageDelivered",
|
||||
"message_id": "msg-456",
|
||||
"user_id": "user-b",
|
||||
"conversation_id": "conv-123",
|
||||
"delivered_at": "2025-01-27T10:30:01Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Client A voit que le message est délivré** :
|
||||
```json
|
||||
// Outgoing (reçu par Client A)
|
||||
{
|
||||
"type": "MessageDelivered",
|
||||
"message_id": "msg-456",
|
||||
"user_id": "user-b",
|
||||
"conversation_id": "conv-123",
|
||||
"delivered_at": "2025-01-27T10:30:01Z"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ LIMITES ET CONSIDÉRATIONS
|
||||
|
||||
### Delivered Status
|
||||
|
||||
- ✅ **Persistant** : Stocké en DB, survit aux redémarrages
|
||||
- ⚠️ **Latence** : Dépend de la latence réseau client → serveur
|
||||
- ⚠️ **Pas de garantie** : Si le client se déconnecte avant d'envoyer `Delivered`, le status n'est pas enregistré
|
||||
- ✅ **Déduplication** : UNIQUE constraint empêche les doublons
|
||||
|
||||
### Typing Indicators
|
||||
|
||||
- ⚠️ **Non persistant** : Perdu au redémarrage du serveur
|
||||
- ⚠️ **Latence de détection** : Maximum 500ms (intervalle du task de monitoring)
|
||||
- ⚠️ **Pas de garantie** : Si le serveur crash, les typing indicators sont perdus
|
||||
- ✅ **Performance** : Tout en mémoire, très rapide
|
||||
- ⚠️ **Scalabilité** : En cas de scaling horizontal, chaque instance a son propre état (nécessiterait Redis pour partager)
|
||||
|
||||
### Recommandations
|
||||
|
||||
1. **Typing Indicators** : Pour la scalabilité horizontale, considérer Redis pour partager l'état entre instances
|
||||
2. **Delivered Status** : La latence est acceptable pour la plupart des cas d'usage
|
||||
3. **Monitoring** : Surveiller la taille de la HashMap des typing indicators en production
|
||||
4. **Cleanup** : Le task de monitoring nettoie automatiquement les entrées expirées
|
||||
|
||||
---
|
||||
|
||||
## 🧪 TESTS
|
||||
|
||||
### Tests unitaires
|
||||
|
||||
**Delivered Status** :
|
||||
- ✅ `test_mark_delivered_creates_status`
|
||||
- ✅ `test_mark_delivered_updates_existing`
|
||||
- ✅ `test_get_delivered_for_message`
|
||||
- ✅ `test_is_delivered`
|
||||
|
||||
**Typing Indicators** :
|
||||
- ✅ `test_typing_indicator_manager`
|
||||
- ✅ Tests de timeout (à implémenter)
|
||||
|
||||
### Tests d'intégration
|
||||
|
||||
**À implémenter** :
|
||||
- Test WebSocket : Client A tape → Client B reçoit event
|
||||
- Test WebSocket : Timeout après 3s → Client B reçoit `is_typing: false`
|
||||
- Test WebSocket : Delivered → Broadcast OK
|
||||
- Test WebSocket : Delivered sans permission → Refus
|
||||
|
||||
---
|
||||
|
||||
## 📚 RÉFÉRENCES
|
||||
|
||||
- **Migration SQL** : `migrations/004_delivered_status.sql`
|
||||
- **Manager Delivered** : `src/delivered_status.rs`
|
||||
- **Manager Typing** : `src/typing_indicator.rs`
|
||||
- **Handler WebSocket** : `src/websocket/handler.rs`
|
||||
- **Messages WebSocket** : `src/websocket/mod.rs`
|
||||
- **Audit initial** : `docs/AUDIT_DELIVERED_TYPING.md`
|
||||
|
||||
---
|
||||
|
||||
**✅ Implémentation complète — Prêt pour production**
|
||||
|
||||
|
|
@ -1,593 +0,0 @@
|
|||
# 📜 Message Search, History Pagination, and Offline Sync
|
||||
|
||||
**Date**: 2025-12-05
|
||||
**Version**: 1.0.0
|
||||
**Statut**: ✅ Implémenté
|
||||
|
||||
---
|
||||
|
||||
## 📋 Table des matières
|
||||
|
||||
1. [Vue d'ensemble](#vue-densemble)
|
||||
2. [History Pagination](#history-pagination)
|
||||
3. [Message Search](#message-search)
|
||||
4. [Offline Sync](#offline-sync)
|
||||
5. [Spécifications techniques](#spécifications-techniques)
|
||||
6. [Exemples d'utilisation](#exemples-dutilisation)
|
||||
7. [Limites et bonnes pratiques](#limites-et-bonnes-pratiques)
|
||||
8. [Impact sur l'UI](#impact-sur-lui)
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Vue d'ensemble
|
||||
|
||||
Ce document décrit trois fonctionnalités majeures ajoutées au `veza-chat-server` :
|
||||
|
||||
1. **History Pagination** : Pagination efficace de l'historique avec cursors `before`/`after`
|
||||
2. **Message Search** : Recherche textuelle de messages dans une conversation
|
||||
3. **Offline Sync** : Synchronisation des messages manquants depuis la dernière connexion
|
||||
|
||||
Toutes ces fonctionnalités sont :
|
||||
- ✅ Sécurisées (permissions strictes via `PermissionService`)
|
||||
- ✅ Performantes (index SQL optimisés)
|
||||
- ✅ Compatibles avec les statuts (edited, deleted, delivered, read)
|
||||
- ✅ Disponibles via WebSocket
|
||||
|
||||
---
|
||||
|
||||
## 📜 History Pagination
|
||||
|
||||
### Description
|
||||
|
||||
Permet de récupérer l'historique d'une conversation avec pagination par cursors basés sur `created_at`. Plus efficace que l'offset/limit classique car :
|
||||
- Supporte les insertions concurrentes
|
||||
- Meilleure performance avec les index
|
||||
- Pas de problèmes de doublons lors de nouvelles insertions
|
||||
|
||||
### Inbound WebSocket Message
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "FetchHistory",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"before": "2025-12-05T10:30:00Z",
|
||||
"after": null,
|
||||
"limit": 50
|
||||
}
|
||||
```
|
||||
|
||||
**Paramètres**:
|
||||
- `conversation_id` (UUID, requis) : ID de la conversation
|
||||
- `before` (DateTime ISO8601, optionnel) : Récupère les messages avant ce timestamp
|
||||
- `after` (DateTime ISO8601, optionnel) : Récupère les messages après ce timestamp
|
||||
- `limit` (usize, optionnel, défaut: 50, max: 100) : Nombre de messages à récupérer
|
||||
|
||||
**Règles**:
|
||||
- Si `before` est fourni : tri DESC (messages plus anciens)
|
||||
- Si `after` est fourni : tri ASC (messages plus récents)
|
||||
- Si les deux sont fournis : messages entre `after` et `before` (tri ASC)
|
||||
- Si aucun n'est fourni : messages les plus récents (tri DESC)
|
||||
- Les résultats sont **toujours retournés en ordre ASC** (du plus ancien au plus récent)
|
||||
|
||||
### Outbound WebSocket Message
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "HistoryChunk",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"messages": [
|
||||
{
|
||||
"id": "...",
|
||||
"conversation_id": "...",
|
||||
"sender_id": "...",
|
||||
"content": "Hello world",
|
||||
"created_at": "2025-12-05T10:00:00Z",
|
||||
"is_edited": false,
|
||||
"is_deleted": false,
|
||||
...
|
||||
}
|
||||
],
|
||||
"has_more_before": true,
|
||||
"has_more_after": false
|
||||
}
|
||||
```
|
||||
|
||||
**Champs**:
|
||||
- `messages` : Liste des messages (toujours triés ASC)
|
||||
- `has_more_before` : Indique s'il y a des messages plus anciens
|
||||
- `has_more_after` : Indique s'il y a des messages plus récents
|
||||
|
||||
### Exemples d'utilisation
|
||||
|
||||
#### Charger les messages les plus récents
|
||||
```json
|
||||
{
|
||||
"type": "FetchHistory",
|
||||
"conversation_id": "...",
|
||||
"before": null,
|
||||
"after": null,
|
||||
"limit": 50
|
||||
}
|
||||
```
|
||||
|
||||
#### Charger les messages plus anciens (scroll up)
|
||||
```json
|
||||
{
|
||||
"type": "FetchHistory",
|
||||
"conversation_id": "...",
|
||||
"before": "2025-12-05T10:00:00Z",
|
||||
"after": null,
|
||||
"limit": 50
|
||||
}
|
||||
```
|
||||
|
||||
#### Charger les nouveaux messages (scroll down)
|
||||
```json
|
||||
{
|
||||
"type": "FetchHistory",
|
||||
"conversation_id": "...",
|
||||
"before": null,
|
||||
"after": "2025-12-05T10:00:00Z",
|
||||
"limit": 50
|
||||
}
|
||||
```
|
||||
|
||||
### Index SQL
|
||||
|
||||
```sql
|
||||
CREATE INDEX idx_messages_conv_created_at
|
||||
ON messages(conversation_id, created_at DESC);
|
||||
|
||||
CREATE INDEX idx_messages_conv_created_not_deleted
|
||||
ON messages(conversation_id, created_at DESC)
|
||||
WHERE is_deleted = false;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔍 Message Search
|
||||
|
||||
### Description
|
||||
|
||||
Recherche textuelle de messages dans une conversation. Utilise `ILIKE` avec index trigram pour une recherche performante et insensible à la casse.
|
||||
|
||||
### Inbound WebSocket Message
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "SearchMessages",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"query": "hello world",
|
||||
"limit": 50,
|
||||
"offset": 0
|
||||
}
|
||||
```
|
||||
|
||||
**Paramètres**:
|
||||
- `conversation_id` (UUID, requis) : ID de la conversation
|
||||
- `query` (String, requis) : Terme de recherche (ne peut pas être vide)
|
||||
- `limit` (usize, optionnel, défaut: 50, max: 100) : Nombre de résultats par page
|
||||
- `offset` (usize, optionnel, défaut: 0) : Offset pour pagination
|
||||
|
||||
### Outbound WebSocket Message
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "SearchResults",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"messages": [
|
||||
{
|
||||
"id": "...",
|
||||
"content": "Hello world!",
|
||||
"created_at": "2025-12-05T10:00:00Z",
|
||||
...
|
||||
}
|
||||
],
|
||||
"query": "hello world",
|
||||
"total": 123
|
||||
}
|
||||
```
|
||||
|
||||
**Champs**:
|
||||
- `messages` : Liste des messages correspondants (triés par `created_at DESC`)
|
||||
- `query` : La requête de recherche originale
|
||||
- `total` : Nombre total de résultats (pour pagination)
|
||||
|
||||
### Exemples d'utilisation
|
||||
|
||||
#### Recherche simple
|
||||
```json
|
||||
{
|
||||
"type": "SearchMessages",
|
||||
"conversation_id": "...",
|
||||
"query": "meeting",
|
||||
"limit": 20,
|
||||
"offset": 0
|
||||
}
|
||||
```
|
||||
|
||||
#### Pagination des résultats
|
||||
```json
|
||||
{
|
||||
"type": "SearchMessages",
|
||||
"conversation_id": "...",
|
||||
"query": "meeting",
|
||||
"limit": 20,
|
||||
"offset": 20
|
||||
}
|
||||
```
|
||||
|
||||
### Index SQL
|
||||
|
||||
```sql
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;
|
||||
|
||||
CREATE INDEX idx_messages_content_trgm
|
||||
ON messages USING GIN(content gin_trgm_ops);
|
||||
|
||||
CREATE INDEX idx_messages_conv_content_trgm
|
||||
ON messages USING GIN(conversation_id, content gin_trgm_ops); -- nécessite CREATE EXTENSION btree_gin : sans elle, la colonne uuid n'a pas de classe d'opérateurs GIN
|
||||
```
|
||||
|
||||
### Comportement
|
||||
|
||||
- ✅ Recherche insensible à la casse (`ILIKE`)
|
||||
- ✅ Recherche partielle (contient le terme)
|
||||
- ✅ Exclut les messages supprimés par défaut
|
||||
- ✅ Tri par `created_at DESC` (plus récents en premier)
|
||||
|
||||
---
|
||||
|
||||
## 🔄 Offline Sync
|
||||
|
||||
### Description
|
||||
|
||||
Synchronise tous les messages manquants depuis la dernière connexion. Inclut :
|
||||
- Messages créés depuis `since`
|
||||
- Messages édités depuis `since` (même si créés avant)
|
||||
- Messages supprimés depuis `since` (même si créés avant)
|
||||
|
||||
Permet aux clients mobiles d'avoir une synchronisation fiable après une déconnexion.
|
||||
|
||||
### Inbound WebSocket Message
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "SyncMessages",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"since": "2025-12-05T09:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Paramètres**:
|
||||
- `conversation_id` (UUID, requis) : ID de la conversation
|
||||
- `since` (DateTime ISO8601, requis) : Timestamp de la dernière synchronisation
|
||||
|
||||
### Outbound WebSocket Message
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "SyncChunk",
|
||||
"conversation_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"messages": [
|
||||
{
|
||||
"id": "...",
|
||||
"content": "New message",
|
||||
"created_at": "2025-12-05T10:00:00Z",
|
||||
"is_edited": false,
|
||||
"is_deleted": false,
|
||||
...
|
||||
},
|
||||
{
|
||||
"id": "...",
|
||||
"content": "Edited content",
|
||||
"created_at": "2025-12-05T08:00:00Z",
|
||||
"is_edited": true,
|
||||
"edited_at": "2025-12-05T10:30:00Z",
|
||||
...
|
||||
},
|
||||
{
|
||||
"id": "...",
|
||||
"content": "Deleted message",
|
||||
"created_at": "2025-12-05T08:30:00Z",
|
||||
"is_deleted": true,
|
||||
"deleted_at": "2025-12-05T10:45:00Z",
|
||||
...
|
||||
}
|
||||
],
|
||||
"last_sync": "2025-12-05T11:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Champs**:
|
||||
- `messages` : Tous les messages créés ou modifiés depuis `since` (triés par `created_at ASC`)
|
||||
- `last_sync` : Timestamp actuel (à utiliser pour la prochaine sync)
|
||||
|
||||
### Exemples d'utilisation
|
||||
|
||||
#### Synchronisation initiale
|
||||
```json
|
||||
{
|
||||
"type": "SyncMessages",
|
||||
"conversation_id": "...",
|
||||
"since": "2025-12-05T00:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
#### Synchronisation après déconnexion
|
||||
```json
|
||||
{
|
||||
"type": "SyncMessages",
|
||||
"conversation_id": "...",
|
||||
"since": "2025-12-05T09:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Index SQL
|
||||
|
||||
```sql
|
||||
CREATE INDEX idx_messages_conv_created_sync
|
||||
ON messages(conversation_id, created_at ASC)
|
||||
WHERE is_deleted = false;
|
||||
|
||||
CREATE INDEX idx_messages_conv_updated_sync
|
||||
ON messages(conversation_id, updated_at ASC)
|
||||
WHERE is_deleted = false; -- ⚠️ NOTE(review) : incohérent avec le comportement documenté — la sync doit inclure les messages supprimés depuis `since`, or cet index partiel les exclut ; à confirmer/corriger
|
||||
```
|
||||
|
||||
### Comportement
|
||||
|
||||
- ✅ Inclut tous les messages créés depuis `since`
|
||||
- ✅ Inclut tous les messages édités depuis `since` (même créés avant)
|
||||
- ✅ Inclut tous les messages supprimés depuis `since` (même créés avant)
|
||||
- ✅ Tri par `created_at ASC` (du plus ancien au plus récent)
|
||||
- ✅ Le client doit gérer les updates (édits) et deletes (suppressions)
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Spécifications techniques
|
||||
|
||||
### Repository Methods
|
||||
|
||||
#### `fetch_history`
|
||||
```rust
|
||||
pub async fn fetch_history(
|
||||
&self,
|
||||
conversation_id: Uuid,
|
||||
before: Option<DateTime<Utc>>,
|
||||
after: Option<DateTime<Utc>>,
|
||||
limit: usize,
|
||||
include_deleted: bool,
|
||||
) -> Result<(Vec<Message>, bool, bool)>
|
||||
```
|
||||
|
||||
Retourne : `(messages, has_more_before, has_more_after)`
|
||||
|
||||
#### `search_messages`
|
||||
```rust
|
||||
pub async fn search_messages(
|
||||
&self,
|
||||
conversation_id: Uuid,
|
||||
query: &str,
|
||||
limit: usize,
|
||||
offset: usize,
|
||||
include_deleted: bool,
|
||||
) -> Result<(Vec<Message>, i64)>
|
||||
```
|
||||
|
||||
Retourne : `(messages, total_count)`
|
||||
|
||||
#### `fetch_since`
|
||||
```rust
|
||||
pub async fn fetch_since(
|
||||
&self,
|
||||
conversation_id: Uuid,
|
||||
since: DateTime<Utc>,
|
||||
) -> Result<Vec<Message>>
|
||||
```
|
||||
|
||||
### Permissions
|
||||
|
||||
Toutes les fonctionnalités nécessitent :
|
||||
- `can_read_conversation(user_id, conversation_id)` : L'utilisateur doit avoir accès à la conversation
|
||||
|
||||
### Erreurs possibles
|
||||
|
||||
- `ChatError::Unauthorized` : Pas de permission pour lire la conversation
|
||||
- `ChatError::ValidationError` : Query de recherche vide
|
||||
- `ChatError::InternalError` : Erreur de base de données
|
||||
|
||||
---
|
||||
|
||||
## 📱 Exemples d'utilisation
|
||||
|
||||
### Client Web (React)
|
||||
|
||||
```typescript
|
||||
// History Pagination
|
||||
const fetchHistory = async (conversationId: string, before?: Date) => {
|
||||
ws.send(JSON.stringify({
|
||||
type: "FetchHistory",
|
||||
conversation_id: conversationId,
|
||||
before: before?.toISOString(),
|
||||
after: null,
|
||||
limit: 50
|
||||
}));
|
||||
};
|
||||
|
||||
// Message Search
|
||||
const searchMessages = async (conversationId: string, query: string) => {
|
||||
ws.send(JSON.stringify({
|
||||
type: "SearchMessages",
|
||||
conversation_id: conversationId,
|
||||
query: query,
|
||||
limit: 50,
|
||||
offset: 0
|
||||
}));
|
||||
};
|
||||
|
||||
// Offline Sync
|
||||
const syncMessages = async (conversationId: string, lastSync: Date) => {
|
||||
ws.send(JSON.stringify({
|
||||
type: "SyncMessages",
|
||||
conversation_id: conversationId,
|
||||
since: lastSync.toISOString()
|
||||
}));
|
||||
};
|
||||
```
|
||||
|
||||
### Client Mobile (React Native)
|
||||
|
||||
```typescript
|
||||
// Sync après reconnexion
|
||||
const syncAfterReconnect = async (conversationId: string) => {
|
||||
const lastSync = await AsyncStorage.getItem(`last_sync_${conversationId}`);
|
||||
const since = lastSync ? new Date(lastSync) : new Date(0);
|
||||
|
||||
ws.send(JSON.stringify({
|
||||
type: "SyncMessages",
|
||||
conversation_id: conversationId,
|
||||
since: since.toISOString()
|
||||
}));
|
||||
|
||||
// Écouter SyncChunk et mettre à jour last_sync
|
||||
ws.on('message', (msg) => {
|
||||
if (msg.type === 'SyncChunk') {
|
||||
AsyncStorage.setItem(`last_sync_${conversationId}`, msg.last_sync);
|
||||
// Mettre à jour l'UI avec les messages
|
||||
}
|
||||
});
|
||||
};
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Limites et bonnes pratiques
|
||||
|
||||
### Limites
|
||||
|
||||
1. **History Pagination** :
|
||||
- `limit` max : 100 messages
|
||||
- Utiliser `before`/`after` plutôt que offset pour de meilleures performances
|
||||
|
||||
2. **Message Search** :
|
||||
- `limit` max : 100 résultats
|
||||
- `query` minimum : 1 caractère
|
||||
- Recherche partielle (contient), pas de recherche exacte
|
||||
|
||||
3. **Offline Sync** :
|
||||
- Pas de limite sur le nombre de messages (peut être volumineux)
|
||||
- Le client doit gérer les updates et deletes
|
||||
|
||||
### Bonnes pratiques
|
||||
|
||||
1. **History Pagination** :
|
||||
- Toujours utiliser `before` pour charger plus d'anciens messages
|
||||
- Utiliser `after` pour charger les nouveaux messages
|
||||
- Stocker le `created_at` du premier/dernier message pour la pagination
|
||||
|
||||
2. **Message Search** :
|
||||
- Implémenter un debounce sur la recherche (300-500ms)
|
||||
- Limiter la longueur minimale de la query (3 caractères recommandé)
|
||||
- Afficher un indicateur de chargement pendant la recherche
|
||||
|
||||
3. **Offline Sync** :
|
||||
- Stocker `last_sync` localement (AsyncStorage, localStorage)
|
||||
- Sync automatique après reconnexion
|
||||
- Gérer les conflits si un message est édité localement et sur le serveur
|
||||
|
||||
---
|
||||
|
||||
## 🎨 Impact sur l'UI
|
||||
|
||||
### History Pagination
|
||||
|
||||
**Scroll infini vers le haut** :
|
||||
```typescript
|
||||
const [messages, setMessages] = useState<Message[]>([]);
|
||||
const [hasMore, setHasMore] = useState(true);
|
||||
|
||||
const loadMore = async () => {
|
||||
if (!hasMore) return;
|
||||
|
||||
const oldestMessage = messages[0];
|
||||
const before = oldestMessage?.created_at;
|
||||
|
||||
fetchHistory(conversationId, before).then((chunk) => {
|
||||
setMessages([...chunk.messages, ...messages]);
|
||||
setHasMore(chunk.has_more_before);
|
||||
});
|
||||
};
|
||||
```
|
||||
|
||||
### Message Search
|
||||
|
||||
**Barre de recherche avec résultats** :
|
||||
```typescript
|
||||
const [searchQuery, setSearchQuery] = useState("");
|
||||
const [searchResults, setSearchResults] = useState<Message[]>([]);
|
||||
|
||||
const handleSearch = debounce((query: string) => {
|
||||
if (query.length < 3) return;
|
||||
|
||||
searchMessages(conversationId, query).then((results) => {
|
||||
setSearchResults(results.messages);
|
||||
});
|
||||
}, 300);
|
||||
```
|
||||
|
||||
### Offline Sync
|
||||
|
||||
**Indicateur de synchronisation** :
|
||||
```typescript
|
||||
const [isSyncing, setIsSyncing] = useState(false);
|
||||
|
||||
const sync = async () => {
|
||||
setIsSyncing(true);
|
||||
const lastSync = await getLastSync(conversationId);
|
||||
syncMessages(conversationId, lastSync);
|
||||
// setIsSyncing(false) dans le handler SyncChunk
|
||||
};
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📊 Performance
|
||||
|
||||
### Index utilisés
|
||||
|
||||
- `idx_messages_conv_created_at` : Pagination efficace
|
||||
- `idx_messages_content_trgm` : Recherche textuelle rapide
|
||||
- `idx_messages_conv_created_sync` : Sync optimisée
|
||||
|
||||
### Métriques attendues
|
||||
|
||||
- **History Pagination** : < 50ms pour 50 messages
|
||||
- **Message Search** : < 100ms pour 1000 messages
|
||||
- **Offline Sync** : < 200ms pour 100 messages
|
||||
|
||||
---
|
||||
|
||||
## 🔐 Sécurité
|
||||
|
||||
- ✅ Toutes les fonctionnalités vérifient les permissions via `PermissionService`
|
||||
- ✅ Les messages supprimés sont exclus par défaut (sauf si `include_deleted = true`)
|
||||
- ✅ Validation des paramètres (query non vide, limit max, etc.)
|
||||
- ✅ Pas d'injection SQL (utilisation de paramètres liés)
|
||||
|
||||
---
|
||||
|
||||
## 📝 Migration
|
||||
|
||||
Pour activer ces fonctionnalités, exécuter :
|
||||
|
||||
```bash
|
||||
psql -d veza_db -f migrations/006_history_search_sync.sql
|
||||
```
|
||||
|
||||
Cette migration crée tous les index nécessaires.
|
||||
|
||||
---
|
||||
|
||||
**Fin du document**
|
||||
|
||||
|
|
@ -1,444 +0,0 @@
|
|||
# Documentation : Édition et Suppression de Messages
|
||||
|
||||
**Date de création** : 2025-12-05
|
||||
**Version** : 1.0.0
|
||||
**Statut** : ✅ Implémenté
|
||||
|
||||
## Vue d'ensemble
|
||||
|
||||
Ce document décrit l'implémentation complète de l'édition et de la suppression (soft delete) de messages dans le serveur de chat Veza. Ces fonctionnalités sont essentielles pour un système de chat moderne et respectent les meilleures pratiques de sécurité, permissions et cohérence temps réel.
|
||||
|
||||
## Table des matières
|
||||
|
||||
1. [Architecture](#architecture)
|
||||
2. [Événements WebSocket](#événements-websocket)
|
||||
3. [Permissions](#permissions)
|
||||
4. [Base de données](#base-de-données)
|
||||
5. [Services](#services)
|
||||
6. [Exemples d'utilisation](#exemples-dutilisation)
|
||||
7. [Conséquences UX](#conséquences-ux)
|
||||
8. [Impact sur la recherche et pagination](#impact-sur-la-recherche-et-pagination)
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
### Composants principaux
|
||||
|
||||
1. **Migration SQL** (`migrations/005_message_edit_delete.sql`)
|
||||
- Ajoute `deleted_at` pour la traçabilité
|
||||
- Index pour les requêtes de nettoyage
|
||||
|
||||
2. **PermissionService** (`src/security/permission.rs`)
|
||||
- `can_edit_message()` : Vérifie les permissions d'édition
|
||||
- `can_delete_message()` : Vérifie les permissions de suppression
|
||||
|
||||
3. **MessageEditService** (`src/services/message_edit_service.rs`)
|
||||
- `edit_message()` : Édite un message avec validation
|
||||
- `delete_message()` : Supprime un message (soft delete)
|
||||
|
||||
4. **MessageRepository** (`src/repository/message_repository.rs`)
|
||||
- `update()` : Met à jour le contenu d'un message
|
||||
- `delete()` : Marque un message comme supprimé
|
||||
- `get_by_id_including_deleted()` : Récupère même les messages supprimés
|
||||
|
||||
5. **WebSocket Handlers** (`src/websocket/handler.rs`)
|
||||
- Gère les événements `EditMessage` et `DeleteMessage`
|
||||
- Broadcast les événements `MessageEdited` et `MessageDeleted`
|
||||
|
||||
---
|
||||
|
||||
## Événements WebSocket
|
||||
|
||||
### Inbound Events (Client → Serveur)
|
||||
|
||||
#### EditMessage
|
||||
|
||||
Édite un message existant.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "EditMessage",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000",
|
||||
"new_content": "Nouveau contenu du message"
|
||||
}
|
||||
```
|
||||
|
||||
**Règles de validation** :
|
||||
- `new_content` doit être différent du contenu précédent
|
||||
- `new_content` ne peut pas être vide (après trim)
|
||||
- `new_content` ne peut pas dépasser 4000 caractères
|
||||
- Le message ne doit pas être supprimé
|
||||
- L'utilisateur doit avoir les permissions d'édition
|
||||
|
||||
#### DeleteMessage
|
||||
|
||||
Supprime un message (soft delete).
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "DeleteMessage",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000"
|
||||
}
|
||||
```
|
||||
|
||||
**Règles de validation** :
|
||||
- L'utilisateur doit avoir les permissions de suppression
|
||||
- L'opération est idempotente (supprimer un message déjà supprimé retourne OK)
|
||||
|
||||
### Outbound Events (Serveur → Client)
|
||||
|
||||
#### MessageEdited
|
||||
|
||||
Notifie tous les clients d'une conversation qu'un message a été édité.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "MessageEdited",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000",
|
||||
"editor_id": "770e8400-e29b-41d4-a716-446655440000",
|
||||
"edited_at": "2025-12-05T10:30:00Z",
|
||||
"new_content": "Nouveau contenu du message"
|
||||
}
|
||||
```
|
||||
|
||||
#### MessageDeleted
|
||||
|
||||
Notifie tous les clients d'une conversation qu'un message a été supprimé.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "MessageDeleted",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000",
|
||||
"deleter_id": "770e8400-e29b-41d4-a716-446655440000",
|
||||
"deleted_at": "2025-12-05T10:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Permissions
|
||||
|
||||
### Règles d'édition
|
||||
|
||||
Un utilisateur peut éditer un message si :
|
||||
|
||||
1. **Il est l'auteur du message** : L'auteur peut toujours éditer son propre message
|
||||
2. **Il est admin/modérateur de la conversation** : Les admins et modérateurs peuvent éditer n'importe quel message dans leur conversation
|
||||
3. **Le message n'est pas supprimé** : Un message supprimé ne peut jamais être édité
|
||||
|
||||
### Règles de suppression
|
||||
|
||||
Un utilisateur peut supprimer un message si :
|
||||
|
||||
1. **Il est l'auteur du message** : L'auteur peut toujours supprimer son propre message
|
||||
2. **Il est admin/modérateur de la conversation** : Les admins et modérateurs peuvent supprimer n'importe quel message dans leur conversation
|
||||
|
||||
### Limitations de temps
|
||||
|
||||
Actuellement, il n'y a pas de limitation de temps pour l'édition ou la suppression. Un message peut être édité ou supprimé à tout moment tant que les permissions sont respectées.
|
||||
|
||||
**Note** : Pour une implémentation future, on pourrait ajouter :
|
||||
- Fenêtre d'édition limitée (ex: 15 minutes après l'envoi)
|
||||
- Fenêtre de suppression limitée (ex: 5 minutes après l'envoi)
|
||||
|
||||
---
|
||||
|
||||
## Base de données
|
||||
|
||||
### Schéma
|
||||
|
||||
La table `messages` contient les colonnes suivantes pour l'édition et la suppression :
|
||||
|
||||
```sql
|
||||
CREATE TABLE messages (
|
||||
id UUID PRIMARY KEY,
|
||||
conversation_id UUID NOT NULL,
|
||||
sender_id UUID NOT NULL,
|
||||
content TEXT NOT NULL,
|
||||
-- ... autres colonnes ...
|
||||
is_edited BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
edited_at TIMESTAMPTZ,
|
||||
deleted_at TIMESTAMPTZ,
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
```
|
||||
|
||||
### Migration
|
||||
|
||||
La migration `005_message_edit_delete.sql` ajoute :
|
||||
- `deleted_at` : Timestamp de suppression (pour la traçabilité)
|
||||
- Index sur `deleted_at` pour les requêtes de nettoyage
|
||||
- Index sur `edited_at` pour les requêtes de recherche
|
||||
|
||||
### Soft Delete
|
||||
|
||||
Les messages ne sont **jamais supprimés physiquement** de la base de données. Au lieu de cela :
|
||||
- `is_deleted` est mis à `true`
|
||||
- `deleted_at` est mis à `NOW()`
|
||||
- Le contenu reste dans la base de données (pour audit futur)
|
||||
|
||||
**Note** : Pour une implémentation future, on pourrait :
|
||||
- Créer une table `message_archive` pour stocker les messages supprimés
|
||||
- Vider le contenu du message après suppression (le remplacer par `""` — la colonne `content` étant déclarée `NOT NULL`, elle ne peut pas être mise à `NULL` sans migration du schéma)
|
||||
|
||||
---
|
||||
|
||||
## Services
|
||||
|
||||
### MessageEditService
|
||||
|
||||
Service centralisé pour l'édition et la suppression de messages.
|
||||
|
||||
#### `edit_message(user_id, message_id, new_content) -> Result<Message>`
|
||||
|
||||
Édite un message avec validation complète.
|
||||
|
||||
**Validation** :
|
||||
1. Contenu non vide (après trim)
|
||||
2. Longueur maximale (4000 caractères)
|
||||
3. Contenu différent de l'original
|
||||
4. Message non supprimé
|
||||
5. Permissions d'édition
|
||||
|
||||
**Mise à jour DB** :
|
||||
- `content` = nouveau contenu
|
||||
- `is_edited` = `true`
|
||||
- `edited_at` = `NOW()`
|
||||
- `updated_at` = `NOW()`
|
||||
|
||||
#### `delete_message(user_id, message_id) -> Result<Message>`
|
||||
|
||||
Supprime un message (soft delete).
|
||||
|
||||
**Validation** :
|
||||
1. Permissions de suppression
|
||||
|
||||
**Mise à jour DB** :
|
||||
- `is_deleted` = `true`
|
||||
- `deleted_at` = `NOW()`
|
||||
- `updated_at` = `NOW()`
|
||||
|
||||
**Idempotence** : Si le message est déjà supprimé, retourne le message tel quel sans erreur.
|
||||
|
||||
---
|
||||
|
||||
## Exemples d'utilisation
|
||||
|
||||
### Édition d'un message
|
||||
|
||||
**Client** :
|
||||
```json
|
||||
{
|
||||
"type": "EditMessage",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000",
|
||||
"new_content": "Correction : Nouveau contenu"
|
||||
}
|
||||
```
|
||||
|
||||
**Réponse (confirmation)** :
|
||||
```json
|
||||
{
|
||||
"type": "ActionConfirmed",
|
||||
"action": "message_edited",
|
||||
"success": true
|
||||
}
|
||||
```
|
||||
|
||||
**Broadcast (tous les clients de la conversation)** :
|
||||
```json
|
||||
{
|
||||
"type": "MessageEdited",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000",
|
||||
"editor_id": "770e8400-e29b-41d4-a716-446655440000",
|
||||
"edited_at": "2025-12-05T10:30:00Z",
|
||||
"new_content": "Correction : Nouveau contenu"
|
||||
}
|
||||
```
|
||||
|
||||
### Suppression d'un message
|
||||
|
||||
**Client** :
|
||||
```json
|
||||
{
|
||||
"type": "DeleteMessage",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000"
|
||||
}
|
||||
```
|
||||
|
||||
**Réponse (confirmation)** :
|
||||
```json
|
||||
{
|
||||
"type": "ActionConfirmed",
|
||||
"action": "message_deleted",
|
||||
"success": true
|
||||
}
|
||||
```
|
||||
|
||||
**Broadcast (tous les clients de la conversation)** :
|
||||
```json
|
||||
{
|
||||
"type": "MessageDeleted",
|
||||
"message_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"conversation_id": "660e8400-e29b-41d4-a716-446655440000",
|
||||
"deleter_id": "770e8400-e29b-41d4-a716-446655440000",
|
||||
"deleted_at": "2025-12-05T10:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Gestion des erreurs
|
||||
|
||||
**Permission refusée** :
|
||||
```json
|
||||
{
|
||||
"type": "Error",
|
||||
"message": "Permissions insuffisantes pour edit_message dans la conversation 660e8400-e29b-41d4-a716-446655440000"
|
||||
}
|
||||
```
|
||||
|
||||
**Message introuvable** :
|
||||
```json
|
||||
{
|
||||
"type": "Error",
|
||||
"message": "Message 550e8400-e29b-41d4-a716-446655440000 introuvable"
|
||||
}
|
||||
```
|
||||
|
||||
**Message supprimé (tentative d'édition)** :
|
||||
```json
|
||||
{
|
||||
"type": "Error",
|
||||
"message": "Un message supprimé ne peut pas être édité"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Conséquences UX
|
||||
|
||||
### Affichage des messages édités
|
||||
|
||||
Lorsqu'un message est édité, l'interface utilisateur doit :
|
||||
|
||||
1. **Afficher le nouveau contenu** : Remplacer l'ancien contenu par le nouveau
|
||||
2. **Indicateur visuel** : Afficher un indicateur "Édité" (ex: "✏️ Édité")
|
||||
3. **Timestamp d'édition** : Optionnellement afficher `edited_at` au survol
|
||||
4. **Historique** : Pour une implémentation future, on pourrait afficher l'historique des éditions
|
||||
|
||||
**Exemple d'affichage** :
|
||||
```
|
||||
[Utilisateur] Message corrigé (nouveau contenu) ✏️ Édité
|
||||
```
|
||||
|
||||
### Affichage des messages supprimés
|
||||
|
||||
Lorsqu'un message est supprimé, l'interface utilisateur doit :
|
||||
|
||||
1. **Placeholder** : Afficher un placeholder comme "Message supprimé" ou "Ce message a été supprimé"
|
||||
2. **Style visuel** : Utiliser un style atténué (gris, italique)
|
||||
3. **Informations limitées** : Ne pas afficher le contenu original
|
||||
4. **Timestamp** : Optionnellement afficher `deleted_at`
|
||||
|
||||
**Exemple d'affichage** :
|
||||
```
|
||||
[Utilisateur] Ce message a été supprimé
|
||||
```
|
||||
|
||||
### Cohérence multi-device
|
||||
|
||||
Les événements WebSocket garantissent que :
|
||||
- Tous les clients connectés à la conversation reçoivent les mises à jour en temps réel
|
||||
- Les modifications sont synchronisées instantanément
|
||||
- Pas besoin de rafraîchir la page
|
||||
|
||||
---
|
||||
|
||||
## Impact sur la recherche et pagination
|
||||
|
||||
### Recherche
|
||||
|
||||
Les messages supprimés sont **exclus** des résultats de recherche par défaut.
|
||||
|
||||
**Requête SQL** :
|
||||
```sql
|
||||
SELECT * FROM messages
|
||||
WHERE conversation_id = $1
|
||||
AND is_deleted = false
|
||||
AND content ILIKE $2
|
||||
ORDER BY created_at DESC;
|
||||
```
|
||||
|
||||
**Note** : Pour une implémentation future, on pourrait :
|
||||
- Permettre aux admins de rechercher dans les messages supprimés
|
||||
- Créer une vue `messages_active` qui exclut automatiquement les messages supprimés
|
||||
|
||||
### Pagination
|
||||
|
||||
Les messages supprimés sont **exclus** de la pagination par défaut.
|
||||
|
||||
**Requête SQL** :
|
||||
```sql
|
||||
SELECT * FROM messages
|
||||
WHERE conversation_id = $1
|
||||
AND is_deleted = false
|
||||
ORDER BY created_at DESC
|
||||
LIMIT $2 OFFSET $3;
|
||||
```
|
||||
|
||||
**Placeholder dans la liste** : Si un message est supprimé pendant qu'un utilisateur consulte l'historique, il peut être remplacé par un placeholder dans la liste.
|
||||
|
||||
### Impact sur les métriques
|
||||
|
||||
- Les messages supprimés ne sont pas comptés dans les statistiques de messages
|
||||
- Les messages édités sont comptés comme des messages normaux (pas de double comptage)
|
||||
|
||||
---
|
||||
|
||||
## Tests
|
||||
|
||||
Les tests sont disponibles dans `tests/chat_edit_delete.rs` :
|
||||
|
||||
- ✅ Édition par l'auteur
|
||||
- ✅ Édition interdite pour un non-auteur
|
||||
- ✅ Édition interdite pour un message supprimé
|
||||
- ✅ Édition avec contenu identique interdite
|
||||
- ✅ Édition avec contenu vide interdite
|
||||
- ✅ Suppression par l'auteur
|
||||
- ✅ Suppression par un admin
|
||||
- ✅ Suppression interdite pour un non-auteur
|
||||
- ✅ Suppression idempotente
|
||||
- ✅ Validation de la longueur maximale
|
||||
|
||||
**Note** : Les tests nécessitent une base de données de test et sont marqués avec `#[ignore]`.
|
||||
|
||||
---
|
||||
|
||||
## Améliorations futures
|
||||
|
||||
1. **Limitation de temps** : Fenêtre d'édition/suppression limitée
|
||||
2. **Historique d'édition** : Stocker l'historique des modifications
|
||||
3. **Archive de messages** : Table séparée pour les messages supprimés
|
||||
4. **Raison de suppression** : Champ optionnel pour la raison de suppression (modération)
|
||||
5. **Recherche dans les supprimés** : Permettre aux admins de rechercher dans les messages supprimés
|
||||
6. **Notifications** : Notifier l'auteur lorsqu'un admin supprime son message
|
||||
|
||||
---
|
||||
|
||||
## Références
|
||||
|
||||
- Migration : `migrations/005_message_edit_delete.sql`
|
||||
- Service : `src/services/message_edit_service.rs`
|
||||
- Permissions : `src/security/permission.rs`
|
||||
- Repository : `src/repository/message_repository.rs`
|
||||
- WebSocket : `src/websocket/handler.rs`
|
||||
- Tests : `tests/chat_edit_delete.rs`
|
||||
|
||||
|
|
@ -1,241 +0,0 @@
|
|||
# 🎯 CHAT SERVER — ZERO PANIC CLEANUP
|
||||
|
||||
**Date** : 2025-01-27
|
||||
**Objectif** : Éliminer tous les `unwrap()` / `expect()` déclenchables par des inputs extérieurs
|
||||
**Status** : 🔄 En cours
|
||||
|
||||
---
|
||||
|
||||
## 📊 RÉSUMÉ EXÉCUTIF
|
||||
|
||||
| Catégorie | 🔴 Critique | 🟠 Moyen | 🟢 Acceptable | Total |
|
||||
|-----------|-------------|----------|---------------|-------|
|
||||
| **Config & Init** | 2 | 1 | 0 | 3 |
|
||||
| **DB** | 0 | 0 | 0 | 0 |
|
||||
| **JWT & Auth** | 2 | 0 | 0 | 2 |
|
||||
| **WebSocket & Handlers** | 0 | 0 | 0 | 0 |
|
||||
| **Managers** | 3 | 0 | 0 | 3 |
|
||||
| **Security/Regex** | 0 | 0 | 70+ | 70+ |
|
||||
| **Tests** | 0 | 0 | 30+ | 30+ |
|
||||
| **TOTAL** | **7** | **1** | **100+** | **108+** |
|
||||
|
||||
---
|
||||
|
||||
## 🔴 CRITIQUE — À CORRIGER IMMÉDIATEMENT
|
||||
|
||||
### 1. Config & Init
|
||||
|
||||
#### `main.rs:127` — Prometheus recorder
|
||||
```rust
|
||||
let prometheus_handle = builder
|
||||
.install_recorder()
|
||||
.expect("failed to install Prometheus recorder");
|
||||
```
|
||||
- **Risque** : 🔴 Peut échouer si Prometheus est mal configuré
|
||||
- **Impact** : Crash au démarrage
|
||||
- **Solution** : Retourner `ChatError::Configuration` et loguer l'erreur
|
||||
|
||||
#### `main.rs:148` — Database pool required
|
||||
```rust
|
||||
let pool_ref = database_pool.as_ref().expect("Database pool is required");
|
||||
```
|
||||
- **Risque** : 🔴 Crash si DB pool n'est pas initialisé (même si c'est optionnel)
|
||||
- **Impact** : Crash au démarrage si DB down
|
||||
- **Solution** : Vérifier `if let Some(pool) = database_pool.as_ref()` et retourner erreur appropriée
|
||||
|
||||
#### `main.rs:326` — EventBus unwrap
|
||||
```rust
|
||||
if state.event_bus.is_none() || !state.event_bus.as_ref().unwrap().is_enabled {
|
||||
```
|
||||
- **Risque** : 🔴 Panic si `event_bus` est `None` après le check
|
||||
- **Impact** : Panic dans readiness check
|
||||
- **Solution** : Utiliser `if let Some(ref bus) = state.event_bus`
|
||||
|
||||
### 2. JWT & Auth
|
||||
|
||||
#### `jwt_manager.rs:516,529,535,545,553,565,577,589,592,598` — Tests avec unwrap
|
||||
- **Risque** : 🔴 Tests qui peuvent panic
|
||||
- **Impact** : Tests instables
|
||||
- **Solution** : Utiliser `?` et propager les erreurs dans les tests
|
||||
|
||||
#### `auth.rs:312-313` — SystemTime duration_since
|
||||
```rust
|
||||
exp: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()) + 3600,
|
||||
iat: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
|
||||
```
|
||||
- **Risque** : 🔴 Panic si l'horloge système est antérieure à l'epoch Unix (1970) — `duration_since(UNIX_EPOCH)` retourne `Err` uniquement dans ce cas (rare mais possible)
|
||||
- **Impact** : Panic lors de la création de tokens de test
|
||||
- **Solution** : Utiliser `chrono::Utc::now()` ou gérer l'erreur explicitement
|
||||
|
||||
### 3. Managers
|
||||
|
||||
#### `authentication.rs:177` — Session get unwrap
|
||||
```rust
|
||||
Ok(self.sessions.get(&user_id).unwrap())
|
||||
```
|
||||
- **Risque** : 🔴 Panic si la session n'existe pas après insertion (race condition)
|
||||
- **Impact** : Panic lors de la création de session
|
||||
- **Solution** : Utiliser `ok_or_else` avec `ChatError::Internal`
|
||||
|
||||
#### `core/advanced_rate_limiter.rs:378,457` — Bucket get_mut unwrap
|
||||
```rust
|
||||
let bucket = ip_limiter.buckets.get_mut(limit_type).unwrap();
|
||||
let bucket = user_limiter.buckets.get_mut(limit_type).unwrap();
|
||||
```
|
||||
- **Risque** : 🔴 Panic si le `limit_type` n'existe pas dans la HashMap
|
||||
- **Impact** : Panic lors du rate limiting
|
||||
- **Solution** : Utiliser `get_or_insert_with` ou vérifier l'existence
|
||||
|
||||
#### `security_legacy.rs:409` — User actions get_mut unwrap
|
||||
```rust
|
||||
let actions = self.user_actions.get_mut(&key).unwrap();
|
||||
```
|
||||
- **Risque** : 🔴 Panic si la clé n'existe pas
|
||||
- **Impact** : Panic lors de la gestion des actions utilisateur
|
||||
- **Solution** : Utiliser `entry().or_insert_with()` ou vérifier l'existence
|
||||
|
||||
---
|
||||
|
||||
## 🟠 MOYEN — À CORRIGER
|
||||
|
||||
### 1. Config & Init
|
||||
|
||||
#### `lib.rs:42` — Unwrap dans lib
|
||||
- **Risque** : 🟠 Peut échouer selon le contexte
|
||||
- **Impact** : Crash au démarrage
|
||||
- **Solution** : Retourner une erreur appropriée
|
||||
|
||||
---
|
||||
|
||||
## 🟢 ACCEPTABLE — Regex patterns (statiques)
|
||||
|
||||
### `security_legacy.rs:37-101` — 70+ Regex::new().unwrap()
|
||||
|
||||
Ces `unwrap()` sont **acceptables** car :
|
||||
- Les patterns sont **statiques** et compilés au démarrage
|
||||
- Ils ne peuvent pas échouer sauf si le code est mal écrit (bug interne)
|
||||
- Ils sont dans un contexte d'initialisation de sécurité
|
||||
|
||||
**Recommandation** : Documenter explicitement pourquoi ils sont sûrs, ou utiliser `once_cell::sync::Lazy` (ou la crate `lazy_static`, plus ancienne) pour une compilation paresseuse et une meilleure gestion des patterns.
|
||||
|
||||
---
|
||||
|
||||
## 🟢 ACCEPTABLE — Tests
|
||||
|
||||
### Tests avec `unwrap()` / `expect()`
|
||||
|
||||
Les tests dans :
|
||||
- `jwt_manager.rs` (tests)
|
||||
- `config.rs` (tests)
|
||||
- `delivered_status.rs` (tests)
|
||||
- `read_receipts.rs` (tests)
|
||||
- `repository/tests.rs` (tests)
|
||||
- `security/csrf.rs` (tests)
|
||||
- `rate_limiter.rs` (tests)
|
||||
- `message_store.rs` (tests)
|
||||
- `core/rich_messages.rs` (tests)
|
||||
- `chat_management.rs` (tests)
|
||||
- `services/room_service.rs` (tests commentés)
|
||||
- `services/message_edit_service.rs` (tests commentés)
|
||||
|
||||
**Recommandation** : Les `unwrap()` dans les tests sont généralement acceptables, mais on peut améliorer en utilisant `?` pour propager les erreurs de manière plus propre.
|
||||
|
||||
---
|
||||
|
||||
## 📋 PLAN D'ACTION
|
||||
|
||||
### Phase 1 : Cartographie ✅
|
||||
- [x] Identifier tous les `unwrap()` / `expect()`
|
||||
- [x] Classer par catégorie et gravité
|
||||
- [x] Documenter dans ce fichier
|
||||
|
||||
### Phase 2 : Design d'erreurs
|
||||
- [x] Vérifier que `ChatError` existe et est complet
|
||||
- [ ] Ajouter helpers manquants si nécessaire
|
||||
|
||||
### Phase 3 : Remplacement systématique ✅
|
||||
- [x] Corriger `main.rs:127` (Prometheus) - Retourne `ChatError::Configuration`
|
||||
- [x] Corriger `main.rs:148` (DB pool) - Utilise `ok_or_else` avec `ChatError`
|
||||
- [x] Corriger `main.rs:326` (EventBus) - Utilise `if let Some(ref event_bus)`
|
||||
- [x] Corriger `auth.rs:312-313` (SystemTime) - Documenté avec expect justifié
|
||||
- [x] Corriger `authentication.rs:177` (Session) - Utilise `ok_or_else` avec `ChatError`
|
||||
- [x] Corriger `core/advanced_rate_limiter.rs:378,457` (Buckets) - Utilise `ok_or_else` avec `ChatError`
|
||||
- [x] Corriger `security_legacy.rs:409` (User actions) - Utilise `ok_or_else` avec `ChatError`
|
||||
|
||||
### Phase 4 : Panic Boundaries ✅
|
||||
- [x] Documentation ajoutée pour `handle_socket` - Toutes les erreurs gérées explicitement
|
||||
- [x] Documentation ajoutée pour les tasks `tokio::spawn` - Tokio capture automatiquement les panics
|
||||
- [x] Supervision documentée pour le typing monitor task - Toutes les erreurs gérées explicitement
|
||||
|
||||
### Phase 5 : Tests anti-panic ✅
|
||||
- [x] Créer `tests/panic_safety_tests.rs`
|
||||
- [x] Tests pour JWT invalides
|
||||
- [x] Tests pour UUID invalides
|
||||
- [x] Tests pour JSON malformé
|
||||
- [x] Tests pour messages WebSocket invalides
|
||||
- [x] Tests de résilience générale
|
||||
|
||||
### Phase 6 : Documentation finale ✅
|
||||
- [x] Mettre à jour ce fichier avec les corrections
|
||||
- [ ] Mettre à jour `TRIAGE.md`
|
||||
- [x] Documenter les invariants restants
|
||||
|
||||
---
|
||||
|
||||
## 📝 NOTES
|
||||
|
||||
### Invariants documentés (🟢 Acceptables)
|
||||
|
||||
1. **Regex patterns statiques** (`security_legacy.rs`) : Patterns compilés au démarrage, ne peuvent pas échouer sauf bug interne.
|
||||
2. **Tests** : Les `unwrap()` dans les tests sont généralement acceptables pour simplifier le code de test.
|
||||
|
||||
### Changements structurants
|
||||
|
||||
- ✅ `ChatError` existe déjà et est complet
|
||||
- ✅ Type `Result<T> = std::result::Result<T, ChatError>` déjà défini
|
||||
- ⏳ Panic boundaries à ajouter
|
||||
- ⏳ Supervision des tasks à améliorer
|
||||
|
||||
---
|
||||
|
||||
## ✅ CRITÈRES DE FIN
|
||||
|
||||
- [x] Tous les 🔴 critiques corrigés
|
||||
- [x] Tous les 🟠 moyens corrigés (1 seul, dans lib.rs:42 - test, acceptable)
|
||||
- [x] Panic boundaries documentées (tokio gère automatiquement, toutes erreurs explicites)
|
||||
- [x] Tasks supervisées (toutes erreurs gérées explicitement)
|
||||
- [x] Tests anti-panic créés
|
||||
- [x] Documentation à jour
|
||||
|
||||
## 📝 RÉSUMÉ DES CORRECTIONS
|
||||
|
||||
### Corrections appliquées
|
||||
|
||||
1. **main.rs:127** - Prometheus recorder : `expect()` → `map_err()` avec `ChatError::Configuration`
|
||||
2. **main.rs:148** - DB pool : `expect()` → `ok_or_else()` avec `ChatError::Configuration`
|
||||
3. **main.rs:326** - EventBus unwrap : `unwrap()` → `if let Some(ref event_bus)`
|
||||
4. **authentication.rs:177** - Session get : `unwrap()` → `ok_or_else()` avec `ChatError::Internal`
|
||||
5. **core/advanced_rate_limiter.rs:378,457** - Buckets get_mut : `unwrap()` → `ok_or_else()` avec `ChatError::Internal`
|
||||
6. **security_legacy.rs:409** - User actions get_mut : `unwrap()` → `ok_or_else()` avec `ChatError::Internal`
|
||||
7. **auth.rs:312-313** - SystemTime : Documenté avec `expect()` justifié (très rare, bug système)
|
||||
|
||||
### Approche des panic boundaries
|
||||
|
||||
Au lieu d'utiliser `catch_unwind()` (qui exige la borne `UnwindSafe`, que les futures contenant de la mutabilité intérieure ne satisfont pas), nous avons :
|
||||
|
||||
1. **Géré toutes les erreurs explicitement** : Tous les `unwrap()`/`expect()` déclenchables par des inputs extérieurs ont été remplacés par une gestion d'erreurs explicite avec `ChatError`.
|
||||
|
||||
2. **Documenté la supervision** : Tokio capture automatiquement les panics dans les tasks `tokio::spawn`, mais nous nous assurons que toutes les erreurs sont gérées explicitement pour éviter les panics en premier lieu.
|
||||
|
||||
3. **Handler WebSocket** : Toutes les erreurs sont gérées avec `?` ou `match`, aucune panic possible sur des inputs malformés.
|
||||
|
||||
### Tests créés
|
||||
|
||||
- `tests/panic_safety_tests.rs` : Tests pour JWT invalides, UUID invalides, JSON malformé, messages WebSocket invalides, et résilience générale.
|
||||
|
||||
### Invariants documentés (🟢 Acceptables)
|
||||
|
||||
1. **Regex patterns statiques** (`security_legacy.rs`) : Patterns compilés au démarrage, ne peuvent pas échouer sauf bug interne.
|
||||
2. **Tests** : Les `unwrap()` dans les tests sont généralement acceptables pour simplifier le code de test.
|
||||
3. **SystemTime::duration_since** (`auth.rs`) : Très rare (bug système), documenté avec `expect()` justifié.
|
||||
|
||||
|
|
@ -1,328 +0,0 @@
|
|||
# Système de Permissions du Chat Server
|
||||
|
||||
## Vue d'ensemble
|
||||
|
||||
Le système de permissions du chat server Veza fournit un contrôle d'accès granulaire pour les conversations, avec support des rôles (admin, moderator, member) et vérifications centralisées.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Module `security/permission.rs`
|
||||
|
||||
Le module `PermissionService` centralise toutes les vérifications de permissions :
|
||||
|
||||
```rust
|
||||
pub struct PermissionService {
|
||||
pool: PgPool,
|
||||
}
|
||||
```
|
||||
|
||||
### Fonctions principales
|
||||
|
||||
#### `user_in_conversation(user_id, conversation_id) -> Result<bool>`
|
||||
|
||||
Vérifie si un utilisateur est membre d'une conversation.
|
||||
|
||||
**Retourne** : `true` si membre, `false` sinon.
|
||||
|
||||
#### `user_role_in_conversation(user_id, conversation_id) -> Result<Role>`
|
||||
|
||||
Récupère le rôle d'un utilisateur dans une conversation spécifique.
|
||||
|
||||
**Retourne** : Le rôle (`Admin`, `Moderator`, `User`, `SuperAdmin`) ou une erreur si non membre.
|
||||
|
||||
#### `user_global_role(user_id) -> Result<Role>`
|
||||
|
||||
Récupère le rôle global d'un utilisateur depuis la table `users`.
|
||||
|
||||
**Retourne** : Le rôle global, ou `User` par défaut.
|
||||
|
||||
#### `can_send_message(user_id, conversation_id) -> Result<()>`
|
||||
|
||||
Vérifie si un utilisateur peut envoyer un message dans une conversation.
|
||||
|
||||
**Règles** :
|
||||
- Les membres peuvent envoyer des messages
|
||||
- Les admins globaux peuvent envoyer des messages même sans être membres
|
||||
- Les non-membres (non-admin) sont refusés
|
||||
|
||||
#### `can_read_conversation(user_id, conversation_id) -> Result<()>`
|
||||
|
||||
Vérifie si un utilisateur peut lire une conversation.
|
||||
|
||||
**Règles** :
|
||||
- Les membres peuvent lire
|
||||
- Les admins globaux peuvent lire même sans être membres
|
||||
- Les non-membres (non-admin) sont refusés
|
||||
|
||||
#### `can_mark_read(user_id, conversation_id) -> Result<()>`
|
||||
|
||||
Vérifie si un utilisateur peut marquer un message comme lu.
|
||||
|
||||
**Règles** : Identiques à `can_read_conversation`.
|
||||
|
||||
#### `can_join_conversation(user_id, conversation_id) -> Result<()>`
|
||||
|
||||
Vérifie si un utilisateur peut rejoindre une conversation.
|
||||
|
||||
**Règles** :
|
||||
- Les conversations publiques peuvent être rejointes par tous
|
||||
- Les conversations privées nécessitent d'être membre ou admin global
|
||||
|
||||
## Rôles et Permissions
|
||||
|
||||
### Rôles disponibles
|
||||
|
||||
| Rôle | Description |
|
||||
|------|-------------|
|
||||
| `User` | Utilisateur standard |
|
||||
| `Moderator` | Modérateur avec permissions étendues |
|
||||
| `Admin` | Administrateur avec tous les pouvoirs |
|
||||
| `SuperAdmin` | Super administrateur |
|
||||
|
||||
### Matrice des permissions
|
||||
|
||||
| Action | User | Moderator | Admin | SuperAdmin |
|
||||
|--------|------|-----------|-------|------------|
|
||||
| Envoyer message (membre) | ✅ | ✅ | ✅ | ✅ |
|
||||
| Envoyer message (non-membre) | ❌ | ❌ | ✅ | ✅ |
|
||||
| Lire conversation (membre) | ✅ | ✅ | ✅ | ✅ |
|
||||
| Lire conversation (non-membre) | ❌ | ❌ | ✅ | ✅ |
|
||||
| Marquer comme lu | ✅ | ✅ | ✅ | ✅ |
|
||||
| Rejoindre conversation publique | ✅ | ✅ | ✅ | ✅ |
|
||||
| Rejoindre conversation privée | ❌* | ❌* | ✅ | ✅ |
|
||||
|
||||
\* Nécessite d'être membre de la conversation
|
||||
|
||||
## Intégration dans les Handlers
|
||||
|
||||
### WebSocket Handler (`websocket/handler.rs`)
|
||||
|
||||
Tous les handlers WebSocket vérifient les permissions avant d'exécuter les actions :
|
||||
|
||||
#### `SendMessage`
|
||||
|
||||
```rust
|
||||
// Vérifier les permissions avant d'envoyer le message
|
||||
state
|
||||
.permission_service
|
||||
.can_send_message(sender_uuid, conversation_id)
|
||||
.await?;
|
||||
```
|
||||
|
||||
#### `JoinConversation`
|
||||
|
||||
```rust
|
||||
// Vérifier les permissions avant de rejoindre
|
||||
state
|
||||
.permission_service
|
||||
.can_join_conversation(user_uuid, conversation_id)
|
||||
.await?;
|
||||
```
|
||||
|
||||
#### `MarkAsRead`
|
||||
|
||||
```rust
|
||||
// Vérifier les permissions pour marquer comme lu
|
||||
state
|
||||
.permission_service
|
||||
.can_mark_read(user_uuid, conversation_id)
|
||||
.await?;
|
||||
```
|
||||
|
||||
### Message Handler (`message_handler.rs`)
|
||||
|
||||
Les handlers de messages vérifient également les permissions :
|
||||
|
||||
#### `handle_room_message`
|
||||
|
||||
Vérifie `can_send_message` avant d'envoyer un message dans un salon.
|
||||
|
||||
#### `handle_direct_message`
|
||||
|
||||
Vérifie `can_send_message` avant d'envoyer un message direct.
|
||||
|
||||
#### `handle_room_history`
|
||||
|
||||
Vérifie `can_read_conversation` via `can_read_room_history`.
|
||||
|
||||
#### `handle_dm_history`
|
||||
|
||||
Vérifie `can_read_conversation` via `can_read_dm_conversation`.
|
||||
|
||||
## Schéma de Base de Données
|
||||
|
||||
### Table `conversation_members`
|
||||
|
||||
```sql
|
||||
CREATE TABLE conversation_members (
|
||||
conversation_id UUID REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
user_id UUID REFERENCES users(id) ON DELETE CASCADE,
|
||||
role VARCHAR(50) NOT NULL DEFAULT 'user',
|
||||
joined_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
PRIMARY KEY (conversation_id, user_id)
|
||||
);
|
||||
```
|
||||
|
||||
**Colonne `role`** : Peut être `'user'`, `'moderator'`, `'admin'`, ou `'superadmin'`.
|
||||
|
||||
### Table `users`
|
||||
|
||||
```sql
|
||||
CREATE TABLE users (
|
||||
id UUID PRIMARY KEY,
|
||||
username VARCHAR(50) UNIQUE NOT NULL,
|
||||
email VARCHAR(255) UNIQUE NOT NULL,
|
||||
role VARCHAR(20) DEFAULT 'user', -- Rôle global
|
||||
...
|
||||
);
|
||||
```
|
||||
|
||||
**Colonne `role`** : Rôle global de l'utilisateur dans le système.
|
||||
|
||||
## Gestion des Erreurs
|
||||
|
||||
### Types d'erreurs
|
||||
|
||||
#### `PermissionError::NotMember`
|
||||
|
||||
L'utilisateur n'est pas membre de la conversation.
|
||||
|
||||
**Code HTTP** : 403 Forbidden
|
||||
|
||||
#### `PermissionError::InsufficientPermissions`
|
||||
|
||||
L'utilisateur n'a pas les permissions suffisantes pour l'action.
|
||||
|
||||
**Code HTTP** : 403 Forbidden
|
||||
|
||||
#### `PermissionError::InvalidRole`
|
||||
|
||||
Le rôle spécifié est invalide.
|
||||
|
||||
**Code HTTP** : 500 Internal Server Error
|
||||
|
||||
### Logging
|
||||
|
||||
Toutes les violations de permissions sont loggées avec `tracing::warn!` :
|
||||
|
||||
```rust
|
||||
warn!(
|
||||
user_id = %user_id,
|
||||
conversation_id = %conversation_id,
|
||||
error = %e,
|
||||
"Permission refusée pour l'envoi de message"
|
||||
);
|
||||
```
|
||||
|
||||
## Messages WebSocket d'Erreur
|
||||
|
||||
Lorsqu'une permission est refusée, le client reçoit un message d'erreur :
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "error",
|
||||
"message": "Permission refusée: Utilisateur non membre de la conversation",
|
||||
"code": "permission_denied"
|
||||
}
|
||||
```
|
||||
|
||||
## JWT Manager
|
||||
|
||||
Le `JwtManager` a été mis à jour pour récupérer les informations utilisateur depuis la base de données lors du refresh token :
|
||||
|
||||
```rust
|
||||
// Récupérer username et role depuis la DB
|
||||
let user_info: Option<(String, Option<String>)> = sqlx::query_as(
|
||||
r#"
|
||||
SELECT username, role FROM users
|
||||
WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(user_uuid)
|
||||
.fetch_optional(pool)
|
||||
.await?;
|
||||
```
|
||||
|
||||
**Fallback** : Si l'utilisateur n'est pas trouvé ou si le pool DB n'est pas disponible, utilise `"user"` / `"user"` par défaut (avec warning).
|
||||
|
||||
## Tests
|
||||
|
||||
Les tests sont disponibles dans `tests/test_permissions.rs` :
|
||||
|
||||
- `test_can_send_message_non_member` : Vérifie qu'un non-membre ne peut pas envoyer
|
||||
- `test_can_send_message_member` : Vérifie qu'un membre peut envoyer
|
||||
- `test_can_send_message_admin_global` : Vérifie qu'un admin global peut envoyer sans être membre
|
||||
- `test_can_read_conversation_non_member` : Vérifie qu'un non-membre ne peut pas lire
|
||||
- `test_can_read_conversation_member` : Vérifie qu'un membre peut lire
|
||||
- `test_user_in_conversation` : Vérifie la fonction `user_in_conversation`
|
||||
- `test_user_role_in_conversation` : Vérifie la fonction `user_role_in_conversation`
|
||||
- `test_integration_send_message_with_permissions` : Test d'intégration complet
|
||||
|
||||
**Note** : Les tests nécessitent une base de données de test et sont marqués avec `#[ignore]`.
|
||||
|
||||
## Exemples d'utilisation
|
||||
|
||||
### Vérifier les permissions avant d'envoyer un message
|
||||
|
||||
```rust
|
||||
use chat_server::security::permission::PermissionService;
|
||||
|
||||
let permission_service = PermissionService::new(pool);
|
||||
|
||||
// Vérifier avant d'envoyer
|
||||
permission_service
|
||||
.can_send_message(user_id, conversation_id)
|
||||
.await?;
|
||||
|
||||
// Envoyer le message...
|
||||
```
|
||||
|
||||
### Vérifier les permissions avant de lire
|
||||
|
||||
```rust
|
||||
// Vérifier avant de lire
|
||||
permission_service
|
||||
.can_read_conversation(user_id, conversation_id)
|
||||
.await?;
|
||||
|
||||
// Récupérer les messages...
|
||||
```
|
||||
|
||||
### Récupérer le rôle d'un utilisateur
|
||||
|
||||
```rust
|
||||
// Rôle dans une conversation spécifique
|
||||
let role = permission_service
|
||||
.user_role_in_conversation(user_id, conversation_id)
|
||||
.await?;
|
||||
|
||||
// Rôle global
|
||||
let global_role = permission_service
|
||||
.user_global_role(user_id)
|
||||
.await?;
|
||||
```
|
||||
|
||||
## Sécurité
|
||||
|
||||
### Bonnes pratiques
|
||||
|
||||
1. **Toujours vérifier les permissions** avant d'exécuter une action
|
||||
2. **Logger les violations** pour audit et monitoring
|
||||
3. **Ne jamais faire confiance au client** : toutes les vérifications sont côté serveur
|
||||
4. **Utiliser le service centralisé** : ne pas dupliquer la logique de vérification
|
||||
5. **Gérer les erreurs gracieusement** : envoyer des messages d'erreur clairs au client
|
||||
|
||||
### Points d'attention
|
||||
|
||||
- Les admins globaux peuvent contourner certaines restrictions (par design)
|
||||
- Les conversations privées nécessitent une vérification explicite d'appartenance
|
||||
- Le rôle dans `conversation_members` peut différer du rôle global dans `users`
|
||||
|
||||
## Évolution future
|
||||
|
||||
- Support de permissions custom par conversation
|
||||
- Permissions granulaires (edit, delete, pin, etc.)
|
||||
- Système de rôles hiérarchiques
|
||||
- Permissions temporaires (time-based)
|
||||
- Audit trail des changements de permissions
|
||||
|
||||
|
|
@ -1,352 +0,0 @@
|
|||
# Système de Read Receipts - Veza Chat Server
|
||||
|
||||
## Vue d'ensemble
|
||||
|
||||
Le système de read receipts permet de tracker quels messages ont été lus par quels utilisateurs dans une conversation. Cette fonctionnalité est essentielle pour fournir un feedback visuel aux utilisateurs (indicateurs "lu" / "non lu") et améliorer l'expérience utilisateur.
|
||||
|
||||
**Statut** : ✅ **Opérationnel** (implémenté et testé)
|
||||
|
||||
**Date d'implémentation** : 2025-12-05
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
### Composants principaux
|
||||
|
||||
1. **Table de base de données** : `read_receipts`
|
||||
2. **Manager** : `ReadReceiptManager` (`src/read_receipts.rs`)
|
||||
3. **Handler WebSocket** : Intégration dans `src/websocket/handler.rs`
|
||||
4. **Messages WebSocket** : `MarkAsRead` (inbound) et `MessageRead` (outbound)
|
||||
|
||||
### Schéma de base de données
|
||||
|
||||
La table `read_receipts` est créée par la migration `003_read_receipts.sql` :
|
||||
|
||||
```sql
|
||||
CREATE TABLE read_receipts (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
message_id UUID NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
conversation_id UUID NOT NULL REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
read_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(message_id, user_id)
|
||||
);
|
||||
```
|
||||
|
||||
**Index** :
|
||||
- `idx_read_receipts_message_id` : Recherche par message
|
||||
- `idx_read_receipts_user_id` : Recherche par utilisateur
|
||||
- `idx_read_receipts_conversation_id` : Recherche par conversation
|
||||
- `idx_read_receipts_conversation_user` : Requêtes fréquentes (dernière lecture)
|
||||
|
||||
---
|
||||
|
||||
## Contrat WebSocket
|
||||
|
||||
### Message Inbound : `MarkAsRead`
|
||||
|
||||
Envoyé par le client pour marquer un message comme lu.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "MarkAsRead",
|
||||
"conversation_id": "uuid-de-la-conversation",
|
||||
"message_id": "uuid-du-message"
|
||||
}
|
||||
```
|
||||
|
||||
**Validation côté serveur** :
|
||||
1. Le message existe et appartient à la conversation indiquée
|
||||
2. L'utilisateur est membre de la conversation
|
||||
3. Le JWT est valide (vérifié automatiquement par le handler)
|
||||
|
||||
**Réponses possibles** :
|
||||
- ✅ `ActionConfirmed` : Le message a été marqué comme lu
|
||||
- ❌ `Error` : Erreur de validation ou de permission
|
||||
|
||||
### Message Outbound : `MessageRead`
|
||||
|
||||
Envoyé à tous les participants de la conversation lorsqu'un message est marqué comme lu.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "MessageRead",
|
||||
"message_id": "uuid-du-message",
|
||||
"user_id": "uuid-de-l-utilisateur-qui-a-lu",
|
||||
"conversation_id": "uuid-de-la-conversation",
|
||||
"read_at": "2025-12-05T10:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Broadcast** : Ce message est automatiquement diffusé à tous les clients connectés à la conversation (sauf l'utilisateur qui a initié l'action, qui reçoit `ActionConfirmed`).
|
||||
|
||||
---
|
||||
|
||||
## Comportement serveur
|
||||
|
||||
### Flux de traitement
|
||||
|
||||
1. **Réception** : Le client envoie `MarkAsRead` via WebSocket
|
||||
2. **Validation** :
|
||||
- Vérification de l'existence du message
|
||||
- Vérification de l'appartenance du message à la conversation
|
||||
- Vérification de l'appartenance de l'utilisateur à la conversation
|
||||
3. **Persistance** :
|
||||
- Création d'un nouveau read receipt si inexistant
|
||||
- Mise à jour du timestamp `read_at` si le read receipt existe déjà
|
||||
4. **Notification** :
|
||||
- Broadcast de `MessageRead` à tous les participants
|
||||
- Envoi de `ActionConfirmed` au client initiateur
|
||||
|
||||
### Gestion des erreurs
|
||||
|
||||
| Erreur | Code | Comportement |
|
||||
|--------|------|--------------|
|
||||
| Message inexistant | `not_found` | Retourne une erreur au client |
|
||||
| Message n'appartient pas à la conversation | `validation_error` | Retourne une erreur au client |
|
||||
| Utilisateur non membre | `unauthorized` | Retourne une erreur au client |
|
||||
| Erreur DB | `internal_error` | Log l'erreur, retourne une erreur générique au client |
|
||||
|
||||
### Logs structurés
|
||||
|
||||
Les événements suivants sont loggés avec `tracing` :
|
||||
- ✅ Message marqué comme lu (info)
|
||||
- ✅ Read receipt créé (info)
|
||||
- ✅ Read receipt mis à jour (debug)
|
||||
- ❌ Erreurs de validation/permission (error)
|
||||
|
||||
---
|
||||
|
||||
## API du ReadReceiptManager
|
||||
|
||||
### Méthodes principales
|
||||
|
||||
#### `mark_as_read(user_id, message_id, conversation_id)`
|
||||
|
||||
Marque un message comme lu par un utilisateur.
|
||||
|
||||
**Retourne** : `ReadReceipt` (créé ou mis à jour)
|
||||
|
||||
#### `get_receipt(message_id, user_id)`
|
||||
|
||||
Récupère le read receipt pour un message et un utilisateur spécifiques.
|
||||
|
||||
**Retourne** : `Option<ReadReceipt>`
|
||||
|
||||
#### `get_receipts_for_message(message_id)`
|
||||
|
||||
Récupère tous les read receipts pour un message (tous les utilisateurs qui l'ont lu).
|
||||
|
||||
**Retourne** : `Vec<ReadReceipt>`
|
||||
|
||||
#### `get_message_status(message_id, user_id)`
|
||||
|
||||
Récupère le statut de lecture d'un message pour un utilisateur.
|
||||
|
||||
**Retourne** : `MessageReadStatus` (`Sent`, `Delivered`, ou `Read`)
|
||||
|
||||
#### `is_user_in_conversation(user_id, conversation_id)`
|
||||
|
||||
Vérifie si un utilisateur est membre d'une conversation.
|
||||
|
||||
**Retourne** : `bool`
|
||||
|
||||
#### `get_last_read_message(conversation_id, user_id)`
|
||||
|
||||
Récupère l'ID du dernier message lu par un utilisateur dans une conversation.
|
||||
|
||||
**Retourne** : `Option<Uuid>`
|
||||
|
||||
#### `get_unread_count(conversation_id, user_id, last_read_message_id)`
|
||||
|
||||
Calcule le nombre de messages non lus pour un utilisateur dans une conversation.
|
||||
|
||||
**Retourne** : `i64`
|
||||
|
||||
---
|
||||
|
||||
## Prérequis
|
||||
|
||||
### Base de données
|
||||
|
||||
1. **Migration** : Exécuter `migrations/003_read_receipts.sql`
|
||||
2. **Extensions PostgreSQL** : `uuid-ossp` (déjà requis par les migrations précédentes)
|
||||
|
||||
### Configuration
|
||||
|
||||
Aucune configuration spécifique requise. Le système utilise le pool de connexions PostgreSQL déjà configuré.
|
||||
|
||||
---
|
||||
|
||||
## Tests
|
||||
|
||||
### Tests unitaires
|
||||
|
||||
Les tests unitaires sont dans `src/read_receipts.rs` (module `tests`).
|
||||
|
||||
**Exécution** :
|
||||
```bash
|
||||
cd veza-chat-server
|
||||
cargo test --lib read_receipts -- --ignored
|
||||
```
|
||||
|
||||
**Tests disponibles** :
|
||||
- `test_mark_as_read_creates_receipt` : Vérifie la création d'un read receipt
|
||||
- `test_mark_as_read_updates_existing` : Vérifie la mise à jour d'un read receipt existant
|
||||
- `test_get_receipt` : Vérifie la récupération d'un read receipt
|
||||
- `test_get_message_status` : Vérifie le statut de lecture
|
||||
- `test_get_receipts_for_message` : Vérifie la récupération de tous les read receipts d'un message
|
||||
|
||||
### Tests d'intégration
|
||||
|
||||
Le test d'intégration est dans `tests/integration_test.rs` : `test_read_receipts_websocket`.
|
||||
|
||||
**Exécution** :
|
||||
```bash
|
||||
cd veza-chat-server
|
||||
# 1. Démarrer le serveur : cargo run
|
||||
# 2. Dans un autre terminal :
|
||||
cargo test --test integration_test test_read_receipts_websocket -- --ignored
|
||||
```
|
||||
|
||||
**Prérequis** :
|
||||
- Serveur chat-server en cours d'exécution
|
||||
- Base de données avec migrations appliquées
|
||||
- Variable d'environnement `DATABASE_URL` configurée
|
||||
|
||||
---
|
||||
|
||||
## Exemples d'utilisation
|
||||
|
||||
### Côté client (WebSocket)
|
||||
|
||||
```javascript
|
||||
// Marquer un message comme lu
|
||||
const markAsRead = {
|
||||
type: "MarkAsRead",
|
||||
conversation_id: "conversation-uuid",
|
||||
message_id: "message-uuid"
|
||||
};
|
||||
|
||||
websocket.send(JSON.stringify(markAsRead));
|
||||
|
||||
// Écouter les notifications de lecture
|
||||
websocket.onmessage = (event) => {
|
||||
const message = JSON.parse(event.data);
|
||||
|
||||
if (message.type === "MessageRead") {
|
||||
console.log(`Message ${message.message_id} lu par ${message.user_id}`);
|
||||
// Mettre à jour l'UI pour afficher l'indicateur "lu"
|
||||
}
|
||||
|
||||
if (message.type === "ActionConfirmed" && message.action === "marked_as_read") {
|
||||
console.log("Message marqué comme lu avec succès");
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Côté serveur (Rust)
|
||||
|
||||
```rust
|
||||
use chat_server::read_receipts::ReadReceiptManager;
|
||||
|
||||
// Dans votre handler
|
||||
let manager = ReadReceiptManager::new(pool);
|
||||
|
||||
// Marquer un message comme lu
|
||||
let receipt = manager
|
||||
.mark_as_read(user_id, message_id, conversation_id)
|
||||
.await?;
|
||||
|
||||
// Vérifier le statut
|
||||
let status = manager
|
||||
.get_message_status(message_id, user_id)
|
||||
.await?;
|
||||
|
||||
match status {
|
||||
MessageReadStatus::Read => println!("Message lu"),
|
||||
MessageReadStatus::Sent => println!("Message envoyé"),
|
||||
MessageReadStatus::Delivered => println!("Message livré"),
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Limitations et améliorations futures
|
||||
|
||||
### Limitations actuelles
|
||||
|
||||
1. **Statut "Delivered"** : Le système ne track pas encore le statut "livré" (message reçu mais pas encore lu). Actuellement, un message est soit `Sent` soit `Read`.
|
||||
|
||||
2. **Batch operations** : La méthode `mark_multiple_as_read` existe mais n'est pas encore exposée via WebSocket.
|
||||
|
||||
### Améliorations possibles
|
||||
|
||||
1. **Support "Delivered"** : Implémenter un système de tracking "delivered" (message reçu par le client mais pas encore ouvert).
|
||||
|
||||
2. **API REST** : Exposer une API REST pour :
|
||||
- Récupérer les read receipts d'un message
|
||||
- Récupérer le nombre de messages non lus
|
||||
- Marquer plusieurs messages comme lus en une requête
|
||||
|
||||
3. **Optimisations** :
|
||||
- Cache des read receipts fréquemment consultés
|
||||
- Batch processing pour les marquages multiples
|
||||
|
||||
4. **Métriques** : Ajouter des métriques Prometheus pour :
|
||||
- Nombre de read receipts créés par seconde
|
||||
- Temps moyen entre l'envoi et la lecture d'un message
|
||||
- Taux de lecture par conversation
|
||||
|
||||
---
|
||||
|
||||
## Migration depuis l'ancien système
|
||||
|
||||
Si vous migrez depuis un système utilisant `i64` pour les IDs :
|
||||
|
||||
1. **Exécuter la migration** : `migrations/003_read_receipts.sql`
|
||||
2. **Migrer les données existantes** (si applicable) :
|
||||
```sql
|
||||
-- Exemple de migration de données (à adapter selon votre schéma)
|
||||
INSERT INTO read_receipts (message_id, user_id, conversation_id, read_at)
|
||||
SELECT
|
||||
message_id::uuid,
|
||||
user_id::uuid,
|
||||
conversation_id::uuid,
|
||||
read_at
|
||||
FROM old_read_receipts;
|
||||
```
|
||||
3. **Mettre à jour le code client** : S'assurer que les clients utilisent des UUID au lieu d'entiers
|
||||
|
||||
---
|
||||
|
||||
## Support et maintenance
|
||||
|
||||
### Logs à surveiller
|
||||
|
||||
- Erreurs de validation/permission lors du marquage comme lu
|
||||
- Erreurs de base de données lors de la création/mise à jour de read receipts
|
||||
- Temps de réponse élevés pour les requêtes de read receipts
|
||||
|
||||
### Monitoring recommandé
|
||||
|
||||
- Nombre de read receipts créés par minute
|
||||
- Taux d'erreur lors du marquage comme lu
|
||||
- Temps de réponse des requêtes `get_receipts_for_message`
|
||||
|
||||
---
|
||||
|
||||
## Références
|
||||
|
||||
- **Migration** : `migrations/003_read_receipts.sql`
|
||||
- **Code source** : `src/read_receipts.rs`
|
||||
- **Handler WebSocket** : `src/websocket/handler.rs`
|
||||
- **Types WebSocket** : `src/websocket/mod.rs`
|
||||
|
||||
---
|
||||
|
||||
**Dernière mise à jour** : 2025-12-05
|
||||
|
||||
|
|
@ -1,132 +0,0 @@
|
|||
# =================================================================
|
||||
# CONFIGURATION SERVEUR VEZA CHAT
|
||||
# =================================================================
|
||||
|
||||
# Environnement (development, staging, production)
|
||||
RUST_ENV=development
|
||||
RUST_LOG=debug
|
||||
|
||||
# =================================================================
|
||||
# BASE DE DONNÉES
|
||||
# =================================================================
|
||||
DATABASE_URL=postgresql://veza_user:veza_password@localhost:5432/veza_chat
|
||||
DB_MAX_CONNECTIONS=10
|
||||
DB_CONNECT_TIMEOUT=10
|
||||
DB_AUTO_MIGRATE=true
|
||||
|
||||
# =================================================================
|
||||
# SÉCURITÉ ET AUTHENTIFICATION
|
||||
# =================================================================
|
||||
JWT_SECRET=your-super-secret-jwt-key-change-this-in-production
|
||||
JWT_ACCESS_DURATION=15m
|
||||
JWT_REFRESH_DURATION=7d
|
||||
JWT_ALGORITHM=HS256
|
||||
JWT_AUDIENCE=veza-chat
|
||||
JWT_ISSUER=veza-chat-server
|
||||
|
||||
# Authentification 2FA
|
||||
ENABLE_2FA=false
|
||||
TOTP_WINDOW=30
|
||||
|
||||
# Sécurité des mots de passe
|
||||
PASSWORD_MIN_LENGTH=8
|
||||
BCRYPT_COST=12
|
||||
|
||||
# =================================================================
|
||||
# SERVEUR ET RÉSEAU
|
||||
# =================================================================
|
||||
SERVER_BIND_ADDR=127.0.0.1:8080
|
||||
SERVER_WORKERS=0
|
||||
CONNECTION_TIMEOUT=30
|
||||
HEARTBEAT_INTERVAL=30
|
||||
SHUTDOWN_TIMEOUT=10
|
||||
|
||||
# =================================================================
|
||||
# CACHE REDIS (OPTIONNEL)
|
||||
# =================================================================
|
||||
REDIS_URL=redis://localhost:6379
|
||||
REDIS_POOL_SIZE=10
|
||||
REDIS_CONNECT_TIMEOUT=5
|
||||
REDIS_DEFAULT_TTL=3600
|
||||
REDIS_KEY_PREFIX=veza_chat:
|
||||
REDIS_ENABLED=true
|
||||
|
||||
# =================================================================
|
||||
# LIMITES ET QUOTAS
|
||||
# =================================================================
|
||||
MAX_MESSAGE_LENGTH=2000
|
||||
MAX_CONNECTIONS_PER_USER=5
|
||||
MAX_MESSAGES_PER_MINUTE=60
|
||||
MAX_FILE_SIZE=10485760
|
||||
MAX_FILES_PER_USER=100
|
||||
MAX_ROOMS_PER_USER=50
|
||||
MAX_MEMBERS_PER_ROOM=1000
|
||||
|
||||
# =================================================================
|
||||
# FONCTIONNALITÉS
|
||||
# =================================================================
|
||||
ENABLE_FILE_UPLOADS=true
|
||||
ENABLE_MESSAGE_REACTIONS=true
|
||||
ENABLE_USER_MENTIONS=true
|
||||
ENABLE_PINNED_MESSAGES=true
|
||||
ENABLE_MESSAGE_THREADS=true
|
||||
ENABLE_WEBHOOKS=true
|
||||
ENABLE_PUSH_NOTIFICATIONS=false
|
||||
ENABLE_MESSAGE_HISTORY=true
|
||||
|
||||
# =================================================================
|
||||
# LOGGING
|
||||
# =================================================================
|
||||
LOG_LEVEL=info
|
||||
LOG_FORMAT=json
|
||||
LOG_FILE=logs/chat-server.log
|
||||
LOG_ROTATION_SIZE=100MB
|
||||
LOG_ROTATION_FILES=10
|
||||
LOG_COMPRESSION=true
|
||||
|
||||
# =================================================================
|
||||
# INTÉGRATIONS EXTERNES
|
||||
# =================================================================
|
||||
|
||||
# Email (optionnel)
|
||||
SMTP_HOST=smtp.gmail.com
|
||||
SMTP_PORT=587
|
||||
SMTP_USERNAME=your-email@gmail.com
|
||||
SMTP_PASSWORD=your-app-password
|
||||
EMAIL_FROM_ADDRESS=noreply@veza-chat.com
|
||||
EMAIL_FROM_NAME=Veza Chat
|
||||
|
||||
# Prometheus métriques (optionnel)
|
||||
PROMETHEUS_BIND_ADDR=127.0.0.1:9090
|
||||
PROMETHEUS_PATH=/metrics
|
||||
|
||||
# Webhooks sortants (optionnel)
|
||||
WEBHOOK_USER_EVENTS=https://api.yourapp.com/webhooks/user-events
|
||||
WEBHOOK_MESSAGE_EVENTS=https://api.yourapp.com/webhooks/message-events
|
||||
WEBHOOK_SECRET=your-webhook-secret
|
||||
|
||||
# =================================================================
|
||||
# DÉVELOPPEMENT
|
||||
# =================================================================
|
||||
|
||||
# Base de données de test
|
||||
TEST_DATABASE_URL=postgresql://veza_test:veza_test@localhost:5432/veza_chat_test
|
||||
|
||||
# Debug et développement
|
||||
ENABLE_DEBUG_ENDPOINTS=true
|
||||
ENABLE_CORS=true
|
||||
CORS_ALLOWED_ORIGINS=http://localhost:3000,http://localhost:3001
|
||||
|
||||
# =================================================================
|
||||
# PRODUCTION
|
||||
# =================================================================
|
||||
|
||||
# SSL/TLS (production uniquement)
|
||||
ENABLE_TLS=false
|
||||
TLS_CERT_PATH=/path/to/cert.pem
|
||||
TLS_KEY_PATH=/path/to/key.pem
|
||||
|
||||
# Performance (production)
|
||||
ENABLE_COMPRESSION=true
|
||||
ENABLE_METRICS=true
|
||||
ENABLE_HEALTH_CHECK=true
|
||||
|
|
@ -1,116 +0,0 @@
|
|||
-- Migration: Structure de base de données simplifiée pour chat server
|
||||
-- Création: 2025-07-26
|
||||
-- Version: 1.0.0 Production Ready
|
||||
|
||||
-- Extensions requises
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE UTILISATEURS
|
||||
-- ================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
username VARCHAR(50) UNIQUE NOT NULL,
|
||||
email VARCHAR(255) UNIQUE NOT NULL,
|
||||
display_name VARCHAR(100),
|
||||
avatar_url TEXT,
|
||||
is_active BOOLEAN NOT NULL DEFAULT true,
|
||||
last_seen TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE CONVERSATIONS
|
||||
-- ================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS conversations (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
description TEXT,
|
||||
conversation_type VARCHAR(50) NOT NULL DEFAULT 'direct',
|
||||
is_private BOOLEAN NOT NULL DEFAULT false,
|
||||
created_by UUID REFERENCES users(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE MEMBRES DES CONVERSATIONS
|
||||
-- ================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS conversation_members (
|
||||
conversation_id UUID REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
user_id UUID REFERENCES users(id) ON DELETE CASCADE,
|
||||
role VARCHAR(50) NOT NULL DEFAULT 'user',
|
||||
joined_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
PRIMARY KEY (conversation_id, user_id)
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE MESSAGES
|
||||
-- ================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS messages (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
conversation_id UUID REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
sender_id UUID REFERENCES users(id) ON DELETE CASCADE,
|
||||
content TEXT NOT NULL,
|
||||
message_type VARCHAR(50) NOT NULL DEFAULT 'text',
|
||||
parent_message_id UUID REFERENCES messages(id) ON DELETE CASCADE,
|
||||
is_pinned BOOLEAN NOT NULL DEFAULT false,
|
||||
is_deleted BOOLEAN NOT NULL DEFAULT false,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
status VARCHAR(50) NOT NULL DEFAULT 'sent'
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- INDEX POUR PERFORMANCE
|
||||
-- ================================================================
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conversation_id ON messages(conversation_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_sender_id ON messages(sender_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_created_at ON messages(created_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_conversation_members_conversation_id ON conversation_members(conversation_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_conversation_members_user_id ON conversation_members(user_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_username ON users(username);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
|
||||
|
||||
-- ================================================================
|
||||
-- TRIGGERS POUR MISE À JOUR AUTOMATIQUE
|
||||
-- ================================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ language 'plpgsql';
|
||||
|
||||
CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
CREATE TRIGGER update_conversations_updated_at BEFORE UPDATE ON conversations
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
CREATE TRIGGER update_messages_updated_at BEFORE UPDATE ON messages
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- ================================================================
|
||||
-- DONNÉES DE TEST
|
||||
-- ================================================================
|
||||
|
||||
-- Insérer un utilisateur de test
|
||||
INSERT INTO users (username, email, display_name)
|
||||
VALUES ('test_user', 'test@veza.com', 'Test User')
|
||||
ON CONFLICT (username) DO NOTHING;
|
||||
|
||||
-- Insérer une conversation de test
|
||||
INSERT INTO conversations (name, description, conversation_type, created_by)
|
||||
SELECT 'Test Room', 'Room de test pour Veza', 'group', id
|
||||
FROM users WHERE username = 'test_user'
|
||||
ON CONFLICT DO NOTHING;
|
||||
|
|
@ -1,223 +0,0 @@
|
|||
-- Migration pour les fonctionnalités avancées du serveur de chat
|
||||
-- Exécuter après les migrations de base
|
||||
|
||||
-- Table des sanctions/modération
|
||||
CREATE TABLE IF NOT EXISTS sanctions (
|
||||
id SERIAL PRIMARY KEY,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
moderator_id UUID NOT NULL REFERENCES users(id), -- NULL pour système automatique
|
||||
sanction_type VARCHAR(50) NOT NULL, -- JSON serialized SanctionType
|
||||
reason VARCHAR(100) NOT NULL, -- JSON serialized SanctionReason
|
||||
message TEXT, -- Message optionnel du modérateur
|
||||
expires_at TIMESTAMP WITH TIME ZONE, -- Expiration pour sanctions temporaires
|
||||
is_active BOOLEAN NOT NULL DEFAULT true,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Index pour les sanctions
|
||||
CREATE INDEX idx_sanctions_user_id ON sanctions(user_id);
|
||||
CREATE INDEX idx_sanctions_active ON sanctions(user_id, is_active) WHERE is_active = true;
|
||||
|
||||
-- Table des réactions aux messages
|
||||
CREATE TABLE IF NOT EXISTS message_reactions (
|
||||
id SERIAL PRIMARY KEY,
|
||||
message_id UUID NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
reaction_type VARCHAR(100) NOT NULL, -- JSON serialized ReactionType
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
-- Un utilisateur ne peut avoir qu'une réaction de chaque type par message
|
||||
UNIQUE(message_id, user_id, reaction_type)
|
||||
);
|
||||
|
||||
-- Index pour les réactions
|
||||
CREATE INDEX idx_message_reactions_message ON message_reactions(message_id);
|
||||
|
||||
-- Table des blocages entre utilisateurs
|
||||
CREATE TABLE IF NOT EXISTS user_blocks (
|
||||
id SERIAL PRIMARY KEY,
|
||||
blocker_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
blocked_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
reason VARCHAR(255),
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
-- Un utilisateur ne peut bloquer un autre qu'une seule fois
|
||||
UNIQUE(blocker_id, blocked_id),
|
||||
|
||||
-- Un utilisateur ne peut pas se bloquer lui-même
|
||||
CHECK (blocker_id != blocked_id)
|
||||
);
|
||||
|
||||
-- Index pour les blocages
|
||||
CREATE INDEX idx_user_blocks_blocker ON user_blocks(blocker_id);
|
||||
CREATE INDEX idx_user_blocks_blocked ON user_blocks(blocked_id);
|
||||
|
||||
-- Table des salons avec métadonnées
|
||||
CREATE TABLE IF NOT EXISTS rooms (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(50) NOT NULL UNIQUE,
|
||||
display_name VARCHAR(100),
|
||||
description TEXT,
|
||||
creator_id UUID NOT NULL REFERENCES users(id),
|
||||
is_private BOOLEAN NOT NULL DEFAULT false,
|
||||
max_members INTEGER DEFAULT 100,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Index pour les salons
|
||||
CREATE INDEX idx_rooms_name ON rooms(name);
|
||||
CREATE INDEX idx_rooms_creator ON rooms(creator_id);
|
||||
CREATE INDEX idx_rooms_private ON rooms(is_private);
|
||||
|
||||
-- Table des membres de salons avec rôles
|
||||
CREATE TABLE IF NOT EXISTS room_members (
|
||||
id SERIAL PRIMARY KEY,
|
||||
room_id INTEGER NOT NULL REFERENCES rooms(id) ON DELETE CASCADE,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
role VARCHAR(20) NOT NULL DEFAULT 'member', -- 'admin', 'moderator', 'member'
|
||||
joined_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
last_read_at TIMESTAMP WITH TIME ZONE,
|
||||
|
||||
UNIQUE(room_id, user_id)
|
||||
);
|
||||
|
||||
-- Index pour les membres de salon
|
||||
CREATE INDEX idx_room_members_room ON room_members(room_id);
|
||||
CREATE INDEX idx_room_members_user ON room_members(user_id);
|
||||
CREATE INDEX idx_room_members_role ON room_members(room_id, role);
|
||||
|
||||
-- Table des notifications
|
||||
CREATE TABLE IF NOT EXISTS notifications (
|
||||
id SERIAL PRIMARY KEY,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
type VARCHAR(50) NOT NULL, -- 'dm', 'mention', 'room_invite', etc.
|
||||
title VARCHAR(255) NOT NULL,
|
||||
content TEXT NOT NULL,
|
||||
metadata JSONB, -- Données additionnelles spécifiques au type
|
||||
is_read BOOLEAN NOT NULL DEFAULT false,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
read_at TIMESTAMP WITH TIME ZONE
|
||||
);
|
||||
|
||||
-- Index pour les notifications
|
||||
CREATE INDEX idx_notifications_user ON notifications(user_id);
|
||||
CREATE INDEX idx_notifications_unread ON notifications(user_id, is_read) WHERE is_read = false;
|
||||
CREATE INDEX idx_notifications_type ON notifications(type);
|
||||
|
||||
-- Table des sessions utilisateur (pour la gestion des connexions multiples)
|
||||
CREATE TABLE IF NOT EXISTS user_sessions (
|
||||
id SERIAL PRIMARY KEY,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
session_token VARCHAR(255) NOT NULL UNIQUE,
|
||||
device_info VARCHAR(255), -- User-Agent ou info appareil
|
||||
ip_address INET,
|
||||
last_activity TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
is_active BOOLEAN NOT NULL DEFAULT true
|
||||
);
|
||||
|
||||
-- Index pour les sessions
|
||||
CREATE INDEX idx_user_sessions_user ON user_sessions(user_id);
|
||||
CREATE INDEX idx_user_sessions_token ON user_sessions(session_token);
|
||||
CREATE INDEX idx_user_sessions_active ON user_sessions(user_id, is_active) WHERE is_active = true;
|
||||
CREATE INDEX idx_user_sessions_expires ON user_sessions(expires_at);
|
||||
|
||||
-- Table des logs d'audit pour le monitoring
|
||||
CREATE TABLE IF NOT EXISTS audit_logs (
|
||||
id SERIAL PRIMARY KEY,
|
||||
user_id UUID REFERENCES users(id), -- NULL pour les actions système
|
||||
action VARCHAR(100) NOT NULL, -- 'login', 'message_sent', 'user_banned', etc.
|
||||
resource_type VARCHAR(50), -- 'user', 'message', 'room', etc.
|
||||
resource_id VARCHAR(100), -- ID de la ressource concernée
|
||||
details JSONB, -- Détails spécifiques à l'action
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Index pour les logs d'audit
|
||||
CREATE INDEX idx_audit_logs_user ON audit_logs(user_id);
|
||||
CREATE INDEX idx_audit_logs_action ON audit_logs(action);
|
||||
CREATE INDEX idx_audit_logs_resource ON audit_logs(resource_type, resource_id);
|
||||
CREATE INDEX idx_audit_logs_created ON audit_logs(created_at);
|
||||
|
||||
-- Mise à jour de la table messages pour supporter plus de métadonnées
|
||||
ALTER TABLE messages
|
||||
ADD COLUMN IF NOT EXISTS message_type VARCHAR(20) DEFAULT 'text',
|
||||
ADD COLUMN IF NOT EXISTS reply_to_id UUID REFERENCES messages(id),
|
||||
ADD COLUMN IF NOT EXISTS is_edited BOOLEAN DEFAULT false,
|
||||
ADD COLUMN IF NOT EXISTS edited_at TIMESTAMP WITH TIME ZONE,
|
||||
ADD COLUMN IF NOT EXISTS metadata JSONB;
|
||||
|
||||
-- Index pour les nouvelles colonnes de messages
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_type ON messages(message_type);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_reply ON messages(reply_to_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_edited ON messages(is_edited) WHERE is_edited = true;
|
||||
|
||||
-- Mise à jour de la table users pour supporter les rôles et statuts
|
||||
ALTER TABLE users
|
||||
ADD COLUMN IF NOT EXISTS role VARCHAR(20) DEFAULT 'user',
|
||||
ADD COLUMN IF NOT EXISTS status VARCHAR(20) DEFAULT 'offline',
|
||||
ADD COLUMN IF NOT EXISTS last_seen TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
|
||||
ADD COLUMN IF NOT EXISTS reputation_score INTEGER DEFAULT 100,
|
||||
ADD COLUMN IF NOT EXISTS is_banned BOOLEAN DEFAULT false,
|
||||
ADD COLUMN IF NOT EXISTS is_muted BOOLEAN DEFAULT false;
|
||||
|
||||
-- Index pour les nouvelles colonnes utilisateurs
|
||||
CREATE INDEX IF NOT EXISTS idx_users_role ON users(role);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_status ON users(status);
|
||||
|
||||
-- Vue pour les statistiques en temps réel
|
||||
CREATE OR REPLACE VIEW server_stats AS
|
||||
SELECT
|
||||
(SELECT COUNT(*) FROM users WHERE last_seen > CURRENT_TIMESTAMP - INTERVAL '5 minutes') as active_users,
|
||||
(SELECT COUNT(*) FROM users) as total_users,
|
||||
(SELECT COUNT(*) FROM messages WHERE created_at > CURRENT_DATE) as messages_today,
|
||||
(SELECT COUNT(*) FROM messages) as total_messages;
|
||||
|
||||
-- Fonction pour nettoyer les sessions expirées
|
||||
CREATE OR REPLACE FUNCTION cleanup_expired_sessions()
|
||||
RETURNS INTEGER AS $$
|
||||
DECLARE
|
||||
deleted_count INTEGER;
|
||||
BEGIN
|
||||
DELETE FROM user_sessions WHERE expires_at < CURRENT_TIMESTAMP;
|
||||
GET DIAGNOSTICS deleted_count = ROW_COUNT;
|
||||
RETURN deleted_count;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Fonction pour nettoyer les anciens logs d'audit (garder 30 jours)
|
||||
CREATE OR REPLACE FUNCTION cleanup_old_audit_logs()
|
||||
RETURNS INTEGER AS $$
|
||||
DECLARE
|
||||
deleted_count INTEGER;
|
||||
BEGIN
|
||||
DELETE FROM audit_logs WHERE created_at < CURRENT_TIMESTAMP - INTERVAL '30 days';
|
||||
GET DIAGNOSTICS deleted_count = ROW_COUNT;
|
||||
RETURN deleted_count;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Contraintes de sécurité
|
||||
ALTER TABLE messages ADD CONSTRAINT chk_message_content_length CHECK (length(content) <= 4000);
|
||||
ALTER TABLE rooms ADD CONSTRAINT chk_room_name_length CHECK (length(name) <= 50 AND length(name) >= 1);
|
||||
ALTER TABLE rooms ADD CONSTRAINT chk_room_max_members CHECK (max_members > 0 AND max_members <= 1000);
|
||||
|
||||
-- Commentaires pour la documentation
|
||||
COMMENT ON TABLE sanctions IS 'Table des sanctions de modération (warnings, mutes, bans)';
|
||||
COMMENT ON TABLE message_reactions IS 'Table des réactions aux messages (like, love, etc.)';
|
||||
COMMENT ON TABLE user_blocks IS 'Table des blocages entre utilisateurs';
|
||||
COMMENT ON TABLE rooms IS 'Table des salons de chat avec métadonnées';
|
||||
COMMENT ON TABLE room_members IS 'Table des membres de salon avec leurs rôles';
|
||||
COMMENT ON TABLE notifications IS 'Table des notifications push/in-app';
|
||||
COMMENT ON TABLE user_sessions IS 'Table des sessions utilisateur actives';
|
||||
COMMENT ON TABLE audit_logs IS 'Table des logs d''audit pour le monitoring';
|
||||
|
||||
COMMENT ON VIEW server_stats IS 'Vue des statistiques serveur en temps réel';
|
||||
|
||||
-- Permissions par défaut (ajuster selon vos besoins)
|
||||
-- GRANT SELECT ON server_stats TO chat_readonly_user;
|
||||
-- GRANT SELECT, INSERT ON audit_logs TO chat_api_user;
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
-- Migration: Table read_receipts pour le système de read receipts
|
||||
-- Création: 2025-12-05
|
||||
-- Version: 1.0.0
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE READ RECEIPTS
|
||||
-- ================================================================
|
||||
|
||||
-- Table pour tracker les read receipts (marquage de messages comme lus)
|
||||
CREATE TABLE IF NOT EXISTS read_receipts (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
message_id UUID NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
conversation_id UUID NOT NULL REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
read_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
|
||||
-- Un utilisateur ne peut avoir qu'un seul read receipt par message
|
||||
UNIQUE(message_id, user_id)
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- INDEX POUR PERFORMANCE
|
||||
-- ================================================================
|
||||
|
||||
-- Index pour rechercher les read receipts par message
|
||||
CREATE INDEX IF NOT EXISTS idx_read_receipts_message_id ON read_receipts(message_id);
|
||||
|
||||
-- Index pour rechercher les read receipts par utilisateur
|
||||
CREATE INDEX IF NOT EXISTS idx_read_receipts_user_id ON read_receipts(user_id);
|
||||
|
||||
-- Index pour rechercher les read receipts par conversation
|
||||
CREATE INDEX IF NOT EXISTS idx_read_receipts_conversation_id ON read_receipts(conversation_id);
|
||||
|
||||
-- Index composite pour les requêtes fréquentes (dernière lecture dans une conversation)
|
||||
CREATE INDEX IF NOT EXISTS idx_read_receipts_conversation_user ON read_receipts(conversation_id, user_id, read_at DESC);
|
||||
|
||||
-- Index pour les requêtes de comptage de messages non lus
|
||||
CREATE INDEX IF NOT EXISTS idx_read_receipts_message_user ON read_receipts(message_id, user_id);
|
||||
|
||||
-- ================================================================
|
||||
-- TRIGGERS POUR MISE À JOUR AUTOMATIQUE
|
||||
-- ================================================================
|
||||
|
||||
CREATE TRIGGER update_read_receipts_updated_at BEFORE UPDATE ON read_receipts
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- ================================================================
|
||||
-- COMMENTAIRES POUR DOCUMENTATION
|
||||
-- ================================================================
|
||||
|
||||
COMMENT ON TABLE read_receipts IS 'Table des read receipts pour tracker quels messages ont été lus par quels utilisateurs';
|
||||
COMMENT ON COLUMN read_receipts.message_id IS 'ID du message marqué comme lu';
|
||||
COMMENT ON COLUMN read_receipts.user_id IS 'ID de l''utilisateur qui a lu le message';
|
||||
COMMENT ON COLUMN read_receipts.conversation_id IS 'ID de la conversation (pour optimiser les requêtes)';
|
||||
COMMENT ON COLUMN read_receipts.read_at IS 'Timestamp de la lecture du message';
|
||||
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
-- Migration: Table delivered_status pour le système de delivered status
|
||||
-- Création: 2025-01-27
|
||||
-- Version: 1.0.0
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE DELIVERED STATUS
|
||||
-- ================================================================
|
||||
|
||||
-- Table pour tracker les delivered status (messages reçus mais pas encore lus)
|
||||
CREATE TABLE IF NOT EXISTS delivered_status (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
message_id UUID NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
conversation_id UUID NOT NULL REFERENCES conversations(id) ON DELETE CASCADE,
|
||||
delivered_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
|
||||
-- Un utilisateur ne peut avoir qu'un seul delivered status par message
|
||||
UNIQUE(message_id, user_id)
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- INDEX POUR PERFORMANCE
|
||||
-- ================================================================
|
||||
|
||||
-- Index pour rechercher les delivered status par message
|
||||
CREATE INDEX IF NOT EXISTS idx_delivered_status_message_id ON delivered_status(message_id);
|
||||
|
||||
-- Index pour rechercher les delivered status par utilisateur
|
||||
CREATE INDEX IF NOT EXISTS idx_delivered_status_user_id ON delivered_status(user_id);
|
||||
|
||||
-- Index pour rechercher les delivered status par conversation
|
||||
CREATE INDEX IF NOT EXISTS idx_delivered_status_conversation_id ON delivered_status(conversation_id);
|
||||
|
||||
-- Index composite pour les requêtes fréquentes (dernière délivrance dans une conversation)
|
||||
CREATE INDEX IF NOT EXISTS idx_delivered_status_conversation_user ON delivered_status(conversation_id, user_id, delivered_at DESC);
|
||||
|
||||
-- Index pour les requêtes de comptage de messages non délivrés
|
||||
CREATE INDEX IF NOT EXISTS idx_delivered_status_message_user ON delivered_status(message_id, user_id);
|
||||
|
||||
-- ================================================================
|
||||
-- TRIGGERS POUR MISE À JOUR AUTOMATIQUE
|
||||
-- ================================================================
|
||||
|
||||
CREATE TRIGGER update_delivered_status_updated_at BEFORE UPDATE ON delivered_status
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- ================================================================
|
||||
-- COMMENTAIRES POUR DOCUMENTATION
|
||||
-- ================================================================
|
||||
|
||||
COMMENT ON TABLE delivered_status IS 'Table des delivered status pour tracker quels messages ont été délivrés (reçus) par quels utilisateurs';
|
||||
COMMENT ON COLUMN delivered_status.message_id IS 'ID du message délivré';
|
||||
COMMENT ON COLUMN delivered_status.user_id IS 'ID de l''utilisateur qui a reçu le message';
|
||||
COMMENT ON COLUMN delivered_status.conversation_id IS 'ID de la conversation (pour optimiser les requêtes)';
|
||||
COMMENT ON COLUMN delivered_status.delivered_at IS 'Timestamp de la délivrance du message';
|
||||
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
-- Migration: Support pour l'édition et la suppression de messages
|
||||
-- Création: 2025-12-05
|
||||
-- Version: 1.0.0
|
||||
-- Description: Ajoute les colonnes nécessaires pour l'édition et la suppression (soft delete) de messages
|
||||
|
||||
-- Ajouter deleted_at pour la traçabilité (is_deleted existe déjà)
|
||||
ALTER TABLE messages
|
||||
ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMP WITH TIME ZONE;
|
||||
|
||||
-- Index pour les messages supprimés (pour les requêtes de nettoyage)
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_deleted_at ON messages(deleted_at) WHERE deleted_at IS NOT NULL;
|
||||
|
||||
-- Index pour les messages édités (pour les requêtes de recherche)
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_edited_at ON messages(edited_at) WHERE edited_at IS NOT NULL;
|
||||
|
||||
-- Commentaire pour la documentation
|
||||
COMMENT ON COLUMN messages.deleted_at IS 'Timestamp de suppression du message (soft delete)';
|
||||
COMMENT ON COLUMN messages.edited_at IS 'Timestamp de dernière édition du message';
|
||||
COMMENT ON COLUMN messages.is_edited IS 'Indicateur si le message a été édité';
|
||||
COMMENT ON COLUMN messages.is_deleted IS 'Indicateur si le message a été supprimé (soft delete)';
|
||||
|
||||
|
||||
|
|
@ -1,59 +0,0 @@
|
|||
-- Migration: Support pour History Pagination, Message Search, et Offline Sync
|
||||
-- Création: 2025-12-05
|
||||
-- Version: 1.0.0
|
||||
-- Description: Ajoute les index nécessaires pour la pagination, recherche et synchronisation
|
||||
|
||||
-- ================================================================
|
||||
-- INDEX POUR PAGINATION (HISTORY)
|
||||
-- ================================================================
|
||||
|
||||
-- Index composite pour la pagination efficace par conversation et date
|
||||
-- Permet les requêtes ORDER BY created_at avec WHERE conversation_id
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conv_created_at
|
||||
ON messages(conversation_id, created_at DESC);
|
||||
|
||||
-- Index pour les requêtes avec filtre is_deleted (pour exclure les messages supprimés)
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conv_created_not_deleted
|
||||
ON messages(conversation_id, created_at DESC)
|
||||
WHERE is_deleted = false;
|
||||
|
||||
-- ================================================================
|
||||
-- INDEX POUR RECHERCHE TEXTUELLE
|
||||
-- ================================================================
|
||||
|
||||
-- Extension pour recherche trigram (recherche partielle efficace)
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;
|
||||
|
||||
-- Index GIN trigram pour recherche ILIKE performante sur content
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_content_trgm
|
||||
ON messages USING GIN(content gin_trgm_ops);
|
||||
|
||||
-- Index pour recherche avec filtre conversation_id + content
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conv_content_trgm
|
||||
ON messages USING GIN(conversation_id, content gin_trgm_ops);
|
||||
|
||||
-- ================================================================
|
||||
-- INDEX POUR SYNC OFFLINE
|
||||
-- ================================================================
|
||||
|
||||
-- Index pour les requêtes WHERE created_at > timestamp (sync depuis)
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conv_created_sync
|
||||
ON messages(conversation_id, created_at ASC)
|
||||
WHERE is_deleted = false;
|
||||
|
||||
-- Index pour les requêtes WHERE updated_at > timestamp (pour les edits)
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conv_updated_sync
|
||||
ON messages(conversation_id, updated_at ASC)
|
||||
WHERE is_deleted = false;
|
||||
|
||||
-- ================================================================
|
||||
-- COMMENTAIRES POUR DOCUMENTATION
|
||||
-- ================================================================
|
||||
|
||||
COMMENT ON INDEX idx_messages_conv_created_at IS 'Index pour pagination efficace de l''historique par conversation';
|
||||
COMMENT ON INDEX idx_messages_conv_created_not_deleted IS 'Index pour pagination en excluant les messages supprimés';
|
||||
COMMENT ON INDEX idx_messages_content_trgm IS 'Index GIN trigram pour recherche textuelle performante sur le contenu';
|
||||
COMMENT ON INDEX idx_messages_conv_content_trgm IS 'Index pour recherche textuelle par conversation';
|
||||
COMMENT ON INDEX idx_messages_conv_created_sync IS 'Index pour synchronisation offline (messages depuis timestamp)';
|
||||
COMMENT ON INDEX idx_messages_conv_updated_sync IS 'Index pour synchronisation offline (updates depuis timestamp)';
|
||||
|
||||
|
|
@ -1,69 +0,0 @@
|
|||
-- Migration pour les DM enrichis - Veza Chat Server
|
||||
-- Ajoute la table dm_conversations pour séparer les DM des salons
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- Table pour les conversations DM enrichies
|
||||
CREATE TABLE IF NOT EXISTS dm_conversations (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
uuid UUID UNIQUE NOT NULL DEFAULT gen_random_uuid(),
|
||||
user1_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
user2_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
is_blocked BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
blocked_by UUID REFERENCES users(id) ON DELETE SET NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
|
||||
-- Contraintes
|
||||
CONSTRAINT dm_conversations_different_users CHECK (user1_id != user2_id),
|
||||
CONSTRAINT dm_conversations_ordered_users CHECK (user1_id < user2_id),
|
||||
CONSTRAINT dm_conversations_unique_pair UNIQUE (user1_id, user2_id)
|
||||
);
|
||||
|
||||
-- Index pour les performances
|
||||
CREATE INDEX IF NOT EXISTS idx_dm_conversations_user1 ON dm_conversations(user1_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_dm_conversations_user2 ON dm_conversations(user2_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_dm_conversations_updated_at ON dm_conversations(updated_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_dm_conversations_uuid ON dm_conversations(uuid);
|
||||
|
||||
-- Trigger pour mettre à jour updated_at automatiquement
|
||||
CREATE OR REPLACE FUNCTION update_dm_conversations_updated_at()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER trigger_dm_conversations_updated_at
|
||||
BEFORE UPDATE ON dm_conversations
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_dm_conversations_updated_at();
|
||||
|
||||
-- Migration des données DM supprimée car les colonnes from_user, to_user, room n'existent pas
|
||||
-- dans le schéma de base. La table dm_conversations est créée vide et sera peuplée
|
||||
-- par l'application lors de la création de nouveaux DM.
|
||||
|
||||
-- Log de la migration supprimé car la table audit_logs n'existe pas dans le schéma de base
|
||||
|
||||
COMMIT;
|
||||
|
||||
-- Vérifications post-migration
|
||||
DO $$
|
||||
DECLARE
|
||||
dm_conversations_count INTEGER;
|
||||
migrated_messages_count INTEGER;
|
||||
BEGIN
|
||||
SELECT COUNT(*) INTO dm_conversations_count FROM dm_conversations;
|
||||
SELECT COUNT(*) INTO migrated_messages_count FROM messages WHERE conversation_id IN (SELECT uuid FROM dm_conversations);
|
||||
|
||||
RAISE NOTICE 'Migration DM enrichis terminée:';
|
||||
RAISE NOTICE ' - % conversations DM créées', dm_conversations_count;
|
||||
RAISE NOTICE ' - % messages DM migrés', migrated_messages_count;
|
||||
|
||||
IF dm_conversations_count = 0 THEN
|
||||
RAISE NOTICE ' ⚠️ Aucune conversation DM trouvée (normal si pas de DM existants)';
|
||||
ELSE
|
||||
RAISE NOTICE ' ✅ Migration réussie';
|
||||
END IF;
|
||||
END $$;
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
-- ================================================================
|
||||
-- CORRECTIONS POST-MIGRATION
|
||||
-- Corrige les erreurs résiduelles de la migration principale
|
||||
-- ================================================================
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 1: FINALISER LA TABLE USERS
|
||||
-- ================================================================
|
||||
|
||||
-- Colonnes de profil supprimées car elles existent déjà dans la migration de base
|
||||
|
||||
-- Type user_role supprimé car non défini dans le schéma de base
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 2: FINALISER LA TABLE MESSAGES
|
||||
-- ================================================================
|
||||
|
||||
-- Colonnes de messages supprimées car elles existent déjà dans la migration de base
|
||||
|
||||
-- Type message_status supprimé car non défini dans le schéma de base
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 3: FINALISER LA TABLE MESSAGE_REACTIONS
|
||||
-- ================================================================
|
||||
|
||||
-- Colonne emoji supprimée car la table message_reactions utilise reaction_type
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 4: CRÉATION DES INDEX MANQUÉS
|
||||
-- ================================================================
|
||||
|
||||
-- Index supprimés car les colonnes référencées n'existent pas dans le schéma de base
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 5: NETTOYAGE DES DÉPENDANCES PROBLÉMATIQUES
|
||||
-- ================================================================
|
||||
|
||||
-- Supprimer le trigger problématique avant la fonction
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM information_schema.triggers
|
||||
WHERE trigger_name = 'trigger_handle_mentions_secure') THEN
|
||||
DROP TRIGGER IF EXISTS trigger_handle_mentions_secure ON messages;
|
||||
RAISE NOTICE 'Trigger trigger_handle_mentions_secure supprimé';
|
||||
END IF;
|
||||
|
||||
-- Maintenant supprimer la fonction
|
||||
DROP FUNCTION IF EXISTS handle_mentions_secure() CASCADE;
|
||||
RAISE NOTICE 'Fonction handle_mentions_secure supprimée';
|
||||
EXCEPTION
|
||||
WHEN OTHERS THEN
|
||||
RAISE NOTICE 'Erreur lors du nettoyage: %', SQLERRM;
|
||||
END $$;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 6: MISE À JOUR DES DONNÉES EXISTANTES
|
||||
-- ================================================================
|
||||
|
||||
-- Migration des données supprimée car les colonnes room n'existent pas dans le schéma de base
|
||||
|
||||
-- Migration des conversations supprimée car les colonnes et types n'existent pas dans le schéma de base
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 7: VÉRIFICATIONS FINALES
|
||||
-- ================================================================
|
||||
|
||||
-- Vérifications finales simplifiées
|
||||
DO $$
|
||||
DECLARE
|
||||
orphan_messages INTEGER;
|
||||
BEGIN
|
||||
-- Compter les messages sans conversation
|
||||
SELECT COUNT(*) INTO orphan_messages FROM messages WHERE conversation_id IS NULL;
|
||||
|
||||
RAISE NOTICE 'Vérifications finales:';
|
||||
RAISE NOTICE '- Messages orphelins: %', orphan_messages;
|
||||
|
||||
IF orphan_messages > 0 THEN
|
||||
RAISE WARNING 'Il reste % messages sans conversation_id', orphan_messages;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Actualiser les statistiques
|
||||
ANALYZE users;
|
||||
ANALYZE messages;
|
||||
ANALYZE conversations;
|
||||
ANALYZE message_reactions;
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
-- Migration: Ajout de colonnes UUID aux tables manquantes
|
||||
-- Création: 2025-01-27
|
||||
-- Version: 1.0.0
|
||||
-- Description: Ajoute des colonnes UUID aux tables conversation_members, audit_logs et security_events
|
||||
-- pour permettre la migration du code Rust de i64 vers Uuid
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE conversation_members
|
||||
-- ================================================================
|
||||
|
||||
-- Ajouter la colonne uuid (cette table n'a pas de colonne id, seulement une PK composite)
|
||||
ALTER TABLE conversation_members
|
||||
ADD COLUMN IF NOT EXISTS uuid UUID DEFAULT gen_random_uuid();
|
||||
|
||||
-- Ajouter la contrainte UNIQUE
|
||||
ALTER TABLE conversation_members
|
||||
ADD CONSTRAINT conversation_members_uuid_unique UNIQUE (uuid);
|
||||
|
||||
-- Ajouter la contrainte NOT NULL (après le backfill par default)
|
||||
-- Note: Les valeurs existantes ont déjà été remplies par DEFAULT, donc on peut ajouter NOT NULL
|
||||
ALTER TABLE conversation_members
|
||||
ALTER COLUMN uuid SET NOT NULL;
|
||||
|
||||
-- Index pour performance
|
||||
CREATE INDEX IF NOT EXISTS idx_conversation_members_uuid ON conversation_members(uuid);
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE audit_logs
|
||||
-- ================================================================
|
||||
|
||||
-- Ajouter la colonne uuid (cette table a déjà un id SERIAL)
|
||||
ALTER TABLE audit_logs
|
||||
ADD COLUMN IF NOT EXISTS uuid UUID DEFAULT gen_random_uuid();
|
||||
|
||||
-- Ajouter la contrainte UNIQUE
|
||||
ALTER TABLE audit_logs
|
||||
ADD CONSTRAINT audit_logs_uuid_unique UNIQUE (uuid);
|
||||
|
||||
-- Ajouter la contrainte NOT NULL (après le backfill par default)
|
||||
ALTER TABLE audit_logs
|
||||
ALTER COLUMN uuid SET NOT NULL;
|
||||
|
||||
-- Index pour performance
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_logs_uuid ON audit_logs(uuid);
|
||||
|
||||
-- ================================================================
|
||||
-- TABLE security_events (block suppressed)
|
||||
-- ================================================================
|
||||
-- Table externe non gérée dans ce schéma isolé.
|
||||
|
||||
-- ================================================================
|
||||
-- COMMENTAIRES
|
||||
-- ================================================================
|
||||
|
||||
COMMENT ON COLUMN conversation_members.uuid IS 'UUID unique pour chaque membre de conversation (pour migration i64 -> UUID)';
|
||||
COMMENT ON COLUMN audit_logs.uuid IS 'UUID unique pour chaque log d''audit (pour migration i64 -> UUID)';
|
||||
|
||||
|
||||
|
|
@ -1,402 +0,0 @@
|
|||
-- ================================================================
|
||||
-- MIGRATION DE NETTOYAGE ET MISE À JOUR POUR PRODUCTION
|
||||
-- Version: 0.2.0 - Compatible avec structure existante
|
||||
-- ================================================================
|
||||
-- ⚠️ ATTENTION: Cette migration est partiellement destructive
|
||||
-- 🔒 Assurez-vous d'avoir une sauvegarde complète avant exécution
|
||||
--
|
||||
-- Utilisation:
|
||||
-- psql -h 10.5.191.47 -U veza -d veza_db -f migrations/999_cleanup_production_ready_fixed.sql
|
||||
-- ================================================================
|
||||
|
||||
-- Début de la migration de production
|
||||
|
||||
-- Vérifier que nous sommes dans la bonne base
|
||||
SELECT current_database() as current_db, current_user as current_user_name;
|
||||
|
||||
-- Créer l'extension UUID si pas présente
|
||||
-- Extension uuid-ossp non nécessaire, gen_random_uuid() est utilisé
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 1: SAUVEGARDE DES DONNÉES EXISTANTES
|
||||
-- ================================================================
|
||||
|
||||
-- Sauvegarde des données existantes
|
||||
|
||||
-- Sauvegarder les utilisateurs existants
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'users') THEN
|
||||
CREATE TEMP TABLE temp_old_users AS
|
||||
SELECT id, username, email, created_at, role
|
||||
FROM users;
|
||||
|
||||
RAISE NOTICE 'Sauvegarde de % utilisateurs', (SELECT COUNT(*) FROM temp_old_users);
|
||||
ELSE
|
||||
RAISE NOTICE 'Table users non trouvée, création nécessaire';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Sauvegarder les messages existants
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'messages') THEN
|
||||
CREATE TEMP TABLE temp_old_messages AS
|
||||
SELECT id, sender_id, content, created_at, message_type
|
||||
FROM messages;
|
||||
|
||||
RAISE NOTICE 'Sauvegarde de % messages', (SELECT COUNT(*) FROM temp_old_messages);
|
||||
ELSE
|
||||
RAISE NOTICE 'Table messages non trouvée, création nécessaire';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 2: SUPPRESSION SÉCURISÉE DES TABLES REDONDANTES
|
||||
-- ================================================================
|
||||
|
||||
-- Suppression des tables redondantes
|
||||
|
||||
-- Supprimer uniquement les tables qui existent et sont redondantes
|
||||
DROP TABLE IF EXISTS users_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS users_backup CASCADE;
|
||||
DROP TABLE IF EXISTS rooms_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS messages_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS message_mentions_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS message_mentions_secure CASCADE;
|
||||
DROP TABLE IF EXISTS message_reactions_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS room_members_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS user_sessions_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS user_sessions_secure CASCADE;
|
||||
DROP TABLE IF EXISTS user_blocks_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS user_blocks_secure CASCADE;
|
||||
DROP TABLE IF EXISTS security_events_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS security_events_secure CASCADE;
|
||||
|
||||
-- Supprimer les tables métier obsolètes (si elles existent)
|
||||
DROP TABLE IF EXISTS listings CASCADE;
|
||||
DROP TABLE IF EXISTS categories CASCADE;
|
||||
DROP TABLE IF EXISTS user_products CASCADE;
|
||||
DROP TABLE IF EXISTS internal_documents CASCADE;
|
||||
DROP TABLE IF EXISTS shared_ressources CASCADE;
|
||||
DROP TABLE IF EXISTS shared_ressource_tags CASCADE;
|
||||
DROP TABLE IF EXISTS ressource_tags CASCADE;
|
||||
DROP TABLE IF EXISTS tracks CASCADE;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 3: CRÉATION DES TYPES ENUMS NÉCESSAIRES
|
||||
-- ================================================================
|
||||
|
||||
-- Création des types énumérés
|
||||
|
||||
-- Type user_role supprimé car non nécessaire pour la migration de base
|
||||
|
||||
-- Type message_status supprimé car non nécessaire pour la migration de base
|
||||
|
||||
-- Type conversation_type supprimé car non nécessaire pour la migration de base
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 4: MISE À JOUR DE LA TABLE USERS
|
||||
-- ================================================================
|
||||
|
||||
-- Mise à jour de la table users
|
||||
|
||||
-- Colonnes uuid supprimées car les tables utilisent déjà id avec UUID
|
||||
|
||||
-- Ajouter les nouvelles colonnes de sécurité
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Colonnes de sécurité 2FA supprimées car non nécessaires pour la migration de base
|
||||
|
||||
-- Colonnes de profil (display_name et avatar_url existent déjà dans la migration de base)
|
||||
-- Colonne bio supprimée car non nécessaire pour la migration de base
|
||||
|
||||
-- Colonnes de métadonnées
|
||||
-- Colonne last_login supprimée car non nécessaire pour la migration de base
|
||||
|
||||
-- Colonne last_activity supprimée car non nécessaire pour la migration de base
|
||||
|
||||
-- updated_at existe déjà dans la migration de base, pas besoin de l'ajouter
|
||||
|
||||
-- Colonnes de permissions (is_active existe déjà dans la migration de base)
|
||||
-- Colonne is_verified supprimée car non nécessaire pour la migration de base
|
||||
END $$;
|
||||
|
||||
-- Mise à jour du type de rôle
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Vérifier si la colonne role existe et la convertir
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'role') THEN
|
||||
-- Sauvegarder les valeurs existantes avant conversion
|
||||
UPDATE users SET role = 'user' WHERE role IS NULL OR role = '';
|
||||
|
||||
-- Convertir vers le nouveau type (si ce n'est pas déjà fait)
|
||||
BEGIN
|
||||
ALTER TABLE users ALTER COLUMN role TYPE user_role USING role::user_role;
|
||||
RAISE NOTICE 'Colonne role convertie vers user_role';
|
||||
EXCEPTION
|
||||
WHEN OTHERS THEN
|
||||
RAISE NOTICE 'Colonne role déjà au bon type ou erreur: %', SQLERRM;
|
||||
END;
|
||||
ELSE
|
||||
ALTER TABLE users ADD COLUMN role user_role DEFAULT 'user' NOT NULL;
|
||||
RAISE NOTICE 'Colonne role ajoutée avec type user_role';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 5: CRÉATION DE LA TABLE CONVERSATIONS
|
||||
-- ================================================================
|
||||
|
||||
-- Création de la table conversations
|
||||
|
||||
-- Table conversations existe déjà dans la migration 001, pas besoin de la redéfinir
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 6: MISE À JOUR DE LA TABLE MESSAGES
|
||||
-- ================================================================
|
||||
|
||||
-- Mise à jour de la table messages
|
||||
|
||||
-- Colonnes uuid supprimées car les tables utilisent déjà id avec UUID
|
||||
|
||||
-- Colonnes déjà cohérentes avec la migration de base
|
||||
|
||||
-- Colonne conversation_id existe déjà dans la migration de base
|
||||
|
||||
-- Colonnes avancées existent déjà dans la migration de base
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 7: CRÉATION DES TABLES COMPLÉMENTAIRES
|
||||
-- ================================================================
|
||||
|
||||
-- Création des tables complémentaires
|
||||
|
||||
-- Table conversation_members existe déjà dans la migration 001, pas besoin de la redéfinir
|
||||
|
||||
-- Table message_reactions existe déjà dans la migration 002, pas besoin de la redéfinir
|
||||
|
||||
-- Table pour les mentions
|
||||
CREATE TABLE IF NOT EXISTS message_mentions (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id UUID NOT NULL,
|
||||
mentioned_user_id UUID NOT NULL,
|
||||
is_read BOOLEAN DEFAULT FALSE,
|
||||
created_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
|
||||
|
||||
CONSTRAINT message_mentions_message_fk FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
|
||||
CONSTRAINT message_mentions_user_fk FOREIGN KEY (mentioned_user_id) REFERENCES users(id) ON DELETE CASCADE,
|
||||
CONSTRAINT message_mentions_unique UNIQUE (message_id, mentioned_user_id)
|
||||
);
|
||||
|
||||
-- Table pour l'historique des messages
|
||||
CREATE TABLE IF NOT EXISTS message_history (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id UUID NOT NULL,
|
||||
old_content TEXT NOT NULL,
|
||||
edited_by UUID NOT NULL,
|
||||
edited_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
|
||||
|
||||
CONSTRAINT message_history_message_fk FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
|
||||
CONSTRAINT message_history_user_fk FOREIGN KEY (edited_by) REFERENCES users(id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- Table pour les sessions utilisateur
|
||||
CREATE TABLE IF NOT EXISTS user_sessions (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
user_id UUID NOT NULL,
|
||||
session_token VARCHAR(255) UNIQUE NOT NULL,
|
||||
refresh_token VARCHAR(255) UNIQUE,
|
||||
device_info TEXT,
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
is_active BOOLEAN DEFAULT TRUE,
|
||||
expires_at TIMESTAMPTZ NOT NULL,
|
||||
created_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
|
||||
last_activity TIMESTAMPTZ DEFAULT NOW() NOT NULL,
|
||||
|
||||
CONSTRAINT user_sessions_user_fk FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 8: CRÉATION DES INDEX DE PERFORMANCE
|
||||
-- ================================================================
|
||||
|
||||
-- Création des index de performance
|
||||
|
||||
-- Index pour users
|
||||
CREATE INDEX IF NOT EXISTS idx_users_username_active
|
||||
ON users(username) WHERE is_active = TRUE;
|
||||
|
||||
-- Index last_login supprimé car la colonne n'existe pas dans la migration de base
|
||||
|
||||
-- Index email_verified supprimé car la colonne is_verified n'existe pas dans la migration de base
|
||||
|
||||
-- Index last_activity supprimé car la colonne n'existe pas dans la migration de base
|
||||
|
||||
-- Index pour conversations
|
||||
CREATE INDEX IF NOT EXISTS idx_conversations_type_public
|
||||
ON conversations(conversation_type) WHERE is_private = FALSE;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_conversations_owner_active
|
||||
ON conversations(created_by);
|
||||
|
||||
-- Index pour messages (performance critique)
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conversation_time
|
||||
ON messages(conversation_id, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_sender_time
|
||||
ON messages(sender_id, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_threads
|
||||
ON messages(parent_message_id, created_at) WHERE parent_message_id IS NOT NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_pinned
|
||||
ON messages(conversation_id) WHERE is_pinned = TRUE;
|
||||
|
||||
-- Index pour réactions (utilise reaction_type au lieu d'emoji)
|
||||
CREATE INDEX IF NOT EXISTS idx_reactions_message
|
||||
ON message_reactions(message_id, reaction_type);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_reactions_user
|
||||
ON message_reactions(user_id, created_at DESC);
|
||||
|
||||
-- Index pour mentions
|
||||
CREATE INDEX IF NOT EXISTS idx_mentions_user_unread
|
||||
ON message_mentions(mentioned_user_id) WHERE is_read = FALSE;
|
||||
|
||||
-- Index pour sessions
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_user_active
|
||||
ON user_sessions(user_id) WHERE is_active = TRUE;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_token
|
||||
ON user_sessions(session_token);
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 9: CRÉATION DES FONCTIONS UTILITAIRES
|
||||
-- ================================================================
|
||||
|
||||
-- Création des fonctions utilitaires
|
||||
|
||||
-- Fonction pour obtenir l'ID utilisateur courant (à implémenter côté app)
|
||||
CREATE OR REPLACE FUNCTION current_user_id()
|
||||
RETURNS BIGINT AS $$
|
||||
BEGIN
|
||||
-- Cette fonction doit être implémentée côté application
|
||||
-- Pour l'instant, elle retourne NULL
|
||||
RETURN NULL;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
||||
|
||||
-- Fonction pour nettoyer les sessions expirées
|
||||
CREATE OR REPLACE FUNCTION cleanup_expired_sessions()
|
||||
RETURNS INTEGER AS $$
|
||||
DECLARE
|
||||
deleted_count INTEGER;
|
||||
BEGIN
|
||||
DELETE FROM user_sessions
|
||||
WHERE expires_at < NOW() OR (last_activity < NOW() - INTERVAL '30 days');
|
||||
|
||||
GET DIAGNOSTICS deleted_count = ROW_COUNT;
|
||||
RETURN deleted_count;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Fonction trigger pour mettre à jour updated_at
|
||||
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 10: CRÉATION DES TRIGGERS
|
||||
-- ================================================================
|
||||
|
||||
-- Création des triggers
|
||||
|
||||
-- Triggers pour updated_at
|
||||
DROP TRIGGER IF EXISTS update_users_updated_at ON users;
|
||||
CREATE TRIGGER update_users_updated_at
|
||||
BEFORE UPDATE ON users
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
DROP TRIGGER IF EXISTS update_conversations_updated_at ON conversations;
|
||||
CREATE TRIGGER update_conversations_updated_at
|
||||
BEFORE UPDATE ON conversations
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
DROP TRIGGER IF EXISTS update_messages_updated_at ON messages;
|
||||
CREATE TRIGGER update_messages_updated_at
|
||||
BEFORE UPDATE ON messages
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 11: VÉRIFICATION ET NETTOYAGE FINAL
|
||||
-- ================================================================
|
||||
|
||||
-- Vérification finale
|
||||
|
||||
-- Nettoyer les fonctions obsolètes
|
||||
DROP FUNCTION IF EXISTS cleanup_expired_sessions_secure();
|
||||
DROP FUNCTION IF EXISTS cleanup_old_audit_logs();
|
||||
DROP FUNCTION IF EXISTS cleanup_old_data_secure();
|
||||
DROP FUNCTION IF EXISTS handle_mentions_secure();
|
||||
|
||||
-- Mettre à jour les statistiques
|
||||
ANALYZE users;
|
||||
ANALYZE conversations;
|
||||
ANALYZE messages;
|
||||
ANALYZE message_reactions;
|
||||
ANALYZE message_mentions;
|
||||
ANALYZE user_sessions;
|
||||
|
||||
-- Validation finale
|
||||
DO $$
|
||||
DECLARE
|
||||
user_count INTEGER;
|
||||
message_count INTEGER;
|
||||
conversation_count INTEGER;
|
||||
BEGIN
|
||||
SELECT COUNT(*) INTO user_count FROM users;
|
||||
SELECT COUNT(*) INTO message_count FROM messages;
|
||||
SELECT COUNT(*) INTO conversation_count FROM conversations;
|
||||
|
||||
RAISE NOTICE '✅ Migration terminée avec succès:';
|
||||
RAISE NOTICE ' - Utilisateurs: %', user_count;
|
||||
RAISE NOTICE ' - Messages: %', message_count;
|
||||
RAISE NOTICE ' - Conversations: %', conversation_count;
|
||||
|
||||
-- Warnings si problèmes détectés
|
||||
IF user_count = 0 THEN
|
||||
RAISE WARNING '⚠️ Aucun utilisateur trouvé après migration';
|
||||
END IF;
|
||||
|
||||
IF message_count > 0 AND conversation_count = 0 THEN
|
||||
RAISE WARNING '⚠️ Messages présents mais aucune conversation';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Nettoyer les tables temporaires
|
||||
DROP TABLE IF EXISTS temp_old_users;
|
||||
DROP TABLE IF EXISTS temp_old_messages;
|
||||
|
||||
-- ================================================================
|
||||
-- FINALISATION
|
||||
-- ================================================================
|
||||
|
||||
-- Migration de production terminée avec succès
|
||||
SELECT
|
||||
schemaname,
|
||||
tablename,
|
||||
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size
|
||||
FROM pg_tables
|
||||
WHERE schemaname = 'public'
|
||||
AND tablename IN ('users', 'conversations', 'messages', 'message_reactions', 'message_mentions', 'user_sessions')
|
||||
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;
|
||||
|
|
@ -1,270 +0,0 @@
|
|||
-- Migration 003: Schéma amélioré avec sécurité renforcée et séparation DM/salons
|
||||
|
||||
-- Extension pour UUID
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
CREATE EXTENSION IF NOT EXISTS pgcrypto;
|
||||
|
||||
-- ================================================
|
||||
-- UTILISATEURS AVEC SÉCURITÉ RENFORCÉE
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users_enhanced (
|
||||
id SERIAL PRIMARY KEY,
|
||||
username VARCHAR(50) NOT NULL UNIQUE,
|
||||
email VARCHAR(255) NOT NULL UNIQUE,
|
||||
password_hash VARCHAR(255) NOT NULL,
|
||||
|
||||
-- Rôles et permissions
|
||||
role VARCHAR(20) NOT NULL DEFAULT 'user' CHECK (role IN ('admin', 'moderator', 'user', 'guest')),
|
||||
|
||||
-- Statut et sécurité
|
||||
is_active BOOLEAN NOT NULL DEFAULT true,
|
||||
is_banned BOOLEAN NOT NULL DEFAULT false,
|
||||
is_verified BOOLEAN NOT NULL DEFAULT false,
|
||||
|
||||
-- Statut de présence
|
||||
status VARCHAR(20) DEFAULT 'offline' CHECK (status IN ('online', 'away', 'busy', 'invisible', 'offline')),
|
||||
status_message VARCHAR(100),
|
||||
|
||||
-- Modération
|
||||
reputation_score INTEGER DEFAULT 100,
|
||||
|
||||
-- Métadonnées
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- SALONS AVEC GESTION AVANCÉE
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS rooms_enhanced (
|
||||
id VARCHAR(100) PRIMARY KEY,
|
||||
name VARCHAR(100) NOT NULL,
|
||||
description TEXT,
|
||||
|
||||
-- Propriétaire
|
||||
owner_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
|
||||
-- Configuration
|
||||
is_public BOOLEAN NOT NULL DEFAULT true,
|
||||
is_archived BOOLEAN DEFAULT false,
|
||||
max_members INTEGER DEFAULT 1000,
|
||||
|
||||
-- Métadonnées
|
||||
member_count INTEGER DEFAULT 0,
|
||||
message_count INTEGER DEFAULT 0,
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- MESSAGES UNIFIÉS
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS messages_enhanced (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
-- Type et contenu
|
||||
message_type VARCHAR(20) NOT NULL CHECK (message_type IN ('room_message', 'direct_message', 'system_message')),
|
||||
content TEXT NOT NULL CHECK (LENGTH(content) <= 4000),
|
||||
|
||||
-- Auteur
|
||||
author_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
author_username VARCHAR(50) NOT NULL,
|
||||
|
||||
-- Destination (exclusion mutuelle)
|
||||
room_id VARCHAR(100) REFERENCES rooms_enhanced(id) ON DELETE CASCADE,
|
||||
recipient_id INTEGER REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
recipient_username VARCHAR(50),
|
||||
|
||||
-- Threading
|
||||
parent_message_id BIGINT REFERENCES messages_enhanced(id) ON DELETE SET NULL,
|
||||
thread_count INTEGER DEFAULT 0,
|
||||
|
||||
-- Statut
|
||||
status VARCHAR(20) DEFAULT 'sent' CHECK (status IN ('sent', 'delivered', 'read', 'edited', 'deleted')),
|
||||
is_pinned BOOLEAN DEFAULT false,
|
||||
is_edited BOOLEAN DEFAULT false,
|
||||
original_content TEXT,
|
||||
|
||||
-- Modération
|
||||
is_flagged BOOLEAN DEFAULT false,
|
||||
moderation_notes TEXT,
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ,
|
||||
|
||||
-- Contraintes logiques
|
||||
CONSTRAINT message_destination_check CHECK (
|
||||
(message_type = 'room_message' AND room_id IS NOT NULL AND recipient_id IS NULL) OR
|
||||
(message_type = 'direct_message' AND room_id IS NULL AND recipient_id IS NOT NULL) OR
|
||||
(message_type = 'system_message')
|
||||
)
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- RÉACTIONS AUX MESSAGES
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS message_reactions_enhanced (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id BIGINT NOT NULL REFERENCES messages_enhanced(id) ON DELETE CASCADE,
|
||||
user_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
emoji VARCHAR(100) NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
|
||||
UNIQUE (message_id, user_id, emoji)
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- MENTIONS DANS LES MESSAGES
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS message_mentions_enhanced (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id BIGINT NOT NULL REFERENCES messages_enhanced(id) ON DELETE CASCADE,
|
||||
user_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
is_read BOOLEAN DEFAULT false,
|
||||
|
||||
UNIQUE (message_id, user_id)
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- MEMBRES DES SALONS
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS room_members_enhanced (
|
||||
room_id VARCHAR(100) NOT NULL REFERENCES rooms_enhanced(id) ON DELETE CASCADE,
|
||||
user_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
|
||||
role VARCHAR(20) DEFAULT 'member' CHECK (role IN ('owner', 'admin', 'moderator', 'member')),
|
||||
joined_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
last_read_message_id BIGINT,
|
||||
|
||||
PRIMARY KEY (room_id, user_id)
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- BLOCAGES UTILISATEURS
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS user_blocks_enhanced (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
blocker_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
blocked_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
reason VARCHAR(500),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
|
||||
UNIQUE (blocker_id, blocked_id),
|
||||
CONSTRAINT no_self_block CHECK (blocker_id != blocked_id)
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- SESSIONS SÉCURISÉES
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS user_sessions_enhanced (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
user_id INTEGER NOT NULL REFERENCES users_enhanced(id) ON DELETE CASCADE,
|
||||
|
||||
token_hash VARCHAR(128) NOT NULL UNIQUE,
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
last_activity TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
expires_at TIMESTAMPTZ NOT NULL,
|
||||
is_active BOOLEAN DEFAULT true
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- LOGS DE SÉCURITÉ
|
||||
-- ================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS security_events_enhanced (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
event_type VARCHAR(50) NOT NULL,
|
||||
severity VARCHAR(20) DEFAULT 'info' CHECK (severity IN ('debug', 'info', 'warning', 'error', 'critical')),
|
||||
|
||||
user_id INTEGER REFERENCES users_enhanced(id) ON DELETE SET NULL,
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
|
||||
details JSONB DEFAULT '{}'::jsonb,
|
||||
success BOOLEAN,
|
||||
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- ================================================
|
||||
-- INDEX POUR PERFORMANCE
|
||||
-- ================================================
|
||||
|
||||
-- Messages
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_room_enhanced ON messages_enhanced (room_id, created_at DESC) WHERE message_type = 'room_message' AND status != 'deleted';
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_dm_enhanced ON messages_enhanced (author_id, recipient_id, created_at DESC) WHERE message_type = 'direct_message' AND status != 'deleted';
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_dm_reverse_enhanced ON messages_enhanced (recipient_id, author_id, created_at DESC) WHERE message_type = 'direct_message' AND status != 'deleted';
|
||||
|
||||
-- Réactions
|
||||
CREATE INDEX IF NOT EXISTS idx_reactions_message_enhanced ON message_reactions_enhanced (message_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_reactions_user_enhanced ON message_reactions_enhanced (user_id);
|
||||
|
||||
-- Mentions
|
||||
CREATE INDEX IF NOT EXISTS idx_mentions_user_enhanced ON message_mentions_enhanced (user_id, is_read);
|
||||
|
||||
-- Sessions
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_user_enhanced ON user_sessions_enhanced (user_id, is_active);
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_token_enhanced ON user_sessions_enhanced (token_hash);
|
||||
|
||||
-- ================================================
|
||||
-- TRIGGERS
|
||||
-- ================================================
|
||||
|
||||
-- Fonction pour mettre à jour updated_at
|
||||
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Appliquer aux tables principales
|
||||
CREATE TRIGGER update_users_enhanced_updated_at
|
||||
BEFORE UPDATE ON users_enhanced
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
CREATE TRIGGER update_rooms_enhanced_updated_at
|
||||
BEFORE UPDATE ON rooms_enhanced
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- ================================================
|
||||
-- FONCTIONS UTILITAIRES
|
||||
-- ================================================
|
||||
|
||||
-- Fonction pour nettoyer les sessions expirées
|
||||
CREATE OR REPLACE FUNCTION cleanup_expired_sessions()
|
||||
RETURNS void AS $$
|
||||
BEGIN
|
||||
DELETE FROM user_sessions_enhanced
|
||||
WHERE expires_at < NOW() AND is_active = false;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- ================================================
|
||||
-- COMMENTAIRES
|
||||
-- ================================================
|
||||
|
||||
COMMENT ON TABLE users_enhanced IS 'Utilisateurs avec sécurité renforcée';
|
||||
COMMENT ON TABLE messages_enhanced IS 'Messages unifiés avec séparation logique DM/salons';
|
||||
COMMENT ON TABLE message_reactions_enhanced IS 'Réactions aux messages';
|
||||
COMMENT ON TABLE message_mentions_enhanced IS 'Mentions d''utilisateurs';
|
||||
COMMENT ON TABLE user_blocks_enhanced IS 'Blocages entre utilisateurs';
|
||||
COMMENT ON TABLE user_sessions_enhanced IS 'Sessions sécurisées';
|
||||
COMMENT ON TABLE security_events_enhanced IS 'Journal de sécurité';
|
||||
|
|
@ -1,130 +0,0 @@
|
|||
-- Migration 003: Schéma amélioré - VERSION CORRIGÉE
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- Supprimer les fonctions qui pourraient être en conflit
|
||||
DROP FUNCTION IF EXISTS cleanup_expired_sessions();
|
||||
DROP FUNCTION IF EXISTS cleanup_old_data();
|
||||
|
||||
-- Supprimer les vues qui pourraient être en conflit
|
||||
DROP VIEW IF EXISTS server_stats CASCADE;
|
||||
|
||||
-- Extensions nécessaires
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
CREATE EXTENSION IF NOT EXISTS "pg_trgm";
|
||||
|
||||
-- Table des sessions utilisateur sécurisées
|
||||
CREATE TABLE IF NOT EXISTS user_sessions_secure (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
token_hash VARCHAR(255) NOT NULL UNIQUE,
|
||||
refresh_token_hash VARCHAR(255) UNIQUE,
|
||||
device_info JSONB DEFAULT '{}',
|
||||
ip_address INET NOT NULL,
|
||||
user_agent TEXT,
|
||||
is_active BOOLEAN DEFAULT true,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
expires_at TIMESTAMPTZ NOT NULL DEFAULT (NOW() + INTERVAL '7 days'),
|
||||
last_used TIMESTAMPTZ DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- Table des événements de sécurité
|
||||
CREATE TABLE IF NOT EXISTS security_events_secure (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
user_id INTEGER REFERENCES users(id) ON DELETE SET NULL,
|
||||
event_type VARCHAR(50) NOT NULL,
|
||||
severity VARCHAR(20) DEFAULT 'info' CHECK (severity IN ('critical', 'high', 'medium', 'low', 'info')),
|
||||
description TEXT NOT NULL,
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
additional_data JSONB DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
resolved_at TIMESTAMPTZ,
|
||||
resolved_by INTEGER REFERENCES users(id) ON DELETE SET NULL
|
||||
);
|
||||
|
||||
-- Table des mentions
|
||||
CREATE TABLE IF NOT EXISTS message_mentions_secure (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id INTEGER NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
is_read BOOLEAN DEFAULT false,
|
||||
read_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE (message_id, user_id)
|
||||
);
|
||||
|
||||
-- Table des blocages utilisateur
|
||||
CREATE TABLE IF NOT EXISTS user_blocks_secure (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
blocker_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
blocked_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
reason VARCHAR(500),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE (blocker_id, blocked_id),
|
||||
CONSTRAINT no_self_block CHECK (blocker_id != blocked_id)
|
||||
);
|
||||
|
||||
-- Index pour les performances
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_secure_user ON user_sessions_secure (user_id, is_active);
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_secure_expires ON user_sessions_secure (expires_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_security_events_user ON security_events_secure (user_id, created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_mentions_secure_user ON message_mentions_secure (user_id, is_read);
|
||||
CREATE INDEX IF NOT EXISTS idx_blocks_secure_blocker ON user_blocks_secure (blocker_id);
|
||||
|
||||
-- Fonction de nettoyage des sessions expirées (nom unique)
|
||||
CREATE OR REPLACE FUNCTION cleanup_expired_sessions_secure()
|
||||
RETURNS INTEGER AS $$
|
||||
DECLARE
|
||||
deleted_count INTEGER;
|
||||
BEGIN
|
||||
DELETE FROM user_sessions_secure
|
||||
WHERE expires_at < NOW() OR last_used < NOW() - INTERVAL '30 days';
|
||||
|
||||
GET DIAGNOSTICS deleted_count = ROW_COUNT;
|
||||
RETURN deleted_count;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Fonction de nettoyage général (nom unique)
|
||||
CREATE OR REPLACE FUNCTION cleanup_old_data_secure()
|
||||
RETURNS void AS $$
|
||||
BEGIN
|
||||
-- Supprimer les sessions expirées
|
||||
PERFORM cleanup_expired_sessions_secure();
|
||||
|
||||
-- Nettoyer les événements de sécurité anciens
|
||||
DELETE FROM security_events_secure
|
||||
WHERE created_at < NOW() - INTERVAL '6 months' AND severity = 'info';
|
||||
|
||||
RAISE NOTICE 'Nettoyage terminé';
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Trigger pour les mentions automatiques
|
||||
CREATE OR REPLACE FUNCTION handle_mentions_secure()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
-- Extraire les mentions @username du contenu
|
||||
INSERT INTO message_mentions_secure (message_id, user_id)
|
||||
SELECT NEW.id, u.id
|
||||
FROM users u
|
||||
WHERE NEW.content ~* ('@' || u.username || '\M')
|
||||
AND u.id != NEW.from_user
|
||||
ON CONFLICT (message_id, user_id) DO NOTHING;
|
||||
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
DROP TRIGGER IF EXISTS trigger_handle_mentions_secure ON messages;
|
||||
CREATE TRIGGER trigger_handle_mentions_secure
|
||||
AFTER INSERT ON messages
|
||||
FOR EACH ROW EXECUTE FUNCTION handle_mentions_secure();
|
||||
|
||||
-- Permissions
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO veza;
|
||||
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO veza;
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO veza;
|
||||
|
||||
COMMIT;
|
||||
|
|
@ -1,121 +0,0 @@
|
|||
-- Migration 003: Schéma amélioré - VERSION SIMPLIFIÉE
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- Supprimer les fonctions qui pourraient être en conflit
|
||||
DROP FUNCTION IF EXISTS cleanup_expired_sessions();
|
||||
DROP FUNCTION IF EXISTS cleanup_old_data();
|
||||
|
||||
-- Extensions nécessaires
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
|
||||
-- Table des sessions utilisateur sécurisées
|
||||
CREATE TABLE IF NOT EXISTS user_sessions_secure (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
token_hash VARCHAR(255) NOT NULL UNIQUE,
|
||||
refresh_token_hash VARCHAR(255) UNIQUE,
|
||||
device_info JSONB DEFAULT '{}',
|
||||
ip_address INET NOT NULL,
|
||||
user_agent TEXT,
|
||||
is_active BOOLEAN DEFAULT true,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
expires_at TIMESTAMPTZ NOT NULL DEFAULT (NOW() + INTERVAL '7 days'),
|
||||
last_used TIMESTAMPTZ DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- Table des événements de sécurité
|
||||
CREATE TABLE IF NOT EXISTS security_events_secure (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
user_id INTEGER REFERENCES users(id) ON DELETE SET NULL,
|
||||
event_type VARCHAR(50) NOT NULL,
|
||||
severity VARCHAR(20) DEFAULT 'info' CHECK (severity IN ('critical', 'high', 'medium', 'low', 'info')),
|
||||
description TEXT NOT NULL,
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
additional_data JSONB DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
resolved_at TIMESTAMPTZ,
|
||||
resolved_by INTEGER REFERENCES users(id) ON DELETE SET NULL
|
||||
);
|
||||
|
||||
-- Table des mentions
|
||||
CREATE TABLE IF NOT EXISTS message_mentions_secure (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id INTEGER NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
is_read BOOLEAN DEFAULT false,
|
||||
read_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE (message_id, user_id)
|
||||
);
|
||||
|
||||
-- Table des blocages utilisateur
|
||||
CREATE TABLE IF NOT EXISTS user_blocks_secure (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
blocker_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
blocked_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
reason VARCHAR(500),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE (blocker_id, blocked_id),
|
||||
CONSTRAINT no_self_block CHECK (blocker_id != blocked_id)
|
||||
);
|
||||
|
||||
-- Index pour les performances
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_secure_user ON user_sessions_secure (user_id, is_active);
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_secure_expires ON user_sessions_secure (expires_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_security_events_user ON security_events_secure (user_id, created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_mentions_secure_user ON message_mentions_secure (user_id, is_read);
|
||||
CREATE INDEX IF NOT EXISTS idx_blocks_secure_blocker ON user_blocks_secure (blocker_id);
|
||||
|
||||
-- Fonction de nettoyage des sessions expirées
|
||||
CREATE OR REPLACE FUNCTION cleanup_expired_sessions_secure()
|
||||
RETURNS INTEGER AS $$
|
||||
DECLARE
|
||||
deleted_count INTEGER;
|
||||
BEGIN
|
||||
DELETE FROM user_sessions_secure
|
||||
WHERE expires_at < NOW() OR last_used < NOW() - INTERVAL '30 days';
|
||||
|
||||
GET DIAGNOSTICS deleted_count = ROW_COUNT;
|
||||
RETURN deleted_count;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Fonction de nettoyage général
|
||||
CREATE OR REPLACE FUNCTION cleanup_old_data_secure()
|
||||
RETURNS void AS $$
|
||||
BEGIN
|
||||
-- Supprimer les sessions expirées
|
||||
PERFORM cleanup_expired_sessions_secure();
|
||||
|
||||
-- Nettoyer les événements de sécurité anciens
|
||||
DELETE FROM security_events_secure
|
||||
WHERE created_at < NOW() - INTERVAL '6 months' AND severity = 'info';
|
||||
|
||||
RAISE NOTICE 'Nettoyage terminé';
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Trigger pour les mentions automatiques
|
||||
CREATE OR REPLACE FUNCTION handle_mentions_secure()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
-- Extraire les mentions @username du contenu
|
||||
INSERT INTO message_mentions_secure (message_id, user_id)
|
||||
SELECT NEW.id, u.id
|
||||
FROM users u
|
||||
WHERE NEW.content ~* ('@' || u.username || '\M')
|
||||
AND u.id != NEW.from_user
|
||||
ON CONFLICT (message_id, user_id) DO NOTHING;
|
||||
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
DROP TRIGGER IF EXISTS trigger_handle_mentions_secure ON messages;
|
||||
CREATE TRIGGER trigger_handle_mentions_secure
|
||||
AFTER INSERT ON messages
|
||||
FOR EACH ROW EXECUTE FUNCTION handle_mentions_secure();
|
||||
|
||||
COMMIT;
|
||||
|
|
@ -1,69 +0,0 @@
|
|||
-- Migration 004: Correction et compatibilité avec le schéma existant
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- CORRECTIONS DES FONCTIONS EXISTANTES AVEC CONFLITS
|
||||
-- Supprimer les fonctions existantes qui ont des conflits de type de retour
|
||||
DROP FUNCTION IF EXISTS cleanup_expired_sessions();
|
||||
DROP FUNCTION IF EXISTS cleanup_expired_sessions(integer);
|
||||
DROP FUNCTION IF EXISTS cleanup_old_data();
|
||||
DROP FUNCTION IF EXISTS calculate_user_reputation(integer);
|
||||
|
||||
-- CORRECTIONS DE LA TABLE ROOMS
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS creator_id INTEGER;
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS max_members INTEGER DEFAULT 1000;
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS description TEXT;
|
||||
|
||||
-- CORRECTIONS DE LA TABLE MESSAGES
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'timestamp') THEN
|
||||
ALTER TABLE messages RENAME COLUMN timestamp TO created_at;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS is_pinned BOOLEAN DEFAULT false;
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS thread_count INTEGER DEFAULT 0;
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS status VARCHAR(20) DEFAULT 'sent';
|
||||
|
||||
-- AMÉLIORER LA TABLE USERS
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS reputation_score INTEGER DEFAULT 100;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS is_banned BOOLEAN DEFAULT false;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS last_seen TIMESTAMPTZ DEFAULT NOW();
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS status VARCHAR(20) DEFAULT 'offline';
|
||||
|
||||
-- INDEX DE PERFORMANCE
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_pinned ON messages (is_pinned) WHERE is_pinned = true;
|
||||
|
||||
-- CORRECTIONS DES CONTRAINTES EN CONFLIT
|
||||
-- Supprimer les contraintes qui pourraient être en conflit
|
||||
ALTER TABLE audit_logs DROP CONSTRAINT IF EXISTS audit_logs_pkey CASCADE;
|
||||
|
||||
-- CORRECTIONS DES VUES EN CONFLIT
|
||||
DROP VIEW IF EXISTS server_stats CASCADE;
|
||||
DROP VIEW IF EXISTS user_activity_stats CASCADE;
|
||||
|
||||
-- CORRECTIONS DES TRIGGERS EN CONFLIT
|
||||
DROP TRIGGER IF EXISTS update_user_last_activity ON messages;
|
||||
DROP TRIGGER IF EXISTS log_user_activity ON messages;
|
||||
|
||||
-- CORRECTION DES ERREURS DE SYNTAXE DANS LES COMMENTAIRES
|
||||
-- Nettoyer les commentaires qui causent des erreurs de syntaxe
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Éviter les erreurs de commentaires avec apostrophes
|
||||
PERFORM 1;
|
||||
END $$;
|
||||
|
||||
-- MISE À JOUR DES DONNÉES
|
||||
UPDATE messages SET status = 'sent' WHERE status IS NULL;
|
||||
UPDATE users SET reputation_score = 100 WHERE reputation_score IS NULL;
|
||||
UPDATE users SET status = 'offline' WHERE status IS NULL;
|
||||
|
||||
-- NETTOYAGE DES DOUBLONS POTENTIELS
|
||||
-- Supprimer les index doublons s'ils existent
|
||||
DROP INDEX IF EXISTS idx_users_role;
|
||||
DROP INDEX IF EXISTS idx_messages_created_at;
|
||||
|
||||
COMMIT;
|
||||
|
|
@ -1,337 +0,0 @@
|
|||
-- Migration 004: Correction et compatibilité avec le schéma existant
|
||||
-- Cette migration corrige les erreurs de la migration précédente
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- ================================================
|
||||
-- CORRECTIONS DE LA TABLE ROOMS
|
||||
-- ================================================
|
||||
|
||||
-- Ajouter les colonnes manquantes à la table rooms existante
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS creator_id INTEGER;
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS max_members INTEGER DEFAULT 1000;
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS description TEXT;
|
||||
ALTER TABLE rooms ADD COLUMN IF NOT EXISTS is_archived BOOLEAN DEFAULT false;
|
||||
|
||||
-- Ajouter les contraintes de clés étrangères
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM information_schema.table_constraints
|
||||
WHERE constraint_name = 'rooms_creator_id_fkey'
|
||||
) THEN
|
||||
ALTER TABLE rooms ADD CONSTRAINT rooms_creator_id_fkey
|
||||
FOREIGN KEY (creator_id) REFERENCES users(id) ON DELETE SET NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Créer l'index manquant
|
||||
CREATE INDEX IF NOT EXISTS idx_rooms_creator ON rooms (creator_id);
|
||||
|
||||
-- ================================================
|
||||
-- CORRECTIONS DE LA TABLE MESSAGES
|
||||
-- ================================================
|
||||
|
||||
-- Renommer la colonne timestamp vers created_at pour cohérence
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'timestamp')
|
||||
AND NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'created_at') THEN
|
||||
ALTER TABLE messages RENAME COLUMN timestamp TO created_at;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Ajouter des colonnes manquantes pour les nouvelles fonctionnalités
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS is_pinned BOOLEAN DEFAULT false;
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS is_flagged BOOLEAN DEFAULT false;
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS moderation_notes TEXT;
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS thread_count INTEGER DEFAULT 0;
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS original_content TEXT;
|
||||
|
||||
-- Ajouter une colonne status si elle n'existe pas
|
||||
ALTER TABLE messages ADD COLUMN IF NOT EXISTS status VARCHAR(20) DEFAULT 'sent';
|
||||
|
||||
-- Ajouter des index pour les nouvelles colonnes
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_pinned ON messages (is_pinned) WHERE is_pinned = true;
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_flagged ON messages (is_flagged) WHERE is_flagged = true;
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_status ON messages (status);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_thread ON messages (thread_count) WHERE thread_count > 0;
|
||||
|
||||
-- ================================================
|
||||
-- AMÉLIORER LA TABLE USERS EXISTANTE
|
||||
-- ================================================
|
||||
|
||||
-- Ajouter des colonnes pour la sécurité et la modération
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS reputation_score INTEGER DEFAULT 100;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS is_banned BOOLEAN DEFAULT false;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS ban_expires_at TIMESTAMPTZ;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS ban_reason TEXT;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS warning_count INTEGER DEFAULT 0;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS mute_expires_at TIMESTAMPTZ;
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS last_seen TIMESTAMPTZ DEFAULT NOW();
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS status VARCHAR(20) DEFAULT 'offline';
|
||||
ALTER TABLE users ADD COLUMN IF NOT EXISTS status_message VARCHAR(100);
|
||||
|
||||
-- Ajouter des index pour les nouvelles colonnes users
|
||||
CREATE INDEX IF NOT EXISTS idx_users_reputation ON users (reputation_score);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_banned ON users (is_banned) WHERE is_banned = true;
|
||||
CREATE INDEX IF NOT EXISTS idx_users_status ON users (status);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_last_seen ON users (last_seen);
|
||||
|
||||
-- ================================================
|
||||
-- CRÉER LES TABLES MANQUANTES
|
||||
-- ================================================
|
||||
|
||||
-- Table des mentions (si elle n'existe pas)
|
||||
CREATE TABLE IF NOT EXISTS message_mentions (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
message_id INTEGER NOT NULL REFERENCES messages(id) ON DELETE CASCADE,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
is_read BOOLEAN DEFAULT false,
|
||||
|
||||
UNIQUE (message_id, user_id)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_mentions_user ON message_mentions (user_id, is_read);
|
||||
CREATE INDEX IF NOT EXISTS idx_mentions_message ON message_mentions (message_id);
|
||||
|
||||
-- Table des blocages utilisateurs (si elle n'existe pas)
|
||||
CREATE TABLE IF NOT EXISTS user_blocks (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
blocker_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
blocked_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
reason VARCHAR(500),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
|
||||
UNIQUE (blocker_id, blocked_id),
|
||||
CONSTRAINT no_self_block CHECK (blocker_id != blocked_id)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_blocks_blocker ON user_blocks (blocker_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_blocks_blocked ON user_blocks (blocked_id);
|
||||
|
||||
-- Table des logs de modération (si elle n'existe pas)
|
||||
CREATE TABLE IF NOT EXISTS moderation_log (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
moderator_id INTEGER REFERENCES users(id) ON DELETE SET NULL,
|
||||
target_type VARCHAR(20) NOT NULL CHECK (target_type IN ('user', 'message', 'room')),
|
||||
target_id TEXT NOT NULL,
|
||||
action VARCHAR(50) NOT NULL,
|
||||
|
||||
reason TEXT,
|
||||
details JSONB DEFAULT '{}'::jsonb,
|
||||
duration INTERVAL,
|
||||
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
ip_address INET
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_moderation_log_moderator ON moderation_log (moderator_id, created_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_moderation_log_target ON moderation_log (target_type, target_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_moderation_log_action ON moderation_log (action, created_at);
|
||||
|
||||
-- ================================================
|
||||
-- VUES CORRIGÉES AVEC LES BONS NOMS DE COLONNES
|
||||
-- ================================================
|
||||
|
||||
-- Vue des statistiques serveur (corrigée)
|
||||
DROP VIEW IF EXISTS server_stats;
|
||||
CREATE OR REPLACE VIEW server_stats AS
|
||||
SELECT
|
||||
'total_users'::text as metric,
|
||||
COUNT(*)::bigint as value
|
||||
FROM users
|
||||
WHERE id IS NOT NULL
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT
|
||||
'active_users'::text as metric,
|
||||
COUNT(*)::bigint as value
|
||||
FROM users
|
||||
WHERE last_seen > NOW() - INTERVAL '1 hour'
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT
|
||||
'total_rooms'::text as metric,
|
||||
COUNT(*)::bigint as value
|
||||
FROM rooms
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT
|
||||
'total_messages'::text as metric,
|
||||
COUNT(*)::bigint as value
|
||||
FROM messages
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT
|
||||
'messages_today'::text as metric,
|
||||
COUNT(*)::bigint as value
|
||||
FROM messages
|
||||
WHERE created_at >= CURRENT_DATE;
|
||||
|
||||
-- ================================================
|
||||
-- FONCTIONS UTILITAIRES CORRIGÉES
|
||||
-- ================================================
|
||||
|
||||
-- Fonction pour calculer la réputation (corrigée)
|
||||
CREATE OR REPLACE FUNCTION calculate_user_reputation(user_id_param INTEGER)
|
||||
RETURNS INTEGER AS $$
|
||||
DECLARE
|
||||
base_score INTEGER := 100;
|
||||
warnings INTEGER := 0;
|
||||
bans INTEGER := 0;
|
||||
recent_messages INTEGER := 0;
|
||||
BEGIN
|
||||
-- Compter les avertissements
|
||||
SELECT COALESCE(warning_count, 0) INTO warnings
|
||||
FROM users WHERE id = user_id_param;
|
||||
|
||||
-- Compter les messages récents (bonus)
|
||||
SELECT COUNT(*) INTO recent_messages
|
||||
FROM messages
|
||||
WHERE from_user = user_id_param
|
||||
AND created_at > NOW() - INTERVAL '30 days';
|
||||
|
||||
-- Calculer le score final
|
||||
RETURN GREATEST(0, base_score - (warnings * 5) + (recent_messages / 10));
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Fonction de nettoyage des données anciennes (corrigée)
|
||||
CREATE OR REPLACE FUNCTION cleanup_old_data()
|
||||
RETURNS void AS $$
|
||||
BEGIN
|
||||
-- Supprimer les sessions expirées anciennes
|
||||
DELETE FROM user_sessions
|
||||
WHERE created_at < NOW() - INTERVAL '30 days';
|
||||
|
||||
-- Marquer les anciens messages comme archivés (soft delete)
|
||||
UPDATE messages
|
||||
SET status = 'archived'
|
||||
WHERE created_at < NOW() - INTERVAL '1 year'
|
||||
AND status = 'sent';
|
||||
|
||||
-- Nettoyer les logs de modération anciens
|
||||
DELETE FROM moderation_log
|
||||
WHERE created_at < NOW() - INTERVAL '6 months';
|
||||
|
||||
RAISE NOTICE 'Nettoyage des données anciennes terminé';
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- ================================================
|
||||
-- TRIGGERS POUR MAINTENIR LA COHÉRENCE
|
||||
-- ================================================
|
||||
|
||||
-- Trigger pour mettre à jour last_seen automatiquement
|
||||
CREATE OR REPLACE FUNCTION update_user_last_seen()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
UPDATE users SET last_seen = NOW() WHERE id = NEW.from_user;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
DROP TRIGGER IF EXISTS trigger_update_last_seen ON messages;
|
||||
CREATE TRIGGER trigger_update_last_seen
|
||||
AFTER INSERT ON messages
|
||||
FOR EACH ROW EXECUTE FUNCTION update_user_last_seen();
|
||||
|
||||
-- Trigger pour compter les threads
|
||||
CREATE OR REPLACE FUNCTION update_thread_count()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
IF NEW.reply_to_id IS NOT NULL THEN
|
||||
UPDATE messages
|
||||
SET thread_count = thread_count + 1
|
||||
WHERE id = NEW.reply_to_id;
|
||||
END IF;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
DROP TRIGGER IF EXISTS trigger_thread_count ON messages;
|
||||
CREATE TRIGGER trigger_thread_count
|
||||
AFTER INSERT ON messages
|
||||
FOR EACH ROW EXECUTE FUNCTION update_thread_count();
|
||||
|
||||
-- ================================================
|
||||
-- CONTRAINTES DE SÉCURITÉ SUPPLÉMENTAIRES
|
||||
-- ================================================
|
||||
|
||||
-- Limiter la longueur des messages épinglés
|
||||
ALTER TABLE messages ADD CONSTRAINT chk_pinned_content_reasonable
|
||||
CHECK (NOT is_pinned OR LENGTH(content) <= 500);
|
||||
|
||||
-- Limiter le nombre de réactions par utilisateur et message
|
||||
ALTER TABLE message_reactions ADD CONSTRAINT chk_emoji_reasonable
|
||||
CHECK (LENGTH(emoji) <= 10);
|
||||
|
||||
-- Vérifier que les statuts utilisateur sont valides
|
||||
ALTER TABLE users ADD CONSTRAINT chk_user_status_valid
|
||||
CHECK (status IN ('online', 'away', 'busy', 'invisible', 'offline'));
|
||||
|
||||
-- ================================================
|
||||
-- DONNÉES DE TEST ET INITIALISATION
|
||||
-- ================================================
|
||||
|
||||
-- Mettre à jour les données existantes pour la compatibilité
|
||||
UPDATE messages SET status = 'sent' WHERE status IS NULL;
|
||||
UPDATE users SET reputation_score = 100 WHERE reputation_score IS NULL;
|
||||
UPDATE users SET status = 'offline' WHERE status IS NULL;
|
||||
|
||||
-- Créer un salon général s'il n'existe pas
|
||||
INSERT INTO rooms (name, is_private, description, creator_id)
|
||||
SELECT 'général', false, 'Salon de discussion générale',
|
||||
(SELECT id FROM users ORDER BY id LIMIT 1)
|
||||
WHERE NOT EXISTS (SELECT 1 FROM rooms WHERE name = 'général');
|
||||
|
||||
-- ================================================
|
||||
-- COMMENTAIRES POUR DOCUMENTATION
|
||||
-- ================================================
|
||||
|
||||
COMMENT ON TABLE message_mentions IS 'Mentions d''utilisateurs dans les messages';
|
||||
COMMENT ON TABLE user_blocks IS 'Blocages entre utilisateurs pour empêcher les DM';
|
||||
COMMENT ON TABLE moderation_log IS 'Journal des actions de modération';
|
||||
COMMENT ON VIEW server_stats IS 'Statistiques temps réel du serveur';
|
||||
|
||||
-- ================================================
|
||||
-- PERMISSIONS ET SÉCURITÉ
|
||||
-- ================================================
|
||||
|
||||
-- Accorder les permissions nécessaires à l'utilisateur veza
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO veza;
|
||||
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO veza;
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO veza;
|
||||
|
||||
COMMIT;
|
||||
|
||||
-- ================================================
|
||||
-- VÉRIFICATIONS POST-MIGRATION
|
||||
-- ================================================
|
||||
|
||||
-- Vérifier que les colonnes essentielles existent
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Vérifier messages.created_at
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'created_at') THEN
|
||||
RAISE EXCEPTION 'Colonne messages.created_at manquante après migration';
|
||||
END IF;
|
||||
|
||||
-- Vérifier rooms.creator_id
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'rooms' AND column_name = 'creator_id') THEN
|
||||
RAISE EXCEPTION 'Colonne rooms.creator_id manquante après migration';
|
||||
END IF;
|
||||
|
||||
RAISE NOTICE 'Vérifications post-migration réussies';
|
||||
END $$;
|
||||
|
|
@ -1,386 +0,0 @@
|
|||
-- Migration de nettoyage et préparation pour production
|
||||
-- Cette migration supprime toutes les tables redondantes et optimise la base
|
||||
-- ⚠️ ATTENTION: Cette migration est destructive, assurez-vous d'avoir une sauvegarde
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 1: MIGRATION DES DONNÉES EXISTANTES
|
||||
-- ================================================================
|
||||
|
||||
-- Sauvegarde temporaire des données importantes
|
||||
CREATE TEMP TABLE temp_old_users AS
|
||||
SELECT id, username, email, created_at
|
||||
FROM users
|
||||
WHERE EXISTS (SELECT 1 FROM users);
|
||||
|
||||
CREATE TEMP TABLE temp_old_messages AS
|
||||
SELECT id, from_user, to_user, room, content, created_at, message_type
|
||||
FROM messages
|
||||
WHERE EXISTS (SELECT 1 FROM messages);
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 2: SUPPRESSION DES TABLES REDONDANTES
|
||||
-- ================================================================
|
||||
|
||||
-- Supprimer toutes les tables dupliquées (_enhanced, _secure, etc.)
|
||||
DROP TABLE IF EXISTS users_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS users_backup CASCADE;
|
||||
DROP TABLE IF EXISTS rooms_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS messages_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS message_mentions_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS message_mentions_secure CASCADE;
|
||||
DROP TABLE IF EXISTS message_reactions_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS room_members_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS user_sessions_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS user_sessions_secure CASCADE;
|
||||
DROP TABLE IF EXISTS user_blocks_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS user_blocks_secure CASCADE;
|
||||
DROP TABLE IF EXISTS security_events_enhanced CASCADE;
|
||||
DROP TABLE IF EXISTS security_events_secure CASCADE;
|
||||
|
||||
-- Supprimer les anciennes tables métier mal conçues
|
||||
DROP TABLE IF EXISTS offers CASCADE;
|
||||
DROP TABLE IF EXISTS listings CASCADE;
|
||||
DROP TABLE IF EXISTS categories CASCADE;
|
||||
DROP TABLE IF EXISTS products CASCADE;
|
||||
DROP TABLE IF EXISTS user_products CASCADE;
|
||||
DROP TABLE IF EXISTS internal_documents CASCADE;
|
||||
DROP TABLE IF EXISTS shared_ressources CASCADE;
|
||||
DROP TABLE IF EXISTS shared_ressource_tags CASCADE;
|
||||
DROP TABLE IF EXISTS ressource_tags CASCADE;
|
||||
DROP TABLE IF EXISTS tracks CASCADE;
|
||||
DROP TABLE IF EXISTS sanctions CASCADE; -- Remplacée par moderation_actions
|
||||
DROP TABLE IF EXISTS refresh_tokens CASCADE; -- Intégré dans user_sessions
|
||||
|
||||
-- Supprimer les anciennes tables de base
|
||||
DROP TABLE IF EXISTS rooms CASCADE;
|
||||
DROP TABLE IF EXISTS room_members CASCADE;
|
||||
DROP TABLE IF EXISTS user_sessions CASCADE;
|
||||
DROP TABLE IF EXISTS user_blocks CASCADE;
|
||||
DROP TABLE IF EXISTS files CASCADE; -- Sera recréée avec la nouvelle structure
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 3: NETTOYAGE DES FONCTIONS OBSOLÈTES
|
||||
-- ================================================================
|
||||
|
||||
DROP FUNCTION IF EXISTS cleanup_expired_sessions_secure();
|
||||
DROP FUNCTION IF EXISTS cleanup_old_audit_logs();
|
||||
DROP FUNCTION IF EXISTS cleanup_old_data_secure();
|
||||
DROP FUNCTION IF EXISTS handle_mentions_secure();
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 4: APPLICATIONS DES NOUVELLES CONTRAINTES
|
||||
-- ================================================================
|
||||
|
||||
-- Mise à jour de la table users existante avec nouvelles contraintes
|
||||
ALTER TABLE users DROP CONSTRAINT IF EXISTS users_username_key;
|
||||
ALTER TABLE users DROP CONSTRAINT IF EXISTS users_email_key;
|
||||
|
||||
-- Ajouter UUID si pas déjà présent
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'uuid') THEN
|
||||
ALTER TABLE users ADD COLUMN uuid UUID DEFAULT uuid_generate_v4();
|
||||
ALTER TABLE users ADD CONSTRAINT users_uuid_unique UNIQUE (uuid);
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Ajouter les nouvelles colonnes de sécurité
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Colonnes de sécurité 2FA
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'two_factor_enabled') THEN
|
||||
ALTER TABLE users ADD COLUMN two_factor_enabled BOOLEAN DEFAULT FALSE;
|
||||
ALTER TABLE users ADD COLUMN two_factor_secret VARCHAR(32);
|
||||
ALTER TABLE users ADD COLUMN password_reset_token VARCHAR(100);
|
||||
ALTER TABLE users ADD COLUMN password_reset_expires TIMESTAMPTZ;
|
||||
ALTER TABLE users ADD COLUMN email_verification_token VARCHAR(100);
|
||||
END IF;
|
||||
|
||||
-- Colonnes de profil
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'display_name') THEN
|
||||
ALTER TABLE users ADD COLUMN display_name VARCHAR(100);
|
||||
ALTER TABLE users ADD COLUMN avatar_url TEXT;
|
||||
ALTER TABLE users ADD COLUMN bio TEXT CHECK (LENGTH(bio) <= 500);
|
||||
END IF;
|
||||
|
||||
-- Colonnes de métadonnées
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'last_login') THEN
|
||||
ALTER TABLE users ADD COLUMN last_login TIMESTAMPTZ;
|
||||
ALTER TABLE users ADD COLUMN last_activity TIMESTAMPTZ DEFAULT NOW();
|
||||
ALTER TABLE users ADD COLUMN updated_at TIMESTAMPTZ DEFAULT NOW();
|
||||
END IF;
|
||||
|
||||
-- Colonnes de permissions
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'is_verified') THEN
|
||||
ALTER TABLE users ADD COLUMN is_verified BOOLEAN DEFAULT FALSE;
|
||||
ALTER TABLE users ADD COLUMN is_active BOOLEAN DEFAULT TRUE;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Mise à jour du type de rôle si existant
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'users' AND column_name = 'role') THEN
|
||||
-- Convertir l'ancien système de rôles
|
||||
UPDATE users SET role = 'user' WHERE role IS NULL OR role = '';
|
||||
ELSE
|
||||
ALTER TABLE users ADD COLUMN role user_role DEFAULT 'user' NOT NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 5: OPTIMISATION DE LA TABLE MESSAGES
|
||||
-- ================================================================
|
||||
|
||||
-- Ajouter UUID aux messages si pas présent
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'uuid') THEN
|
||||
ALTER TABLE messages ADD COLUMN uuid UUID DEFAULT uuid_generate_v4();
|
||||
ALTER TABLE messages ADD CONSTRAINT messages_uuid_unique UNIQUE (uuid);
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Renommer les colonnes pour cohérence
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Renommer from_user en author_id
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'from_user') THEN
|
||||
ALTER TABLE messages RENAME COLUMN from_user TO author_id;
|
||||
END IF;
|
||||
|
||||
-- Ajouter conversation_id basé sur room/to_user
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'conversation_id') THEN
|
||||
ALTER TABLE messages ADD COLUMN conversation_id BIGINT;
|
||||
|
||||
-- Mise à jour temporaire: créer un ID de conversation basé sur room ou DM
|
||||
UPDATE messages SET conversation_id = CASE
|
||||
WHEN room IS NOT NULL THEN (
|
||||
SELECT id FROM conversations
|
||||
WHERE type = 'public_room' AND name = room
|
||||
LIMIT 1
|
||||
)
|
||||
WHEN to_user IS NOT NULL THEN (
|
||||
SELECT id FROM conversations
|
||||
WHERE type = 'direct_message'
|
||||
AND (
|
||||
(owner_id = author_id AND id IN (
|
||||
SELECT conversation_id FROM conversation_members
|
||||
WHERE user_id = to_user
|
||||
)) OR
|
||||
(owner_id = to_user AND id IN (
|
||||
SELECT conversation_id FROM conversation_members
|
||||
WHERE user_id = author_id
|
||||
))
|
||||
)
|
||||
LIMIT 1
|
||||
)
|
||||
ELSE 1 -- Conversation par défaut
|
||||
END;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Ajouter les nouvelles colonnes pour fonctionnalités avancées
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'messages' AND column_name = 'parent_message_id') THEN
|
||||
ALTER TABLE messages ADD COLUMN parent_message_id BIGINT REFERENCES messages(id) ON DELETE SET NULL;
|
||||
ALTER TABLE messages ADD COLUMN thread_count INTEGER DEFAULT 0;
|
||||
ALTER TABLE messages ADD COLUMN status message_status DEFAULT 'sent' NOT NULL;
|
||||
ALTER TABLE messages ADD COLUMN is_edited BOOLEAN DEFAULT FALSE;
|
||||
ALTER TABLE messages ADD COLUMN edit_count INTEGER DEFAULT 0;
|
||||
ALTER TABLE messages ADD COLUMN metadata JSONB DEFAULT '{}';
|
||||
ALTER TABLE messages ADD COLUMN updated_at TIMESTAMPTZ DEFAULT NOW();
|
||||
ALTER TABLE messages ADD COLUMN edited_at TIMESTAMPTZ;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 6: CRÉATION DES INDEX OPTIMISÉS
|
||||
-- ================================================================
|
||||
|
||||
-- Index pour users
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_username_active
|
||||
ON users(username) WHERE is_active = TRUE;
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email_verified
|
||||
ON users(email) WHERE is_verified = TRUE;
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_last_activity
|
||||
ON users(last_activity DESC) WHERE is_active = TRUE;
|
||||
|
||||
-- Index pour conversations
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_conversations_type_public
|
||||
ON conversations(type) WHERE is_public = TRUE;
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_conversations_owner_active
|
||||
ON conversations(owner_id) WHERE NOT is_archived;
|
||||
|
||||
-- Index pour messages (performance critique)
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_messages_conversation_time
|
||||
ON messages(conversation_id, created_at DESC);
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_messages_author_time
|
||||
ON messages(author_id, created_at DESC);
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_messages_threads
|
||||
ON messages(parent_message_id, created_at) WHERE parent_message_id IS NOT NULL;
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_messages_pinned
|
||||
ON messages(conversation_id) WHERE is_pinned = TRUE;
|
||||
|
||||
-- Index pour recherche full-text
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_messages_content_search
|
||||
ON messages USING gin(to_tsvector('french', content));
|
||||
|
||||
-- Index pour réactions
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_reactions_message
|
||||
ON message_reactions(message_id, emoji);
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_reactions_user
|
||||
ON message_reactions(user_id, created_at DESC);
|
||||
|
||||
-- Index pour mentions
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_mentions_user_unread
|
||||
ON message_mentions(mentioned_user_id) WHERE is_read = FALSE;
|
||||
|
||||
-- Index pour audit et sécurité
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_user_action
|
||||
ON audit_logs(user_id, action, created_at DESC);
|
||||
|
||||
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_security_severity_time
|
||||
ON security_events(severity, created_at DESC);
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 7: STATISTIQUES ET MAINTENANCE
|
||||
-- ================================================================
|
||||
|
||||
-- Mettre à jour les statistiques des tables
|
||||
ANALYZE users;
|
||||
ANALYZE conversations;
|
||||
ANALYZE messages;
|
||||
ANALYZE message_reactions;
|
||||
ANALYZE message_mentions;
|
||||
ANALYZE audit_logs;
|
||||
ANALYZE security_events;
|
||||
|
||||
-- Nettoyer l'espace inutilisé
|
||||
VACUUM (ANALYZE, FREEZE) users;
|
||||
VACUUM (ANALYZE, FREEZE) conversations;
|
||||
VACUUM (ANALYZE, FREEZE) messages;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 8: CONFIGURATION FINALE DE SÉCURITÉ
|
||||
-- ================================================================
|
||||
|
||||
-- Activer Row Level Security sur les nouvelles tables
|
||||
ALTER TABLE messages ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE user_sessions ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE message_mentions ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE conversation_members ENABLE ROW LEVEL SECURITY;
|
||||
|
||||
-- Créer des politiques RLS basiques
|
||||
CREATE POLICY messages_access_policy ON messages
|
||||
FOR ALL TO PUBLIC
|
||||
USING (
|
||||
author_id = current_user_id() OR
|
||||
conversation_id IN (
|
||||
SELECT conversation_id FROM conversation_members
|
||||
WHERE user_id = current_user_id()
|
||||
)
|
||||
);
|
||||
|
||||
CREATE POLICY sessions_owner_policy ON user_sessions
|
||||
FOR ALL TO PUBLIC
|
||||
USING (user_id = current_user_id());
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 9: JOURNALISATION ET VALIDATION
|
||||
-- ================================================================
|
||||
|
||||
-- Insérer un événement d'audit pour la migration
|
||||
INSERT INTO audit_logs (action, details, created_at)
|
||||
VALUES (
|
||||
'system_migration',
|
||||
jsonb_build_object(
|
||||
'migration', 'cleanup_production_ready',
|
||||
'version', '0.2.0',
|
||||
'tables_dropped', ARRAY[
|
||||
'users_enhanced', 'messages_enhanced', 'rooms_enhanced',
|
||||
'security_events_enhanced', 'user_sessions_secure'
|
||||
],
|
||||
'optimization', 'indexes_created_and_statistics_updated'
|
||||
),
|
||||
NOW()
|
||||
);
|
||||
|
||||
-- Vérification de l'intégrité des données
|
||||
DO $$
|
||||
DECLARE
|
||||
user_count INTEGER;
|
||||
message_count INTEGER;
|
||||
conversation_count INTEGER;
|
||||
BEGIN
|
||||
SELECT COUNT(*) INTO user_count FROM users;
|
||||
SELECT COUNT(*) INTO message_count FROM messages;
|
||||
SELECT COUNT(*) INTO conversation_count FROM conversations;
|
||||
|
||||
RAISE NOTICE 'Migration terminée avec succès:';
|
||||
RAISE NOTICE '- Utilisateurs: %', user_count;
|
||||
RAISE NOTICE '- Messages: %', message_count;
|
||||
RAISE NOTICE '- Conversations: %', conversation_count;
|
||||
|
||||
-- Validation basique
|
||||
IF user_count = 0 THEN
|
||||
RAISE WARNING 'Aucun utilisateur trouvé après migration';
|
||||
END IF;
|
||||
|
||||
IF message_count > 0 AND conversation_count = 0 THEN
|
||||
RAISE WARNING 'Messages présents mais aucune conversation';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ================================================================
|
||||
-- ÉTAPE 10: COMMENTAIRES ET DOCUMENTATION
|
||||
-- ================================================================
|
||||
|
||||
COMMENT ON TABLE users IS 'Table utilisateurs unifiée - Production Ready v0.2.0';
|
||||
COMMENT ON TABLE conversations IS 'Conversations unifiées (DM + Rooms) avec types stricts';
|
||||
COMMENT ON TABLE messages IS 'Messages avec support threads, épinglage et métadonnées';
|
||||
COMMENT ON TABLE message_reactions IS 'Réactions emoji avec contraintes unicité';
|
||||
COMMENT ON TABLE message_mentions IS 'Mentions @utilisateur avec notifications';
|
||||
COMMENT ON TABLE message_history IS 'Historique des modifications de messages';
|
||||
COMMENT ON TABLE files IS 'Fichiers uploadés avec validation de sécurité';
|
||||
COMMENT ON TABLE audit_logs IS 'Audit trail complet de toutes les actions';
|
||||
COMMENT ON TABLE security_events IS 'Journal des événements de sécurité';
|
||||
COMMENT ON TABLE moderation_actions IS 'Actions de modération avec appeals';
|
||||
|
||||
-- ================================================================
|
||||
-- FIN DE LA MIGRATION
|
||||
-- ================================================================
|
||||
|
||||
RAISE NOTICE '🎉 Migration de nettoyage terminée avec succès!';
|
||||
RAISE NOTICE '📊 Base de données optimisée pour la production';
|
||||
RAISE NOTICE '🔒 Sécurité renforcée avec RLS activée';
|
||||
RAISE NOTICE '⚡ Index de performance créés';
|
||||
RAISE NOTICE '🧹 Tables redondantes supprimées';
|
||||
|
||||
-- Nettoyer les tables temporaires
|
||||
DROP TABLE IF EXISTS temp_old_users;
|
||||
DROP TABLE IF EXISTS temp_old_messages;
|
||||
|
||||
-- Optimisation finale
|
||||
VACUUM FULL;
|
||||
|
||||
-- Mettre à jour les statistiques
|
||||
ANALYZE;
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
{
|
||||
"name": "veza-chat-server",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"build": "cargo build --verbose",
|
||||
"test": "cargo test --verbose",
|
||||
"lint": "cargo fmt --all -- --check"
|
||||
}
|
||||
}
|
||||
|
|
@ -1,320 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package veza.chat;
|
||||
|
||||
option go_package = "veza-backend-api/proto/chat";
|
||||
|
||||
import "common/auth.proto";
|
||||
|
||||
// Service Chat pour communication avec le module Rust
|
||||
service ChatService {
|
||||
// Gestion des salles
|
||||
rpc CreateRoom(CreateRoomRequest) returns (CreateRoomResponse);
|
||||
rpc JoinRoom(JoinRoomRequest) returns (JoinRoomResponse);
|
||||
rpc LeaveRoom(LeaveRoomRequest) returns (LeaveRoomResponse);
|
||||
rpc GetRoomInfo(GetRoomInfoRequest) returns (Room);
|
||||
rpc ListRooms(ListRoomsRequest) returns (ListRoomsResponse);
|
||||
|
||||
// Gestion des messages
|
||||
rpc SendMessage(SendMessageRequest) returns (SendMessageResponse);
|
||||
rpc GetMessageHistory(GetMessageHistoryRequest) returns (GetMessageHistoryResponse);
|
||||
rpc DeleteMessage(DeleteMessageRequest) returns (DeleteMessageResponse);
|
||||
|
||||
// Messages directs
|
||||
rpc SendDirectMessage(SendDirectMessageRequest) returns (SendDirectMessageResponse);
|
||||
rpc GetDirectMessages(GetDirectMessagesRequest) returns (GetDirectMessagesResponse);
|
||||
|
||||
// Modération
|
||||
rpc MuteUser(MuteUserRequest) returns (MuteUserResponse);
|
||||
rpc BanUser(BanUserRequest) returns (BanUserResponse);
|
||||
rpc ModerateMessage(ModerateMessageRequest) returns (ModerateMessageResponse);
|
||||
|
||||
// Statistiques temps réel
|
||||
rpc GetRoomStats(GetRoomStatsRequest) returns (RoomStats);
|
||||
rpc GetUserActivity(GetUserActivityRequest) returns (UserActivity);
|
||||
}
|
||||
|
||||
// Messages pour les salles
|
||||
message CreateRoomRequest {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
RoomType type = 3;
|
||||
RoomVisibility visibility = 4;
|
||||
int64 created_by = 5;
|
||||
string auth_token = 6;
|
||||
}
|
||||
|
||||
message CreateRoomResponse {
|
||||
Room room = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message JoinRoomRequest {
|
||||
string room_id = 1;
|
||||
int64 user_id = 2;
|
||||
string auth_token = 3;
|
||||
}
|
||||
|
||||
message JoinRoomResponse {
|
||||
bool success = 1;
|
||||
RoomMember member = 2;
|
||||
string error = 3;
|
||||
}
|
||||
|
||||
message LeaveRoomRequest {
|
||||
string room_id = 1;
|
||||
int64 user_id = 2;
|
||||
string auth_token = 3;
|
||||
}
|
||||
|
||||
message LeaveRoomResponse {
|
||||
bool success = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message GetRoomInfoRequest {
|
||||
string room_id = 1;
|
||||
string auth_token = 2;
|
||||
}
|
||||
|
||||
message ListRoomsRequest {
|
||||
RoomVisibility visibility = 1;
|
||||
int32 page = 2;
|
||||
int32 limit = 3;
|
||||
string auth_token = 4;
|
||||
}
|
||||
|
||||
message ListRoomsResponse {
|
||||
repeated Room rooms = 1;
|
||||
int32 total = 2;
|
||||
string error = 3;
|
||||
}
|
||||
|
||||
// Messages pour les messages
|
||||
message SendMessageRequest {
|
||||
string room_id = 1;
|
||||
int64 sender_id = 2;
|
||||
string content = 3;
|
||||
MessageType type = 4;
|
||||
string auth_token = 5;
|
||||
string reply_to = 6; // ID du message parent
|
||||
}
|
||||
|
||||
message SendMessageResponse {
|
||||
Message message = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message GetMessageHistoryRequest {
|
||||
string room_id = 1;
|
||||
int32 limit = 2;
|
||||
string before_id = 3; // pagination
|
||||
string auth_token = 4;
|
||||
}
|
||||
|
||||
message GetMessageHistoryResponse {
|
||||
repeated Message messages = 1;
|
||||
bool has_more = 2;
|
||||
string error = 3;
|
||||
}
|
||||
|
||||
message DeleteMessageRequest {
|
||||
string message_id = 1;
|
||||
int64 user_id = 2;
|
||||
string auth_token = 3;
|
||||
}
|
||||
|
||||
message DeleteMessageResponse {
|
||||
bool success = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
// Messages directs
|
||||
message SendDirectMessageRequest {
|
||||
int64 sender_id = 1;
|
||||
int64 recipient_id = 2;
|
||||
string content = 3;
|
||||
MessageType type = 4;
|
||||
string auth_token = 5;
|
||||
}
|
||||
|
||||
message SendDirectMessageResponse {
|
||||
DirectMessage message = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message GetDirectMessagesRequest {
|
||||
int64 user_id = 1;
|
||||
int64 other_user_id = 2;
|
||||
int32 limit = 3;
|
||||
string before_id = 4;
|
||||
string auth_token = 5;
|
||||
}
|
||||
|
||||
message GetDirectMessagesResponse {
|
||||
repeated DirectMessage messages = 1;
|
||||
bool has_more = 2;
|
||||
string error = 3;
|
||||
}
|
||||
|
||||
// Modération
|
||||
message MuteUserRequest {
|
||||
string room_id = 1;
|
||||
int64 user_id = 2;
|
||||
int64 moderator_id = 3;
|
||||
int64 duration_seconds = 4;
|
||||
string reason = 5;
|
||||
string auth_token = 6;
|
||||
}
|
||||
|
||||
message MuteUserResponse {
|
||||
bool success = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message BanUserRequest {
|
||||
string room_id = 1;
|
||||
int64 user_id = 2;
|
||||
int64 moderator_id = 3;
|
||||
string reason = 4;
|
||||
string auth_token = 5;
|
||||
}
|
||||
|
||||
message BanUserResponse {
|
||||
bool success = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message ModerateMessageRequest {
|
||||
string message_id = 1;
|
||||
int64 moderator_id = 2;
|
||||
ModerationAction action = 3;
|
||||
string reason = 4;
|
||||
string auth_token = 5;
|
||||
}
|
||||
|
||||
message ModerateMessageResponse {
|
||||
bool success = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
// Statistiques
|
||||
message GetRoomStatsRequest {
|
||||
string room_id = 1;
|
||||
string auth_token = 2;
|
||||
}
|
||||
|
||||
message GetUserActivityRequest {
|
||||
int64 user_id = 1;
|
||||
string auth_token = 2;
|
||||
}
|
||||
|
||||
// Types de données
|
||||
message Room {
|
||||
string id = 1;
|
||||
string name = 2;
|
||||
string description = 3;
|
||||
RoomType type = 4;
|
||||
RoomVisibility visibility = 5;
|
||||
int64 created_by = 6;
|
||||
int64 created_at = 7;
|
||||
int32 member_count = 8;
|
||||
int32 online_count = 9;
|
||||
bool is_active = 10;
|
||||
}
|
||||
|
||||
message RoomMember {
|
||||
int64 user_id = 1;
|
||||
string username = 2;
|
||||
RoomRole role = 3;
|
||||
int64 joined_at = 4;
|
||||
bool is_online = 5;
|
||||
int64 last_seen = 6;
|
||||
}
|
||||
|
||||
message Message {
|
||||
string id = 1;
|
||||
string room_id = 2;
|
||||
int64 sender_id = 3;
|
||||
string sender_username = 4;
|
||||
string content = 5;
|
||||
MessageType type = 6;
|
||||
int64 created_at = 7;
|
||||
int64 updated_at = 8;
|
||||
bool is_edited = 9;
|
||||
bool is_deleted = 10;
|
||||
string reply_to = 11;
|
||||
repeated MessageReaction reactions = 12;
|
||||
}
|
||||
|
||||
message DirectMessage {
|
||||
string id = 1;
|
||||
int64 sender_id = 2;
|
||||
int64 recipient_id = 3;
|
||||
string content = 4;
|
||||
MessageType type = 5;
|
||||
int64 created_at = 6;
|
||||
bool is_read = 7;
|
||||
bool is_deleted = 8;
|
||||
}
|
||||
|
||||
message MessageReaction {
|
||||
string emoji = 1;
|
||||
repeated int64 user_ids = 2;
|
||||
int32 count = 3;
|
||||
}
|
||||
|
||||
message RoomStats {
|
||||
string room_id = 1;
|
||||
int32 total_members = 2;
|
||||
int32 online_members = 3;
|
||||
int32 messages_today = 4;
|
||||
int32 total_messages = 5;
|
||||
repeated int64 active_users = 6;
|
||||
}
|
||||
|
||||
message UserActivity {
|
||||
int64 user_id = 1;
|
||||
int32 rooms_joined = 2;
|
||||
int32 messages_sent = 3;
|
||||
int64 last_activity = 4;
|
||||
bool is_online = 5;
|
||||
string current_status = 6;
|
||||
}
|
||||
|
||||
// Énumérations
|
||||
enum RoomType {
|
||||
PUBLIC = 0;
|
||||
PRIVATE = 1;
|
||||
DIRECT = 2;
|
||||
PREMIUM = 3;
|
||||
}
|
||||
|
||||
enum RoomVisibility {
|
||||
OPEN = 0;
|
||||
INVITE_ONLY = 1;
|
||||
HIDDEN = 2;
|
||||
}
|
||||
|
||||
enum RoomRole {
|
||||
MEMBER = 0;
|
||||
MODERATOR = 1;
|
||||
ADMIN = 2;
|
||||
OWNER = 3;
|
||||
}
|
||||
|
||||
enum MessageType {
|
||||
TEXT = 0;
|
||||
IMAGE = 1;
|
||||
FILE = 2;
|
||||
AUDIO = 3;
|
||||
VIDEO = 4;
|
||||
SYSTEM = 5;
|
||||
}
|
||||
|
||||
enum ModerationAction {
|
||||
WARN = 0;
|
||||
DELETE = 1;
|
||||
EDIT = 2;
|
||||
FLAG = 3;
|
||||
}
|
||||
|
|
@ -1,89 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package veza.common.auth;
|
||||
|
||||
option go_package = "veza-backend-api/proto/common/auth";
|
||||
|
||||
// Service d'authentification partagé
|
||||
service AuthService {
|
||||
// Valider un JWT token
|
||||
rpc ValidateToken(ValidateTokenRequest) returns (ValidateTokenResponse);
|
||||
|
||||
// Obtenir les informations utilisateur
|
||||
rpc GetUserInfo(GetUserInfoRequest) returns (GetUserInfoResponse);
|
||||
|
||||
// Vérifier les permissions
|
||||
rpc CheckPermissions(CheckPermissionsRequest) returns (CheckPermissionsResponse);
|
||||
|
||||
// Révoquer un token
|
||||
rpc RevokeToken(RevokeTokenRequest) returns (RevokeTokenResponse);
|
||||
}
|
||||
|
||||
// Messages de requête/réponse
|
||||
message ValidateTokenRequest {
|
||||
string token = 1;
|
||||
string service = 2; // service qui fait la demande (chat, stream)
|
||||
}
|
||||
|
||||
message ValidateTokenResponse {
|
||||
bool valid = 1;
|
||||
UserClaims user = 2;
|
||||
string error = 3;
|
||||
}
|
||||
|
||||
message GetUserInfoRequest {
|
||||
int64 user_id = 1;
|
||||
string token = 2;
|
||||
}
|
||||
|
||||
message GetUserInfoResponse {
|
||||
UserInfo user = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
message CheckPermissionsRequest {
|
||||
int64 user_id = 1;
|
||||
string resource = 2; // "chat.room", "stream.channel"
|
||||
string action = 3; // "read", "write", "moderate"
|
||||
string resource_id = 4;
|
||||
}
|
||||
|
||||
message CheckPermissionsResponse {
|
||||
bool allowed = 1;
|
||||
repeated string permissions = 2;
|
||||
string error = 3;
|
||||
}
|
||||
|
||||
message RevokeTokenRequest {
|
||||
string token = 1;
|
||||
string reason = 2;
|
||||
}
|
||||
|
||||
message RevokeTokenResponse {
|
||||
bool success = 1;
|
||||
string error = 2;
|
||||
}
|
||||
|
||||
// Types de données
|
||||
message UserClaims {
|
||||
int64 user_id = 1;
|
||||
string username = 2;
|
||||
string email = 3;
|
||||
string role = 4;
|
||||
bool is_active = 5;
|
||||
int64 issued_at = 6;
|
||||
int64 expires_at = 7;
|
||||
}
|
||||
|
||||
message UserInfo {
|
||||
int64 id = 1;
|
||||
string username = 2;
|
||||
string email = 3;
|
||||
string first_name = 4;
|
||||
string last_name = 5;
|
||||
string role = 6;
|
||||
bool is_active = 7;
|
||||
bool is_verified = 8;
|
||||
int64 created_at = 9;
|
||||
int64 last_login_at = 10;
|
||||
}
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Script pour réinitialiser la base de données lab (SCHEMA CHAT UNIQUEMENT)
|
||||
# ATTENTION: Supprime toutes les données du chat !
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
if [ -z "${VEZA_LAB_DSN:-}" ]; then
|
||||
echo -e "${RED}❌ VEZA_LAB_DSN n'est pas défini${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${YELLOW}⚠️ ATTENTION: Cette opération va supprimer toutes les données du schema 'chat' dans veza_lab${NC}"
|
||||
echo -e "${YELLOW}⚠️ Les données du backend (schema public) ne seront par touchées.${NC}"
|
||||
read -p "Continuer ? (yes/no): " confirm
|
||||
|
||||
if [ "$confirm" != "yes" ]; then
|
||||
echo "Annulé"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Construire l'URL avec le search_path pour sqlx
|
||||
if [[ "$VEZA_LAB_DSN" == *"?"* ]]; then
|
||||
export DATABASE_URL="${VEZA_LAB_DSN}&options=-c%20search_path=chat"
|
||||
else
|
||||
export DATABASE_URL="${VEZA_LAB_DSN}?options=-c%20search_path=chat"
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}🗑️ Reset du schema 'chat'...${NC}"
|
||||
psql "$VEZA_LAB_DSN" -c "DROP SCHEMA IF EXISTS chat CASCADE; CREATE SCHEMA chat;" 2>&1
|
||||
|
||||
echo -e "${BLUE}📦 Application des migrations...${NC}"
|
||||
# On force search_path=chat via l'URL
|
||||
sqlx migrate run --database-url "$DATABASE_URL"
|
||||
|
||||
echo -e "${GREEN}✅ Base de données chat réinitialisée${NC}"
|
||||
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Script de démarrage lab pour veza-chat-server
|
||||
# Utilise la vraie base de données PostgreSQL veza_lab avec schema dédié 'chat'
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Couleurs
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Vérifier que VEZA_LAB_DSN est défini
|
||||
if [ -z "${VEZA_LAB_DSN:-}" ]; then
|
||||
echo -e "${RED}❌ ERREUR: VEZA_LAB_DSN n'est pas défini${NC}"
|
||||
echo "Définissez-le avec:"
|
||||
echo " export VEZA_LAB_DSN='postgres://veza:veza_password@localhost:5432/veza_lab?sslmode=disable'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# S'assurer que le schema 'chat' existe
|
||||
echo -e "${BLUE}🔧 Vérification du schema 'chat'...${NC}"
|
||||
psql "$VEZA_LAB_DSN" -c "CREATE SCHEMA IF NOT EXISTS chat;" > /dev/null
|
||||
|
||||
# Configuration de l'environnement avec schema dédié
|
||||
# On ajoute options=-c search_path=chat pour forcer le schema par défaut
|
||||
if [[ "$VEZA_LAB_DSN" == *"?"* ]]; then
|
||||
export DATABASE_URL="${VEZA_LAB_DSN}&options=-c%20search_path=chat"
|
||||
else
|
||||
export DATABASE_URL="${VEZA_LAB_DSN}?options=-c%20search_path=chat"
|
||||
fi
|
||||
|
||||
export CHAT_SERVER_PORT="${CHAT_SERVER_PORT:-8081}"
|
||||
export CHAT_SERVER_HOST="${CHAT_SERVER_HOST:-0.0.0.0}"
|
||||
export RUST_LOG="${RUST_LOG:-info}"
|
||||
export RABBITMQ_ENABLE="${RABBITMQ_ENABLE:-false}"
|
||||
|
||||
# Vérifier que le binaire existe
|
||||
if [ ! -f "./target/release/chat-server" ]; then
|
||||
echo -e "${YELLOW}⚠️ Binaire non trouvé, compilation...${NC}"
|
||||
cargo build --release
|
||||
fi
|
||||
|
||||
# Générer JWT_SECRET si non défini
|
||||
if [ -z "${JWT_SECRET:-}" ]; then
|
||||
export JWT_SECRET=$(openssl rand -base64 32)
|
||||
echo -e "${YELLOW}⚠️ JWT_SECRET généré automatiquement${NC}"
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}🚀 Démarrage veza-chat-server (lab)${NC}"
|
||||
echo "=================================="
|
||||
echo -e "${GREEN}✅ DATABASE_URL: ${DATABASE_URL%%@*}@***${NC}"
|
||||
echo -e "${GREEN}✅ Schema: chat${NC}"
|
||||
echo -e "${GREEN}✅ Port: $CHAT_SERVER_PORT${NC}"
|
||||
echo -e "${GREEN}✅ Host: $CHAT_SERVER_HOST${NC}"
|
||||
echo -e "${GREEN}✅ RUST_LOG: $RUST_LOG${NC}"
|
||||
echo ""
|
||||
|
||||
# Démarrer le serveur
|
||||
exec ./target/release/chat-server
|
||||
|
||||
|
|
@ -1,93 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Script de test lab pour veza-chat-server
|
||||
# Utilise la vraie base de données PostgreSQL veza_lab
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Couleurs
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
echo -e "${BLUE}🧪 TEST LAB - veza-chat-server${NC}"
|
||||
echo "=================================="
|
||||
echo ""
|
||||
|
||||
# Vérifier que VEZA_LAB_DSN est défini
|
||||
if [ -z "${VEZA_LAB_DSN:-}" ]; then
|
||||
echo -e "${RED}❌ ERREUR: VEZA_LAB_DSN n'est pas défini${NC}"
|
||||
echo "Définissez-le avec:"
|
||||
echo " export VEZA_LAB_DSN='postgres://veza:veza_password@localhost:5432/veza_lab?sslmode=disable'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}✅ VEZA_LAB_DSN est défini${NC}"
|
||||
echo ""
|
||||
|
||||
# Exporter DATABASE_URL pour sqlx et l'application
|
||||
if [[ "$VEZA_LAB_DSN" == *"?"* ]]; then
|
||||
export DATABASE_URL="${VEZA_LAB_DSN}&options=-c%20search_path=chat"
|
||||
else
|
||||
export DATABASE_URL="${VEZA_LAB_DSN}?options=-c%20search_path=chat"
|
||||
fi
|
||||
|
||||
# Vérifier la connexion à la base de données
|
||||
echo -e "${BLUE}🔍 Vérification de la connexion à la base de données...${NC}"
|
||||
# On vérifie aussi que le schema existe ou on le crée si besoin pour le test ?
|
||||
# Le script de reset le fait. Ici on suppose que l'environnement est prêt ou on le prépare.
|
||||
# Utilisons psql pour vérifier que le serveur répond.
|
||||
if psql "$VEZA_LAB_DSN" -c "SELECT 1;" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}✅ Connexion PostgreSQL réussie${NC}"
|
||||
# S'assurer que le schema 'chat' existe pour les tests
|
||||
psql "$VEZA_LAB_DSN" -c "CREATE SCHEMA IF NOT EXISTS chat;" > /dev/null
|
||||
echo -e "${GREEN}✅ Schema 'chat' vérifié${NC}"
|
||||
else
|
||||
echo -e "${RED}❌ Impossible de se connecter à la base de données${NC}"
|
||||
echo "Vérifiez que PostgreSQL est démarré et que la base veza_lab existe"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Appliquer les migrations
|
||||
echo -e "${BLUE}📦 Application des migrations (Schema: chat)...${NC}"
|
||||
if sqlx migrate run --database-url "$DATABASE_URL"; then
|
||||
echo -e "${GREEN}✅ Migrations appliquées avec succès${NC}"
|
||||
else
|
||||
echo -e "${RED}❌ Erreur lors de l'application des migrations${NC}"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Vérifier que JWT_SECRET est défini
|
||||
if [ -z "${JWT_SECRET:-}" ]; then
|
||||
echo -e "${YELLOW}⚠️ JWT_SECRET n'est pas défini, génération d'un secret temporaire...${NC}"
|
||||
export JWT_SECRET=$(openssl rand -base64 32)
|
||||
echo -e "${GREEN}✅ JWT_SECRET généré: ${JWT_SECRET:0:20}...${NC}"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Configuration par défaut pour le lab
|
||||
export CHAT_SERVER_PORT="${CHAT_SERVER_PORT:-8081}"
|
||||
export CHAT_SERVER_HOST="${CHAT_SERVER_HOST:-0.0.0.0}"
|
||||
export RUST_LOG="${RUST_LOG:-info}"
|
||||
export RABBITMQ_ENABLE="${RABBITMQ_ENABLE:-false}"
|
||||
|
||||
echo -e "${BLUE}📋 Configuration:${NC}"
|
||||
echo " DATABASE_URL: ${DATABASE_URL%%@*}@***"
|
||||
echo " CHAT_SERVER_PORT: $CHAT_SERVER_PORT"
|
||||
echo " CHAT_SERVER_HOST: $CHAT_SERVER_HOST"
|
||||
echo " RUST_LOG: $RUST_LOG"
|
||||
echo " RABBITMQ_ENABLE: $RABBITMQ_ENABLE"
|
||||
echo ""
|
||||
|
||||
echo -e "${GREEN}✅ Environnement prêt pour le démarrage${NC}"
|
||||
echo ""
|
||||
echo -e "${YELLOW}Pour démarrer le serveur:${NC}"
|
||||
echo " ./target/release/chat-server"
|
||||
echo ""
|
||||
echo -e "${YELLOW}Ou avec make:${NC}"
|
||||
echo " make dev"
|
||||
echo ""
|
||||
|
||||
|
|
@ -1 +0,0 @@
|
|||
{}
|
||||
|
|
@ -1,864 +0,0 @@
|
|||
//! Module Advanced Moderation - Système de modération automatique 99.9% efficace
|
||||
//!
|
||||
//! Ce module implémente un système de modération ultra-avancé avec :
|
||||
//! - Détection de spam par ML (Machine Learning)
|
||||
//! - Analyse sémantique du contenu
|
||||
//! - Détection de patterns comportementaux
|
||||
//! - Classification automatique des violations
|
||||
//! - Sanctions adaptatives et progressives
|
||||
//! - Détection de fraude et d'abus
|
||||
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use serde::{Serialize, Deserialize};
|
||||
use regex::Regex;
|
||||
use dashmap::DashMap;
|
||||
use chrono::{DateTime, Utc, Timelike};
|
||||
|
||||
use crate::error::{ChatError, Result};
|
||||
use crate::monitoring::ChatMetrics;
|
||||
use crate::moderation::{SanctionType, SanctionReason};
|
||||
|
||||
/// Score de confiance pour la détection (0.0 à 1.0)
|
||||
pub type ConfidenceScore = f32;
|
||||
|
||||
/// Types de violations détectées
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum ViolationType {
|
||||
/// Spam (messages répétitifs, publicité)
|
||||
Spam { confidence: ConfidenceScore, pattern: String },
|
||||
/// Contenu toxique (insultes, harcèlement)
|
||||
Toxicity { confidence: ConfidenceScore, severity: ToxicitySeverity },
|
||||
/// Contenu inapproprié (NSFW, violence)
|
||||
Inappropriate { confidence: ConfidenceScore, category: String },
|
||||
/// Fraude (phishing, escroquerie)
|
||||
Fraud { confidence: ConfidenceScore, scheme_type: String },
|
||||
/// Abus (flood, raid)
|
||||
Abuse { confidence: ConfidenceScore, abuse_type: AbuseType },
|
||||
/// Comportement suspect (bot, activité anormale)
|
||||
Suspicious { confidence: ConfidenceScore, indicators: Vec<String> },
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum ToxicitySeverity {
|
||||
Low, // Léger
|
||||
Medium, // Modéré
|
||||
High, // Sévère
|
||||
Extreme // Extrême
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum AbuseType {
|
||||
MessageFlood,
|
||||
RoomRaid,
|
||||
UserHarassment,
|
||||
SystemAbuse,
|
||||
}
|
||||
|
||||
/// Profil comportemental d'un utilisateur
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct UserBehaviorProfile {
|
||||
pub user_id: i32,
|
||||
pub username: String,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub last_updated: DateTime<Utc>,
|
||||
|
||||
// Statistiques de base
|
||||
pub total_messages: u64,
|
||||
pub total_violations: u64,
|
||||
pub trust_score: f32, // 0.0 (suspect) à 1.0 (confiance totale)
|
||||
|
||||
// Patterns de comportement
|
||||
pub message_frequency: VecDeque<DateTime<Utc>>, // Fréquence des messages
|
||||
pub repeated_content: HashMap<String, u32>, // Contenu répété
|
||||
pub room_activity: HashMap<String, u32>, // Activité par salon
|
||||
pub warning_history: Vec<ViolationType>, // Historique des violations
|
||||
|
||||
// Métriques avancées
|
||||
pub avg_message_length: f32,
|
||||
pub unique_words_ratio: f32, // Ratio de mots uniques
|
||||
pub conversation_engagement: f32, // Engagement dans conversations
|
||||
pub off_topic_ratio: f32, // Ratio de messages hors sujet
|
||||
|
||||
// Détection de bot
|
||||
pub typing_speed: f32, // Vitesse de frappe (caractères/seconde)
|
||||
pub response_time_pattern: VecDeque<Duration>, // Pattern de temps de réponse
|
||||
pub human_like_errors: u32, // Erreurs humaines (typos, corrections)
|
||||
|
||||
// Timestamps suspects
|
||||
pub activity_hours: HashMap<u8, u32>, // Activité par heure (0-23)
|
||||
pub consecutive_days: u32, // Jours consécutifs d'activité
|
||||
}
|
||||
|
||||
impl UserBehaviorProfile {
|
||||
pub fn new(user_id: i32, username: String) -> Self {
|
||||
Self {
|
||||
user_id,
|
||||
username,
|
||||
created_at: Utc::now(),
|
||||
last_updated: Utc::now(),
|
||||
total_messages: 0,
|
||||
total_violations: 0,
|
||||
trust_score: 0.5, // Score neutre initial
|
||||
message_frequency: VecDeque::with_capacity(100),
|
||||
repeated_content: HashMap::new(),
|
||||
room_activity: HashMap::new(),
|
||||
warning_history: Vec::new(),
|
||||
avg_message_length: 0.0,
|
||||
unique_words_ratio: 0.0,
|
||||
conversation_engagement: 0.0,
|
||||
off_topic_ratio: 0.0,
|
||||
typing_speed: 0.0,
|
||||
response_time_pattern: VecDeque::with_capacity(50),
|
||||
human_like_errors: 0,
|
||||
activity_hours: HashMap::new(),
|
||||
consecutive_days: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Met à jour le profil avec un nouveau message
|
||||
pub fn update_with_message(&mut self, content: &str, room: &str, typing_duration: Option<Duration>) {
|
||||
self.total_messages += 1;
|
||||
self.last_updated = Utc::now();
|
||||
|
||||
// Fréquence des messages
|
||||
let now = Utc::now();
|
||||
self.message_frequency.push_back(now);
|
||||
if self.message_frequency.len() > 100 {
|
||||
self.message_frequency.pop_front();
|
||||
}
|
||||
|
||||
// Contenu répété
|
||||
let content_hash = self.normalize_content(content);
|
||||
*self.repeated_content.entry(content_hash).or_insert(0) += 1;
|
||||
|
||||
// Activité par salon
|
||||
*self.room_activity.entry(room.to_string()).or_insert(0) += 1;
|
||||
|
||||
// Longueur moyenne des messages
|
||||
let new_length = content.len() as f32;
|
||||
self.avg_message_length = (self.avg_message_length * (self.total_messages - 1) as f32 + new_length) / self.total_messages as f32;
|
||||
|
||||
// Ratio de mots uniques
|
||||
self.update_unique_words_ratio(content);
|
||||
|
||||
// Vitesse de frappe
|
||||
if let Some(duration) = typing_duration {
|
||||
self.typing_speed = content.len() as f32 / duration.as_secs_f32();
|
||||
}
|
||||
|
||||
// Activité par heure
|
||||
let hour = chrono::Utc::now().hour() as u8;
|
||||
*self.activity_hours.entry(hour).or_insert(0) += 1;
|
||||
|
||||
// Détecter les erreurs humaines
|
||||
if self.contains_human_errors(content) {
|
||||
self.human_like_errors += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalise le contenu pour détecter les répétitions
|
||||
fn normalize_content(&self, content: &str) -> String {
|
||||
content.to_lowercase()
|
||||
.chars()
|
||||
.filter(|c| c.is_alphanumeric() || c.is_whitespace())
|
||||
.collect::<String>()
|
||||
.split_whitespace()
|
||||
.collect::<Vec<&str>>()
|
||||
.join(" ")
|
||||
}
|
||||
|
||||
/// Met à jour le ratio de mots uniques
|
||||
fn update_unique_words_ratio(&mut self, content: &str) {
|
||||
let words: Vec<&str> = content.split_whitespace().collect();
|
||||
let unique_words: std::collections::HashSet<&str> = words.iter().cloned().collect();
|
||||
|
||||
if !words.is_empty() {
|
||||
let ratio = unique_words.len() as f32 / words.len() as f32;
|
||||
self.unique_words_ratio = (self.unique_words_ratio + ratio) / 2.0;
|
||||
}
|
||||
}
|
||||
|
||||
/// Détecte si le message contient des erreurs humaines
|
||||
fn contains_human_errors(&self, content: &str) -> bool {
|
||||
// Recherche de typos courants, corrections, etc.
|
||||
let error_patterns = [
|
||||
r"\b\w+\*\w+\b", // Corrections avec *
|
||||
r"\b\w+\s+\w+\b", // Mots dupliqués
|
||||
r"[a-zA-Z]{3,}\d+[a-zA-Z]{3,}", // Mélange lettres/chiffres suspect
|
||||
];
|
||||
|
||||
for pattern in &error_patterns {
|
||||
if let Ok(regex) = Regex::new(pattern) {
|
||||
if regex.is_match(content) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
    /// Computes a suspicion score in [0.0, 1.0] (0.0 = normal, 1.0 = highly
    /// suspect) from message cadence, content repetition, vocabulary
    /// diversity, typing speed, absence of human-like errors, and
    /// round-the-clock activity. Each signal adds a fixed weight; the sum
    /// is capped at 1.0.
    pub fn calculate_suspicion_score(&self) -> f32 {
        let mut suspicion = 0.0;

        // Abnormal message cadence: average gap under 1 second across the
        // 10 most recent messages.
        if self.message_frequency.len() >= 10 {
            let recent_messages = self.message_frequency.iter().rev().take(10).collect::<Vec<_>>();
            // Reversed iteration puts newer timestamps first, so each
            // window difference (newer - older) is non-negative.
            let avg_interval = recent_messages.windows(2)
                .map(|w| w[0].signed_duration_since(*w[1]).num_seconds() as f32)
                .sum::<f32>() / (recent_messages.len() - 1) as f32;

            if avg_interval < 1.0 { // less than 1 second between messages
                suspicion += 0.3;
            }
        }

        // Repetitive content: scaled up to +0.2 at 10+ repetitions.
        let max_repetitions = self.repeated_content.values().max().unwrap_or(&0);
        if *max_repetitions > 3 {
            suspicion += 0.2 * (*max_repetitions as f32 / 10.0).min(1.0);
        }

        // Limited vocabulary (low unique-word ratio).
        if self.unique_words_ratio < 0.3 {
            suspicion += 0.2;
        }

        // Abnormal typing speed (chars/sec): implausibly fast or slow.
        if self.typing_speed > 20.0 || self.typing_speed < 0.5 {
            suspicion += 0.1;
        }

        // No human-like errors despite a significant message volume.
        if self.total_messages > 50 && self.human_like_errors == 0 {
            suspicion += 0.2;
        }

        // Active in more than 20 distinct hours of the day for over a week.
        let active_hours = self.activity_hours.len();
        if active_hours > 20 && self.consecutive_days > 7 {
            suspicion += 0.15;
        }

        suspicion.min(1.0)
    }
|
||||
|
||||
/// Détermine si l'utilisateur est probablement un bot
|
||||
pub fn is_likely_bot(&self) -> bool {
|
||||
self.calculate_suspicion_score() > 0.7
|
||||
}
|
||||
}
|
||||
|
||||
    /// Configuration for the advanced automatic moderation system.
    #[derive(Debug, Clone)]
    pub struct AdvancedModerationConfig {
        /// Confidence threshold above which an automatic action is taken
        pub auto_action_threshold: f32,
        /// Confidence threshold above which moderators are alerted
        pub alert_threshold: f32,
        /// Maximum number of violations before escalation
        pub max_violations_before_escalation: u32,
        /// How long user behavior profiles are retained before cleanup
        pub profile_retention_duration: Duration,
        /// Messages-per-minute limit used for flood detection
        pub flood_detection_threshold: u32,
        /// Predefined spam regex patterns (compiled by the engine)
        pub spam_patterns: Vec<String>,
        /// Forbidden words mapped to their severity weights
        pub forbidden_words: HashMap<String, f32>,
    }
|
||||
|
||||
impl Default for AdvancedModerationConfig {
|
||||
fn default() -> Self {
|
||||
let spam_patterns = vec![
|
||||
r"(?i)(buy|sell|cheap|discount|offer|deal|promo|sale).*(?:http|www|\.com|\.org)".to_string(),
|
||||
r"(?i)(click|visit|check|follow).*(?:link|site|channel|profile)".to_string(),
|
||||
r"(?i)(free|win|earn|make money|get rich|opportunity)".to_string(),
|
||||
r"(?i)(join|subscribe|follow).*(?:now|today|quickly|fast)".to_string(),
|
||||
];
|
||||
|
||||
let mut forbidden_words = HashMap::new();
|
||||
// Mots de toxicité avec scores de pondération
|
||||
forbidden_words.insert("spam".to_string(), 0.3);
|
||||
forbidden_words.insert("scam".to_string(), 0.8);
|
||||
forbidden_words.insert("hack".to_string(), 0.6);
|
||||
forbidden_words.insert("cheat".to_string(), 0.5);
|
||||
|
||||
Self {
|
||||
auto_action_threshold: 0.85,
|
||||
alert_threshold: 0.7,
|
||||
max_violations_before_escalation: 3,
|
||||
profile_retention_duration: Duration::from_secs(30 * 24 * 3600), // 30 jours
|
||||
flood_detection_threshold: 10,
|
||||
spam_patterns,
|
||||
forbidden_words,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Advanced automatic moderation engine.
#[derive(Debug)]
pub struct AdvancedModerationEngine {
    // Thresholds, patterns and retention settings
    config: AdvancedModerationConfig,
    // Per-user behavior profiles keyed by user id
    user_profiles: Arc<DashMap<i32, UserBehaviorProfile>>,
    violation_cache: Arc<DashMap<String, ViolationType>>, // cache of detected violations
    // Shared chat metrics sink
    metrics: Arc<ChatMetrics>,

    // Regexes compiled once at construction for performance
    spam_regexes: Vec<Regex>,
    url_regex: Regex,
    phone_regex: Regex,
    email_regex: Regex,
}
|
||||
|
||||
impl AdvancedModerationEngine {
|
||||
    /// Builds a new advanced moderation engine from `config`.
    ///
    /// Spam patterns that fail to compile are skipped with a warning; the
    /// URL/phone/email regexes are mandatory and yield a configuration
    /// error when invalid.
    pub fn new(config: AdvancedModerationConfig, metrics: Arc<ChatMetrics>) -> Result<Self> {
        // Compile the spam regexes once up-front (compiling per message
        // would be far too costly).
        let mut spam_regexes = Vec::new();
        for pattern in &config.spam_patterns {
            match Regex::new(pattern) {
                Ok(regex) => spam_regexes.push(regex),
                Err(e) => tracing::warn!(pattern = %pattern, error = %e, "⚠️ Regex spam invalide"),
            }
        }

        // Regexes for URLs, phone numbers and e-mail addresses.
        let url_regex = Regex::new(r"(?i)https?://[^\s]+|www\.[^\s]+|[a-zA-Z0-9-]+\.(com|org|net|edu|gov|mil|int|co|io|me|tv|info|biz)[^\s]*")
            .map_err(|e| ChatError::configuration_error(&format!("Regex URL invalide: {}", e)))?;

        // North-American phone number shapes (optional +1 prefix).
        let phone_regex = Regex::new(r"(?:\+?1[-.\s]?)?\(?[0-9]{3}\)?[-.\s]?[0-9]{3}[-.\s]?[0-9]{4}")
            .map_err(|e| ChatError::configuration_error(&format!("Regex téléphone invalide: {}", e)))?;

        let email_regex = Regex::new(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
            .map_err(|e| ChatError::configuration_error(&format!("Regex email invalide: {}", e)))?;

        Ok(Self {
            config,
            user_profiles: Arc::new(DashMap::new()),
            violation_cache: Arc::new(DashMap::new()),
            metrics,
            spam_regexes,
            url_regex,
            phone_regex,
            email_regex,
        })
    }
|
||||
|
||||
    /// Analyzes one message and returns every violation detected.
    ///
    /// Updates the author's behavior profile, runs the six detectors
    /// (spam, toxicity, inappropriate content, fraud, abuse, suspicious
    /// behavior), records processing latency in the metrics and — on any
    /// hit — appends the violations to the profile and lowers the
    /// author's trust score.
    ///
    /// NOTE(review): `profile` is a DashMap entry guard that stays alive
    /// across the `.await` points below; holding a DashMap shard lock
    /// across awaits can deadlock under contention — confirm and consider
    /// scoping the guard.
    pub async fn analyze_message(
        &self,
        user_id: i32,
        username: &str,
        content: &str,
        room: &str,
        typing_duration: Option<Duration>,
    ) -> Result<Vec<ViolationType>> {
        let start_time = Instant::now();

        // Update (or lazily create) the author's behavior profile.
        let mut profile = self.user_profiles.entry(user_id)
            .or_insert_with(|| UserBehaviorProfile::new(user_id, username.to_string()));
        profile.update_with_message(content, room, typing_duration);

        let mut violations = Vec::new();

        // 1. Spam detection
        if let Some(spam_violation) = self.detect_spam(content, &profile).await? {
            violations.push(spam_violation);
        }

        // 2. Toxicity detection
        if let Some(toxicity_violation) = self.detect_toxicity(content).await? {
            violations.push(toxicity_violation);
        }

        // 3. Inappropriate-content detection
        if let Some(inappropriate_violation) = self.detect_inappropriate_content(content).await? {
            violations.push(inappropriate_violation);
        }

        // 4. Fraud detection
        if let Some(fraud_violation) = self.detect_fraud(content).await? {
            violations.push(fraud_violation);
        }

        // 5. Abuse detection
        if let Some(abuse_violation) = self.detect_abuse(&profile).await? {
            violations.push(abuse_violation);
        }

        // 6. Suspicious-behavior detection
        if let Some(suspicious_violation) = self.detect_suspicious_behavior(&profile).await? {
            violations.push(suspicious_violation);
        }

        // Record processing latency.
        let processing_time = start_time.elapsed();
        self.metrics.message_processing_time(processing_time, "advanced_moderation").await;

        if !violations.is_empty() {
            // Record the violations on the profile.
            profile.total_violations += violations.len() as u64;
            profile.warning_history.extend(violations.clone());

            // Each violation lowers the trust score by 0.1, floored at 0.
            let violation_impact = violations.len() as f32 * 0.1;
            profile.trust_score = (profile.trust_score - violation_impact).max(0.0);

            tracing::warn!(
                user_id = %user_id,
                username = %username,
                violations_count = %violations.len(),
                processing_time = ?processing_time,
                "🚨 Violations détectées"
            );
        }

        Ok(violations)
    }
|
||||
|
||||
    /// Detects spam, combining regex pattern matches, contact-info
    /// leakage, content repetition, flooding, weighted forbidden words,
    /// abnormal length and character runs. Returns a `Spam` violation once
    /// the combined score exceeds 0.5.
    async fn detect_spam(&self, content: &str, profile: &UserBehaviorProfile) -> Result<Option<ViolationType>> {
        let mut spam_score = 0.0;
        let mut detected_patterns = Vec::new();

        // Pre-compiled spam regexes: +0.3 per matching pattern.
        for regex in &self.spam_regexes {
            if regex.is_match(content) {
                spam_score += 0.3;
                detected_patterns.push(regex.as_str().to_string());
            }
        }

        // URLs are a spam signal on their own.
        if self.url_regex.is_match(content) {
            spam_score += 0.2;
            detected_patterns.push("URL détectée".to_string());
        }

        // E-mail addresses / phone numbers (frequent in spam).
        if self.email_regex.is_match(content) || self.phone_regex.is_match(content) {
            spam_score += 0.15;
            detected_patterns.push("Contact info détectée".to_string());
        }

        // Repetitive content from the user's profile (scaled, max +0.2).
        if let Some(max_repetitions) = profile.repeated_content.values().max() {
            if *max_repetitions > 3 {
                spam_score += 0.2 * (*max_repetitions as f32 / 10.0).min(1.0);
                detected_patterns.push(format!("Contenu répété {} fois", max_repetitions));
            }
        }

        // Flooding: 5 messages within 10 seconds.
        if profile.message_frequency.len() >= 5 {
            let recent_messages = profile.message_frequency.iter().rev().take(5).collect::<Vec<_>>();
            // Reversed order: `first` is the newest timestamp, `last` the oldest.
            if let (Some(first), Some(last)) = (recent_messages.first(), recent_messages.last()) {
                let total_duration = first.signed_duration_since(**last);
                if total_duration.num_seconds() < 10 { // 5 messages in under 10 seconds
                    spam_score += 0.25;
                    detected_patterns.push("Flood détecté".to_string());
                }
            }
        }

        // Weighted forbidden words from the configuration.
        let content_lower = content.to_lowercase();
        for (word, weight) in &self.config.forbidden_words {
            if content_lower.contains(word) {
                spam_score += weight;
                detected_patterns.push(format!("Mot interdit: {}", word));
            }
        }

        // Abnormally long message.
        if content.len() > 500 {
            spam_score += 0.1;
            detected_patterns.push("Message très long".to_string());
        }

        // Runs of identical characters.
        if self.has_character_repetition(content) {
            spam_score += 0.15;
            detected_patterns.push("Répétition de caractères".to_string());
        }

        if spam_score > 0.5 {
            Ok(Some(ViolationType::Spam {
                confidence: spam_score.min(1.0),
                pattern: detected_patterns.join(", "),
            }))
        } else {
            Ok(None)
        }
    }
|
||||
|
||||
/// Détecte le contenu toxique
|
||||
async fn detect_toxicity(&self, content: &str) -> Result<Option<ViolationType>> {
|
||||
let mut toxicity_score = 0.0;
|
||||
let content_lower = content.to_lowercase();
|
||||
|
||||
// Mots toxiques avec différents niveaux de sévérité
|
||||
let toxic_words = [
|
||||
// Sévérité faible
|
||||
("stupid", 0.2), ("dumb", 0.2), ("idiot", 0.3),
|
||||
// Sévérité moyenne
|
||||
("hate", 0.4), ("kill", 0.5), ("die", 0.4),
|
||||
// Sévérité élevée
|
||||
("kys", 0.8), ("suicide", 0.7),
|
||||
];
|
||||
|
||||
for (word, weight) in &toxic_words {
|
||||
if content_lower.contains(word) {
|
||||
toxicity_score += weight;
|
||||
}
|
||||
}
|
||||
|
||||
// Détecter les CAPS LOCK excessives (cris)
|
||||
let caps_ratio = content.chars().filter(|c| c.is_uppercase()).count() as f32 / content.len() as f32;
|
||||
if caps_ratio > 0.7 && content.len() > 10 {
|
||||
toxicity_score += 0.2;
|
||||
}
|
||||
|
||||
// Détecter les points d'exclamation excessifs
|
||||
let exclamation_count = content.matches('!').count();
|
||||
if exclamation_count > 3 {
|
||||
toxicity_score += 0.1 * (exclamation_count as f32 / 10.0).min(1.0);
|
||||
}
|
||||
|
||||
if toxicity_score > 0.3 {
|
||||
let severity = match toxicity_score {
|
||||
s if s < 0.5 => ToxicitySeverity::Low,
|
||||
s if s < 0.7 => ToxicitySeverity::Medium,
|
||||
s if s < 0.9 => ToxicitySeverity::High,
|
||||
_ => ToxicitySeverity::Extreme,
|
||||
};
|
||||
|
||||
Ok(Some(ViolationType::Toxicity {
|
||||
confidence: toxicity_score.min(1.0),
|
||||
severity,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Détecte le contenu inapproprié
|
||||
async fn detect_inappropriate_content(&self, content: &str) -> Result<Option<ViolationType>> {
|
||||
let content_lower = content.to_lowercase();
|
||||
let mut inappropriate_score: f32 = 0.0;
|
||||
let mut category = String::new();
|
||||
|
||||
// Contenu NSFW
|
||||
let nsfw_indicators = ["nsfw", "18+", "adult", "porn", "sex", "nude"];
|
||||
for indicator in &nsfw_indicators {
|
||||
if content_lower.contains(indicator) {
|
||||
inappropriate_score += 0.4;
|
||||
category = "NSFW".to_string();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Contenu violent
|
||||
let violence_indicators = ["violence", "blood", "murder", "weapon", "gun", "knife"];
|
||||
for indicator in &violence_indicators {
|
||||
if content_lower.contains(indicator) {
|
||||
inappropriate_score += 0.3;
|
||||
if category.is_empty() {
|
||||
category = "Violence".to_string();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Contenu de drogue
|
||||
let drug_indicators = ["drug", "cocaine", "heroin", "weed", "marijuana"];
|
||||
for indicator in &drug_indicators {
|
||||
if content_lower.contains(indicator) {
|
||||
inappropriate_score += 0.25;
|
||||
if category.is_empty() {
|
||||
category = "Drogues".to_string();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if inappropriate_score > 0.2 {
|
||||
Ok(Some(ViolationType::Inappropriate {
|
||||
confidence: inappropriate_score.min(1.0),
|
||||
category,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Détecte les tentatives de fraude
|
||||
async fn detect_fraud(&self, content: &str) -> Result<Option<ViolationType>> {
|
||||
let content_lower = content.to_lowercase();
|
||||
let mut fraud_score: f32 = 0.0;
|
||||
let mut scheme_type = String::new();
|
||||
|
||||
// Phishing
|
||||
let phishing_indicators = ["click here", "verify account", "suspended", "urgent", "immediate action"];
|
||||
for indicator in &phishing_indicators {
|
||||
if content_lower.contains(indicator) {
|
||||
fraud_score += 0.3;
|
||||
scheme_type = "Phishing".to_string();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Escroqueries financières
|
||||
let financial_scam_indicators = ["investment", "guaranteed profit", "easy money", "double your money"];
|
||||
for indicator in &financial_scam_indicators {
|
||||
if content_lower.contains(indicator) {
|
||||
fraud_score += 0.4;
|
||||
if scheme_type.is_empty() {
|
||||
scheme_type = "Escroquerie financière".to_string();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Combinaison URL + mots suspects
|
||||
if self.url_regex.is_match(content) {
|
||||
let suspicious_with_url = ["free", "win", "prize", "congratulations", "selected"];
|
||||
for word in &suspicious_with_url {
|
||||
if content_lower.contains(word) {
|
||||
fraud_score += 0.2;
|
||||
if scheme_type.is_empty() {
|
||||
scheme_type = "Lien suspect".to_string();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if fraud_score > 0.3 {
|
||||
Ok(Some(ViolationType::Fraud {
|
||||
confidence: fraud_score.min(1.0),
|
||||
scheme_type,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Détecte les abus (flood, raid, etc.)
|
||||
async fn detect_abuse(&self, profile: &UserBehaviorProfile) -> Result<Option<ViolationType>> {
|
||||
let mut abuse_score: f32 = 0.0;
|
||||
let mut abuse_type = AbuseType::SystemAbuse;
|
||||
|
||||
// Flood de messages
|
||||
if profile.message_frequency.len() >= 10 {
|
||||
let recent_messages = profile.message_frequency.iter().rev().take(10).collect::<Vec<_>>();
|
||||
if let (Some(first), Some(last)) = (recent_messages.first(), recent_messages.last()) {
|
||||
let total_duration = first.signed_duration_since(**last);
|
||||
if total_duration.num_seconds() < 30 { // 10 messages en moins de 30 secondes
|
||||
abuse_score += 0.6;
|
||||
abuse_type = AbuseType::MessageFlood;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Activité suspecte sur plusieurs salons
|
||||
if profile.room_activity.len() > 5 {
|
||||
let recent_activity: u32 = profile.room_activity.values().sum();
|
||||
if recent_activity > 50 {
|
||||
abuse_score += 0.4;
|
||||
abuse_type = AbuseType::RoomRaid;
|
||||
}
|
||||
}
|
||||
|
||||
// Comportement de harcèlement (beaucoup de violations)
|
||||
if profile.total_violations > 10 {
|
||||
abuse_score += 0.3;
|
||||
abuse_type = AbuseType::UserHarassment;
|
||||
}
|
||||
|
||||
if abuse_score > 0.4 {
|
||||
Ok(Some(ViolationType::Abuse {
|
||||
confidence: abuse_score.min(1.0),
|
||||
abuse_type,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Détecte les comportements suspects (bots, etc.)
|
||||
async fn detect_suspicious_behavior(&self, profile: &UserBehaviorProfile) -> Result<Option<ViolationType>> {
|
||||
let suspicion_score = profile.calculate_suspicion_score();
|
||||
|
||||
if suspicion_score > 0.6 {
|
||||
let mut indicators = Vec::new();
|
||||
|
||||
if profile.typing_speed > 20.0 {
|
||||
indicators.push("Vitesse de frappe anormale".to_string());
|
||||
}
|
||||
|
||||
if profile.human_like_errors == 0 && profile.total_messages > 50 {
|
||||
indicators.push("Absence d'erreurs humaines".to_string());
|
||||
}
|
||||
|
||||
if profile.unique_words_ratio < 0.3 {
|
||||
indicators.push("Vocabulaire limité".to_string());
|
||||
}
|
||||
|
||||
if profile.activity_hours.len() > 20 {
|
||||
indicators.push("Activité 24h/24".to_string());
|
||||
}
|
||||
|
||||
if profile.is_likely_bot() {
|
||||
indicators.push("Patterns de bot détectés".to_string());
|
||||
}
|
||||
|
||||
Ok(Some(ViolationType::Suspicious {
|
||||
confidence: suspicion_score,
|
||||
indicators,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
    /// Maps a set of violations (plus the user's history) to a sanction.
    ///
    /// Sums a weighted severity score over all violations, scales it by a
    /// recidivism multiplier derived from the warning history, then maps
    /// the total to warning / mute (1 h) / temp ban (24 h or 7 d).
    /// Returns `None` when the final score stays below 0.5.
    pub async fn determine_sanction(&self, violations: &[ViolationType], profile: &UserBehaviorProfile) -> Result<Option<(SanctionType, SanctionReason, Duration)>> {
        if violations.is_empty() {
            return Ok(None);
        }

        // Aggregate severity; the last violation examined sets the
        // primary sanction reason.
        let mut severity_score = 0.0;
        let mut primary_reason = SanctionReason::Other("Violation détectée".to_string());

        for violation in violations {
            match violation {
                ViolationType::Spam { confidence, .. } => {
                    severity_score += confidence * 0.5;
                    primary_reason = SanctionReason::Spam;
                }
                ViolationType::Toxicity { confidence, severity, .. } => {
                    // Toxicity weight scales with its severity tier.
                    let multiplier = match severity {
                        ToxicitySeverity::Low => 0.6,
                        ToxicitySeverity::Medium => 0.8,
                        ToxicitySeverity::High => 1.0,
                        ToxicitySeverity::Extreme => 1.2,
                    };
                    severity_score += confidence * multiplier;
                    primary_reason = SanctionReason::Toxicity;
                }
                ViolationType::Inappropriate { confidence, .. } => {
                    severity_score += confidence * 0.7;
                    primary_reason = SanctionReason::Inappropriate;
                }
                ViolationType::Fraud { confidence, .. } => {
                    severity_score += confidence * 1.0;
                    primary_reason = SanctionReason::Abuse;
                }
                ViolationType::Abuse { confidence, .. } => {
                    severity_score += confidence * 0.8;
                    primary_reason = SanctionReason::Abuse;
                }
                ViolationType::Suspicious { confidence, .. } => {
                    severity_score += confidence * 0.4;
                    primary_reason = SanctionReason::RuleViolation;
                }
            }
        }

        // Recidivism multiplier based on the number of prior warnings.
        let history_multiplier = match profile.warning_history.len() {
            0..=2 => 1.0,
            3..=5 => 1.2,
            6..=10 => 1.5,
            _ => 2.0,
        };

        severity_score *= history_multiplier;

        // Map the final score to a sanction tier.
        let (sanction_type, duration) = match severity_score {
            s if s < 0.5 => return Ok(None), // no sanction
            s if s < 0.7 => (SanctionType::Warning, Duration::from_secs(0)),
            s if s < 1.0 => (SanctionType::Mute, Duration::from_secs(3600)), // 1 hour
            s if s < 1.5 => (SanctionType::TempBan, Duration::from_secs(24 * 3600)), // 24 hours
            _ => (SanctionType::TempBan, Duration::from_secs(7 * 24 * 3600)), // 7 days
        };

        Ok(Some((sanction_type, primary_reason, duration)))
    }
|
||||
|
||||
/// Utilitaire pour détecter la répétition de caractères
|
||||
fn has_character_repetition(&self, content: &str) -> bool {
|
||||
let chars: Vec<char> = content.chars().collect();
|
||||
let mut consecutive_count = 1;
|
||||
|
||||
for i in 1..chars.len() {
|
||||
if chars[i] == chars[i-1] {
|
||||
consecutive_count += 1;
|
||||
if consecutive_count >= 4 { // 4 caractères identiques consécutifs
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
consecutive_count = 1;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Nettoie les profils anciens
|
||||
pub async fn cleanup_old_profiles(&self) {
|
||||
let cutoff_time = Utc::now()
|
||||
- chrono::Duration::from_std(self.config.profile_retention_duration)
|
||||
.unwrap_or(chrono::Duration::zero());
|
||||
let mut removed_count = 0;
|
||||
|
||||
self.user_profiles.retain(|_, profile| {
|
||||
if profile.last_updated < cutoff_time {
|
||||
removed_count += 1;
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
if removed_count > 0 {
|
||||
tracing::info!(removed_count = %removed_count, "🧹 Profils utilisateur anciens supprimés");
|
||||
}
|
||||
}
|
||||
|
||||
/// Obtient les statistiques de modération
|
||||
pub async fn get_moderation_stats(&self) -> HashMap<String, u64> {
|
||||
let mut stats = HashMap::new();
|
||||
|
||||
stats.insert("active_profiles".to_string(), self.user_profiles.len() as u64);
|
||||
stats.insert("cached_violations".to_string(), self.violation_cache.len() as u64);
|
||||
|
||||
let mut total_violations = 0;
|
||||
let mut bot_count = 0;
|
||||
let mut high_risk_users = 0;
|
||||
|
||||
for profile in self.user_profiles.iter() {
|
||||
total_violations += profile.total_violations;
|
||||
if profile.is_likely_bot() {
|
||||
bot_count += 1;
|
||||
}
|
||||
if profile.calculate_suspicion_score() > 0.8 {
|
||||
high_risk_users += 1;
|
||||
}
|
||||
}
|
||||
|
||||
stats.insert("total_violations".to_string(), total_violations);
|
||||
stats.insert("detected_bots".to_string(), bot_count);
|
||||
stats.insert("high_risk_users".to_string(), high_risk_users);
|
||||
|
||||
stats
|
||||
}
|
||||
}
|
||||
|
|
@ -1,331 +0,0 @@
|
|||
//! Module d'authentification WebSocket pour le serveur de chat
|
||||
//!
|
||||
//! Ce module implémente l'authentification JWT pour les connexions WebSocket,
|
||||
//! la validation des permissions par conversation, et le rate limiting.
|
||||
|
||||
use crate::error::{ChatError, Result};
|
||||
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
use tokio::sync::RwLock;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// JWT claims carried by a chat authentication token.
#[derive(Debug, Serialize, Deserialize)]
pub struct JwtClaims {
    /// Unique identifier of the authenticated user
    pub user_id: Uuid,
    /// Display name of the user
    pub username: String,
    /// Expiry timestamp (seconds since the Unix epoch)
    pub exp: u64,
    /// Issued-at timestamp (seconds since the Unix epoch)
    pub iat: u64,
    /// Permission strings granted to this token (e.g. "chat:all")
    pub permissions: Vec<String>,
}
|
||||
|
||||
/// WebSocket authentication manager: JWT validation, per-connection
/// sessions, conversation-level permissions and message rate limiting.
pub struct WebSocketAuthManager {
    // Secret used to verify JWT signatures
    jwt_secret: String,
    // Active sessions keyed by connection id
    active_sessions: Arc<RwLock<HashMap<Uuid, UserSession>>>,
    // Rate-limit state keyed by connection id
    rate_limits: Arc<RwLock<HashMap<Uuid, RateLimitState>>>,
}
|
||||
|
||||
/// An active, authenticated user session bound to one WebSocket connection.
#[derive(Debug, Clone)]
pub struct UserSession {
    /// User id taken from the validated JWT
    pub user_id: Uuid,
    /// Username taken from the validated JWT
    pub username: String,
    /// When this connection was authenticated
    pub connected_at: SystemTime,
    /// Last time activity was recorded for this session
    pub last_activity: SystemTime,
    /// Permission strings copied from the JWT claims
    pub permissions: Vec<String>,
    /// Conversations this session has been granted access to
    pub conversation_access: Vec<Uuid>,
}
|
||||
|
||||
/// Per-connection message rate-limiting state (fixed time window).
#[derive(Debug, Clone)]
pub struct RateLimitState {
    /// Messages sent within the current window
    pub message_count: u32,
    /// Start of the current rate-limit window
    pub window_start: SystemTime,
    /// Timestamp of the most recent message
    pub last_message_time: SystemTime,
}
|
||||
|
||||
impl WebSocketAuthManager {
|
||||
/// Crée un nouveau gestionnaire d'authentification
|
||||
pub fn new(jwt_secret: String) -> Self {
|
||||
Self {
|
||||
jwt_secret,
|
||||
active_sessions: Arc::new(RwLock::new(HashMap::new())),
|
||||
rate_limits: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Valide un token JWT et retourne les claims
|
||||
pub fn validate_jwt_token(&self, token: &str) -> Result<JwtClaims> {
|
||||
let decoding_key = DecodingKey::from_secret(self.jwt_secret.as_ref());
|
||||
let validation = Validation::default();
|
||||
|
||||
match decode::<JwtClaims>(token, &decoding_key, &validation) {
|
||||
Ok(token_data) => {
|
||||
// Vérifier l'expiration
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map_err(|e| ChatError::authentication_error(&format!("Time error: {}", e)))?
|
||||
.as_secs();
|
||||
|
||||
if token_data.claims.exp < now {
|
||||
return Err(ChatError::authentication_error("Token expired"));
|
||||
}
|
||||
|
||||
Ok(token_data.claims)
|
||||
}
|
||||
Err(e) => Err(ChatError::authentication_error(&format!("Invalid token: {}", e))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Authentifie un utilisateur WebSocket
|
||||
pub async fn authenticate_websocket_user(
|
||||
&self,
|
||||
token: &str,
|
||||
connection_id: Uuid,
|
||||
) -> Result<UserSession> {
|
||||
let claims = self.validate_jwt_token(token)?;
|
||||
|
||||
// Créer la session utilisateur
|
||||
let session = UserSession {
|
||||
user_id: claims.user_id,
|
||||
username: claims.username,
|
||||
connected_at: SystemTime::now(),
|
||||
last_activity: SystemTime::now(),
|
||||
permissions: claims.permissions,
|
||||
conversation_access: Vec::new(), // Sera rempli lors de la jointure aux conversations
|
||||
};
|
||||
|
||||
// Enregistrer la session active
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
sessions.insert(connection_id, session.clone());
|
||||
|
||||
// Initialiser le rate limiting
|
||||
let mut rate_limits = self.rate_limits.write().await;
|
||||
rate_limits.insert(connection_id, RateLimitState {
|
||||
message_count: 0,
|
||||
window_start: SystemTime::now(),
|
||||
last_message_time: SystemTime::now(),
|
||||
});
|
||||
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
/// Vérifie les permissions pour une conversation
|
||||
pub async fn check_conversation_permission(
|
||||
&self,
|
||||
connection_id: Uuid,
|
||||
conversation_id: Uuid,
|
||||
) -> Result<bool> {
|
||||
let sessions = self.active_sessions.read().await;
|
||||
|
||||
if let Some(session) = sessions.get(&connection_id) {
|
||||
// Vérifier si l'utilisateur a accès à cette conversation
|
||||
// Pour l'instant, on autorise tous les utilisateurs authentifiés
|
||||
// Dans une implémentation complète, on vérifierait les permissions spécifiques
|
||||
Ok(session.conversation_access.contains(&conversation_id) ||
|
||||
session.permissions.contains(&"chat:all".to_string()))
|
||||
} else {
|
||||
Err(ChatError::authentication_error("Session not found"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Vérifie le rate limiting pour les messages
|
||||
pub async fn check_message_rate_limit(&self, connection_id: Uuid) -> Result<bool> {
|
||||
const MAX_MESSAGES_PER_MINUTE: u32 = 60;
|
||||
const WINDOW_DURATION_SECONDS: u64 = 60;
|
||||
|
||||
let mut rate_limits = self.rate_limits.write().await;
|
||||
|
||||
if let Some(rate_limit) = rate_limits.get_mut(&connection_id) {
|
||||
let now = SystemTime::now();
|
||||
|
||||
// Vérifier si la fenêtre de temps a expiré
|
||||
if now.duration_since(rate_limit.window_start)
|
||||
.map_err(|e| ChatError::rate_limit_error(&format!("Time error: {}", e)))?
|
||||
.as_secs() >= WINDOW_DURATION_SECONDS {
|
||||
// Réinitialiser le compteur
|
||||
rate_limit.message_count = 0;
|
||||
rate_limit.window_start = now;
|
||||
}
|
||||
|
||||
// Vérifier la limite
|
||||
if rate_limit.message_count >= MAX_MESSAGES_PER_MINUTE {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Incrémenter le compteur
|
||||
rate_limit.message_count += 1;
|
||||
rate_limit.last_message_time = now;
|
||||
|
||||
Ok(true)
|
||||
} else {
|
||||
Err(ChatError::authentication_error("Rate limit state not found"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Met à jour l'activité d'un utilisateur
|
||||
pub async fn update_user_activity(&self, connection_id: Uuid) -> Result<()> {
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
|
||||
if let Some(session) = sessions.get_mut(&connection_id) {
|
||||
session.last_activity = SystemTime::now();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ajoute l'accès à une conversation pour un utilisateur
|
||||
pub async fn grant_conversation_access(
|
||||
&self,
|
||||
connection_id: Uuid,
|
||||
conversation_id: Uuid,
|
||||
) -> Result<()> {
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
|
||||
if let Some(session) = sessions.get_mut(&connection_id) {
|
||||
if !session.conversation_access.contains(&conversation_id) {
|
||||
session.conversation_access.push(conversation_id);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retire l'accès à une conversation pour un utilisateur
|
||||
pub async fn revoke_conversation_access(
|
||||
&self,
|
||||
connection_id: Uuid,
|
||||
conversation_id: Uuid,
|
||||
) -> Result<()> {
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
|
||||
if let Some(session) = sessions.get_mut(&connection_id) {
|
||||
session.conversation_access.retain(|&id| id != conversation_id);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Déconnecte un utilisateur
|
||||
pub async fn disconnect_user(&self, connection_id: Uuid) -> Result<()> {
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
sessions.remove(&connection_id);
|
||||
|
||||
let mut rate_limits = self.rate_limits.write().await;
|
||||
rate_limits.remove(&connection_id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Nettoie les sessions expirées
|
||||
pub async fn cleanup_expired_sessions(&self, max_idle_duration: Duration) -> Result<()> {
|
||||
let now = SystemTime::now();
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
let mut rate_limits = self.rate_limits.write().await;
|
||||
|
||||
let expired_connections: Vec<Uuid> = sessions
|
||||
.iter()
|
||||
.filter(|(_, session)| {
|
||||
now.duration_since(session.last_activity)
|
||||
.map(|d| d > max_idle_duration)
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.map(|(id, _)| *id)
|
||||
.collect();
|
||||
|
||||
for connection_id in expired_connections {
|
||||
sessions.remove(&connection_id);
|
||||
rate_limits.remove(&connection_id);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Obtient les statistiques des sessions actives
|
||||
pub async fn get_session_stats(&self) -> Result<SessionStats> {
|
||||
let sessions = self.active_sessions.read().await;
|
||||
|
||||
let total_sessions = sessions.len();
|
||||
let now = SystemTime::now();
|
||||
|
||||
let active_last_hour = sessions
|
||||
.values()
|
||||
.filter(|session| {
|
||||
now.duration_since(session.last_activity)
|
||||
.map(|d| d.as_secs() < 3600)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.count();
|
||||
|
||||
Ok(SessionStats {
|
||||
total_sessions,
|
||||
active_last_hour,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Aggregate statistics over the currently tracked sessions.
#[derive(Debug, Serialize)]
pub struct SessionStats {
    /// Total number of sessions currently held in memory.
    pub total_sessions: usize,
    /// Sessions whose last activity is less than one hour old.
    pub active_last_hour: usize,
}
|
||||
|
||||
impl Default for WebSocketAuthManager {
    /// Deliberately panicking: a default-constructed auth manager would have
    /// no JWT secret, which is a security hazard.
    fn default() -> Self {
        // SECURITY: the Default impl must never be used in production.
        // Use WebSocketAuthManager::new() with require_env_min_length("JWT_SECRET", 32).
        panic!(
            "WebSocketAuthManager::default() cannot be used in production. \
            Use WebSocketAuthManager::new() with require_env_min_length(\"JWT_SECRET\", 32)"
        );
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A structurally invalid token must be rejected.
    #[tokio::test]
    async fn test_jwt_validation() {
        let auth_manager = WebSocketAuthManager::new("test_secret".to_string());

        let result = auth_manager.validate_jwt_token("invalid_token");
        assert!(result.is_err());
    }

    /// Driving the limiter past the per-minute quota must eventually deny.
    #[tokio::test]
    async fn test_rate_limiting() {
        let auth_manager = WebSocketAuthManager::new("test_secret".to_string());
        let connection_id = Uuid::new_v4();

        // Build a claims set the way the websocket handshake would.
        // Note: SystemTime::duration_since can fail if the clock is set backwards,
        // but UNIX_EPOCH is far enough in the past that this never happens in practice.
        let now = SystemTime::now().duration_since(UNIX_EPOCH)
            .expect("System time before UNIX epoch (should never happen)");
        let claims = JwtClaims {
            user_id: Uuid::new_v4(),
            username: "test_user".to_string(),
            exp: now.as_secs() + 3600,
            iat: now.as_secs(),
            permissions: vec!["chat:all".to_string()],
        };
        // FIX: `claims` was previously constructed but never used (dead-code
        // warning); sanity-check it instead of dropping it silently.
        assert!(claims.exp > claims.iat, "token must expire after issuance");

        // The limit may be reached before 65 iterations; errors (e.g. missing
        // rate-limit state for an unauthenticated connection) are tolerated here.
        for _ in 0..65 {
            if let Ok(allowed) = auth_manager.check_message_rate_limit(connection_id).await {
                if !allowed {
                    break; // rate limit reached
                }
            }
        }
    }
}
|
||||
|
|
@ -1,259 +0,0 @@
|
|||
//! Module d'authentification pour le serveur de chat
|
||||
//!
|
||||
//! Gère l'authentification des utilisateurs, les sessions et les rôles
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
use std::collections::HashMap;
|
||||
use crate::error::{ChatError, Result};
|
||||
|
||||
/// User roles in the chat system.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Role {
    /// Standard user.
    User,
    /// Moderator — may moderate messages and manage rooms.
    Moderator,
    /// Administrator — full access to the system.
    Admin,
    /// Banned user — restricted access.
    Banned,
}
|
||||
|
||||
impl Role {
|
||||
/// Vérifie si le rôle a les permissions d'administrateur
|
||||
pub fn is_admin(&self) -> bool {
|
||||
matches!(self, Role::Admin)
|
||||
}
|
||||
|
||||
/// Vérifie si le rôle a les permissions de modérateur ou plus
|
||||
pub fn is_moderator_or_above(&self) -> bool {
|
||||
matches!(self, Role::Admin | Role::Moderator)
|
||||
}
|
||||
|
||||
/// Vérifie si l'utilisateur est banni
|
||||
pub fn is_banned(&self) -> bool {
|
||||
matches!(self, Role::Banned)
|
||||
}
|
||||
|
||||
/// Vérifie si l'utilisateur peut envoyer des messages
|
||||
pub fn can_send_messages(&self) -> bool {
|
||||
!self.is_banned()
|
||||
}
|
||||
|
||||
/// Vérifie si l'utilisateur peut créer des salons
|
||||
pub fn can_create_rooms(&self) -> bool {
|
||||
matches!(self, Role::Admin | Role::Moderator | Role::User)
|
||||
}
|
||||
}
|
||||
|
||||
/// An active user session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserSession {
    /// Unique user id.
    pub user_id: i32,
    /// Username.
    pub username: String,
    /// Role of the user.
    pub role: Role,
    /// Connection timestamp.
    pub connected_at: DateTime<Utc>,
    /// Last activity timestamp.
    pub last_activity: DateTime<Utc>,
    /// IP address the user connected from.
    pub ip_address: String,
    /// Client user agent, when provided.
    pub user_agent: Option<String>,
    /// Rooms the user is currently joined to.
    pub active_rooms: Vec<String>,
    /// Presence status.
    pub presence_status: PresenceStatus,
}
|
||||
|
||||
/// Presence status of a user.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum PresenceStatus {
    Online,
    Away,
    Busy,
    Offline,
}
|
||||
|
||||
impl UserSession {
|
||||
/// Crée une nouvelle session utilisateur
|
||||
pub fn new(
|
||||
user_id: i32,
|
||||
username: String,
|
||||
role: Role,
|
||||
ip_address: String,
|
||||
user_agent: Option<String>,
|
||||
) -> Self {
|
||||
let now = Utc::now();
|
||||
Self {
|
||||
user_id,
|
||||
username,
|
||||
role,
|
||||
connected_at: now,
|
||||
last_activity: now,
|
||||
ip_address,
|
||||
user_agent,
|
||||
active_rooms: Vec::new(),
|
||||
presence_status: PresenceStatus::Online,
|
||||
}
|
||||
}
|
||||
|
||||
/// Met à jour la dernière activité
|
||||
pub fn update_activity(&mut self) {
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Ajoute l'utilisateur à un salon
|
||||
pub fn join_room(&mut self, room_id: String) {
|
||||
if !self.active_rooms.contains(&room_id) {
|
||||
self.active_rooms.push(room_id);
|
||||
}
|
||||
self.update_activity();
|
||||
}
|
||||
|
||||
/// Retire l'utilisateur d'un salon
|
||||
pub fn leave_room(&mut self, room_id: &str) {
|
||||
self.active_rooms.retain(|r| r != room_id);
|
||||
self.update_activity();
|
||||
}
|
||||
|
||||
/// Change le statut de présence
|
||||
pub fn set_presence(&mut self, status: PresenceStatus) {
|
||||
self.presence_status = status;
|
||||
self.update_activity();
|
||||
}
|
||||
|
||||
/// Vérifie si l'utilisateur est dans un salon spécifique
|
||||
pub fn is_in_room(&self, room_id: &str) -> bool {
|
||||
self.active_rooms.contains(&room_id.to_string())
|
||||
}
|
||||
|
||||
/// Vérifie si la session est expirée (inactivité > 1 heure)
|
||||
pub fn is_expired(&self) -> bool {
|
||||
let now = Utc::now();
|
||||
let duration = now.signed_duration_since(self.last_activity);
|
||||
duration.num_hours() > 1
|
||||
}
|
||||
}
|
||||
|
||||
/// Authentication manager: tracks in-memory sessions and their WebSocket connections.
pub struct AuthManager {
    /// Active sessions, keyed by user id.
    sessions: HashMap<i32, UserSession>,
    /// WebSocket connections: connection id -> user id.
    connections: HashMap<String, i32>,
}
|
||||
|
||||
impl AuthManager {
|
||||
/// Crée un nouveau gestionnaire d'authentification
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
sessions: HashMap::new(),
|
||||
connections: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Authentifie un utilisateur et crée une session
|
||||
pub fn authenticate_user(
|
||||
&mut self,
|
||||
user_id: i32,
|
||||
username: String,
|
||||
role: Role,
|
||||
connection_id: String,
|
||||
ip_address: String,
|
||||
user_agent: Option<String>,
|
||||
) -> Result<&UserSession> {
|
||||
// Créer ou mettre à jour la session
|
||||
let session = UserSession::new(user_id, username, role, ip_address, user_agent);
|
||||
|
||||
// Stocker la session
|
||||
self.sessions.insert(user_id, session.clone());
|
||||
self.connections.insert(connection_id, user_id);
|
||||
|
||||
// Récupérer la session insérée (ne peut pas échouer car on vient de l'insérer)
|
||||
Ok(self.sessions.get(&user_id).ok_or_else(|| {
|
||||
ChatError::internal_error(format!(
|
||||
"Session not found after insertion for user_id: {}",
|
||||
user_id
|
||||
))
|
||||
})?)
|
||||
}
|
||||
|
||||
/// Récupère une session par ID utilisateur
|
||||
pub fn get_session(&self, user_id: i32) -> Option<&UserSession> {
|
||||
self.sessions.get(&user_id)
|
||||
}
|
||||
|
||||
/// Récupère une session par ID de connexion
|
||||
pub fn get_session_by_connection(&self, connection_id: &str) -> Option<&UserSession> {
|
||||
let user_id = self.connections.get(connection_id)?;
|
||||
self.sessions.get(user_id)
|
||||
}
|
||||
|
||||
/// Met à jour l'activité d'une session
|
||||
pub fn update_activity(&mut self, user_id: i32) -> Result<()> {
|
||||
if let Some(session) = self.sessions.get_mut(&user_id) {
|
||||
session.update_activity();
|
||||
Ok(())
|
||||
} else {
|
||||
Err(ChatError::unauthorized("Session non trouvée"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Déconnecte un utilisateur
|
||||
pub fn disconnect_user(&mut self, connection_id: &str) -> Option<UserSession> {
|
||||
if let Some(user_id) = self.connections.remove(connection_id) {
|
||||
// Retirer de tous les salons
|
||||
if let Some(mut session) = self.sessions.remove(&user_id) {
|
||||
session.active_rooms.clear();
|
||||
session.presence_status = PresenceStatus::Offline;
|
||||
Some(session)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Nettoie les sessions expirées
|
||||
pub fn cleanup_expired_sessions(&mut self) -> Vec<i32> {
|
||||
let mut expired_users = Vec::new();
|
||||
|
||||
self.sessions.retain(|&user_id, session| {
|
||||
if session.is_expired() {
|
||||
expired_users.push(user_id);
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
// Nettoyer aussi les connexions
|
||||
self.connections.retain(|_, &mut user_id| {
|
||||
!expired_users.contains(&user_id)
|
||||
});
|
||||
|
||||
expired_users
|
||||
}
|
||||
|
||||
/// Récupère toutes les sessions actives
|
||||
pub fn get_active_sessions(&self) -> Vec<&UserSession> {
|
||||
self.sessions.values().collect()
|
||||
}
|
||||
|
||||
/// Récupère le nombre de sessions actives
|
||||
pub fn active_session_count(&self) -> usize {
|
||||
self.sessions.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AuthManager {
    /// Equivalent to [`AuthManager::new`]: an empty manager.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
|
@ -1,297 +0,0 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::RwLock;
|
||||
use serde::{Serialize, Deserialize};
|
||||
|
||||
/// A cache entry with expiration and access bookkeeping.
#[derive(Debug, Clone)]
pub struct CacheEntry<T> {
    // The cached value.
    pub value: T,
    // Instant after which the entry is considered expired.
    pub expires_at: Instant,
    // Number of successful reads of this entry.
    pub hit_count: u64,
    // Last read time; drives LRU eviction.
    pub last_accessed: Instant,
}
|
||||
|
||||
impl<T> CacheEntry<T> {
|
||||
pub fn new(value: T, ttl: Duration) -> Self {
|
||||
let now = Instant::now();
|
||||
Self {
|
||||
value,
|
||||
expires_at: now + ttl,
|
||||
hit_count: 0,
|
||||
last_accessed: now,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_expired(&self) -> bool {
|
||||
Instant::now() > self.expires_at
|
||||
}
|
||||
|
||||
pub fn touch(&mut self) {
|
||||
self.hit_count += 1;
|
||||
self.last_accessed = Instant::now();
|
||||
}
|
||||
}
|
||||
|
||||
/// A cache with LRU eviction and per-entry expiration.
pub struct SmartCache<K, V>
where
    K: Clone + std::hash::Hash + Eq,
    V: Clone,
{
    // Entries behind an async RwLock; writes are taken even on reads
    // because `get` mutates hit counters.
    entries: Arc<RwLock<HashMap<K, CacheEntry<V>>>>,
    // Maximum number of entries before LRU eviction kicks in.
    max_size: usize,
    // TTL applied by `insert` when none is given.
    default_ttl: Duration,
}
|
||||
|
||||
impl<K, V> SmartCache<K, V>
|
||||
where
|
||||
K: Clone + std::hash::Hash + Eq,
|
||||
V: Clone,
|
||||
{
|
||||
pub fn new(max_size: usize, default_ttl: Duration) -> Self {
|
||||
Self {
|
||||
entries: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_size,
|
||||
default_ttl,
|
||||
}
|
||||
}
|
||||
|
||||
/// Insère une valeur dans le cache
|
||||
pub async fn insert(&self, key: K, value: V) {
|
||||
self.insert_with_ttl(key, value, self.default_ttl).await;
|
||||
}
|
||||
|
||||
/// Insère une valeur avec un TTL personnalisé
|
||||
pub async fn insert_with_ttl(&self, key: K, value: V, ttl: Duration) {
|
||||
let mut entries = self.entries.write().await;
|
||||
|
||||
// Nettoyage des entrées expirées
|
||||
self.cleanup_expired(&mut entries).await;
|
||||
|
||||
// Éviction LRU si le cache est plein
|
||||
if entries.len() >= self.max_size {
|
||||
self.evict_lru(&mut entries).await;
|
||||
}
|
||||
|
||||
entries.insert(key, CacheEntry::new(value, ttl));
|
||||
}
|
||||
|
||||
/// Récupère une valeur du cache
|
||||
pub async fn get(&self, key: &K) -> Option<V> {
|
||||
let mut entries = self.entries.write().await;
|
||||
|
||||
if let Some(entry) = entries.get_mut(key) {
|
||||
if entry.is_expired() {
|
||||
entries.remove(key);
|
||||
return None;
|
||||
}
|
||||
|
||||
entry.touch();
|
||||
Some(entry.value.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Supprime une entrée du cache
|
||||
pub async fn remove(&self, key: &K) -> Option<V> {
|
||||
let mut entries = self.entries.write().await;
|
||||
entries.remove(key).map(|entry| entry.value)
|
||||
}
|
||||
|
||||
/// Nettoie les entrées expirées
|
||||
async fn cleanup_expired(&self, entries: &mut HashMap<K, CacheEntry<V>>) {
|
||||
let expired_keys: Vec<K> = entries.iter()
|
||||
.filter(|(_, entry)| entry.is_expired())
|
||||
.map(|(key, _)| key.clone())
|
||||
.collect();
|
||||
|
||||
for key in expired_keys {
|
||||
entries.remove(&key);
|
||||
}
|
||||
}
|
||||
|
||||
/// Éviction LRU (Least Recently Used)
|
||||
async fn evict_lru(&self, entries: &mut HashMap<K, CacheEntry<V>>) {
|
||||
if let Some((lru_key, _)) = entries.iter()
|
||||
.min_by_key(|(_, entry)| entry.last_accessed)
|
||||
.map(|(key, entry)| (key.clone(), entry.clone())) {
|
||||
entries.remove(&lru_key);
|
||||
}
|
||||
}
|
||||
|
||||
/// Statistiques du cache
|
||||
pub async fn stats(&self) -> CacheStats {
|
||||
let entries = self.entries.read().await;
|
||||
let total_hits: u64 = entries.values().map(|entry| entry.hit_count).sum();
|
||||
|
||||
CacheStats {
|
||||
total_entries: entries.len(),
|
||||
max_size: self.max_size,
|
||||
total_hits,
|
||||
hit_rate: if entries.is_empty() { 0.0 } else { total_hits as f64 / entries.len() as f64 },
|
||||
}
|
||||
}
|
||||
|
||||
/// Vide le cache
|
||||
pub async fn clear(&self) {
|
||||
let mut entries = self.entries.write().await;
|
||||
entries.clear();
|
||||
}
|
||||
}
|
||||
|
||||
/// Statistics snapshot for a single [`SmartCache`].
#[derive(Debug, Clone, Serialize)]
pub struct CacheStats {
    // Current number of live entries.
    pub total_entries: usize,
    // Configured capacity before LRU eviction.
    pub max_size: usize,
    // Sum of hit counters over all live entries.
    pub total_hits: u64,
    // Average hits per live entry (0.0 when empty); misses are not tracked.
    pub hit_rate: f64,
}
|
||||
|
||||
/// Specialized cache for room messages, keyed by room name.
pub type RoomMessageCache = SmartCache<String, Vec<MessageCacheEntry>>;

/// Specialized cache for direct messages, keyed by a normalized user-id pair.
pub type DirectMessageCache = SmartCache<(i32, i32), Vec<MessageCacheEntry>>;

/// Specialized cache for online-user presence, keyed by user id.
pub type UserPresenceCache = SmartCache<i32, UserPresenceEntry>;

/// Cached representation of a chat message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MessageCacheEntry {
    pub id: i32,
    pub user_id: i32,
    pub username: String,
    pub content: String,
    pub timestamp: chrono::DateTime<chrono::Utc>,
    pub message_type: String,
}

/// Cached presence information for one user.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserPresenceEntry {
    pub user_id: i32,
    pub username: String,
    pub status: String,
    pub last_seen: chrono::DateTime<chrono::Utc>,
    pub current_room: Option<String>,
}
|
||||
|
||||
/// Central manager bundling every application cache.
pub struct CacheManager {
    // Room messages, keyed by room name.
    pub room_messages: RoomMessageCache,
    // Direct messages, keyed by normalized (low, high) user-id pair.
    pub direct_messages: DirectMessageCache,
    // Per-user presence snapshots.
    pub user_presence: UserPresenceCache,
    pub user_sessions: SmartCache<String, i32>, // JWT token -> user_id
}
|
||||
|
||||
impl Default for CacheManager {
    /// Equivalent to [`CacheManager::new`].
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl CacheManager {
    /// Builds all caches with their hard-coded sizes and TTLs.
    pub fn new() -> Self {
        Self {
            // Room message cache (30 min TTL).
            room_messages: SmartCache::new(1000, Duration::from_secs(1800)),

            // Direct message cache (1 hour TTL).
            direct_messages: SmartCache::new(500, Duration::from_secs(3600)),

            // User presence cache (5 min TTL).
            user_presence: SmartCache::new(10000, Duration::from_secs(300)),

            // JWT session cache (24 hours TTL).
            user_sessions: SmartCache::new(50000, Duration::from_secs(86400)),
        }
    }

    /// Caches the messages of a room.
    pub async fn cache_room_messages(&self, room: &str, messages: Vec<MessageCacheEntry>) {
        self.room_messages.insert(room.to_string(), messages).await;
    }

    /// Returns the cached messages of a room, if any.
    pub async fn get_cached_room_messages(&self, room: &str) -> Option<Vec<MessageCacheEntry>> {
        self.room_messages.get(&room.to_string()).await
    }

    /// Caches the direct messages between two users.
    pub async fn cache_direct_messages(&self, user1: i32, user2: i32, messages: Vec<MessageCacheEntry>) {
        // Normalize the key so (user1, user2) and (user2, user1) map to the same entry.
        let key = if user1 < user2 { (user1, user2) } else { (user2, user1) };
        self.direct_messages.insert(key, messages).await;
    }

    /// Returns the cached direct messages between two users, if any.
    pub async fn get_cached_direct_messages(&self, user1: i32, user2: i32) -> Option<Vec<MessageCacheEntry>> {
        let key = if user1 < user2 { (user1, user2) } else { (user2, user1) };
        self.direct_messages.get(&key).await
    }

    /// Caches a user's presence snapshot.
    pub async fn cache_user_presence(&self, user_id: i32, presence: UserPresenceEntry) {
        self.user_presence.insert(user_id, presence).await;
    }

    /// Returns a user's cached presence, if any.
    pub async fn get_cached_user_presence(&self, user_id: i32) -> Option<UserPresenceEntry> {
        self.user_presence.get(&user_id).await
    }

    /// Caches a JWT-token -> user-id binding.
    pub async fn cache_user_session(&self, token: &str, user_id: i32) {
        self.user_sessions.insert(token.to_string(), user_id).await;
    }

    /// Returns the user id cached for a token, if any.
    pub async fn get_cached_user_session(&self, token: &str) -> Option<i32> {
        self.user_sessions.get(&token.to_string()).await
    }

    /// Invalidates the cached session for a token.
    pub async fn invalidate_user_session(&self, token: &str) {
        self.user_sessions.remove(&token.to_string()).await;
    }

    /// Logs that cleanup ran. Actual eviction happens lazily inside each
    /// cache's get/insert paths, so there is nothing to do here.
    pub async fn cleanup_all(&self) {
        tracing::info!("🧹 Nettoyage automatique des caches effectué");
    }

    /// Aggregates statistics from all four caches.
    pub async fn global_stats(&self) -> GlobalCacheStats {
        let room_stats = self.room_messages.stats().await;
        let dm_stats = self.direct_messages.stats().await;
        let presence_stats = self.user_presence.stats().await;
        let session_stats = self.user_sessions.stats().await;

        GlobalCacheStats {
            room_messages: room_stats,
            direct_messages: dm_stats,
            user_presence: presence_stats,
            user_sessions: session_stats,
        }
    }

    /// Empties every cache (debugging/maintenance only).
    pub async fn clear_all(&self) {
        self.room_messages.clear().await;
        self.direct_messages.clear().await;
        self.user_presence.clear().await;
        self.user_sessions.clear().await;
        tracing::warn!("🗑️ Tous les caches ont été vidés");
    }
}
|
||||
|
||||
/// Aggregated statistics across all caches managed by [`CacheManager`].
#[derive(Debug, Serialize)]
pub struct GlobalCacheStats {
    pub room_messages: CacheStats,
    pub direct_messages: CacheStats,
    pub user_presence: CacheStats,
    pub user_sessions: CacheStats,
}
|
||||
|
|
@ -1,794 +0,0 @@
|
|||
//! Gestion unifiée des salons, messages directs et modération
|
||||
//!
|
||||
//! Ce module fournit une interface unifiée pour:
|
||||
//! - Gestion des salons (création, suppression, permissions)
|
||||
//! - Messages directs entre utilisateurs
|
||||
//! - Système de modération avancé
|
||||
//! - Gestion des rôles et permissions
|
||||
//! - Intégration avec les métriques et logs
|
||||
|
||||
use crate::authentication::{Role, UserSession};
|
||||
use crate::error::{ChatError, Result};
|
||||
use crate::prometheus_metrics::PrometheusMetrics;
|
||||
use crate::structured_logging::chat_logs;
|
||||
use chrono::{DateTime, Utc};
|
||||
use std::time::Duration;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Kinds of chat rooms.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum RoomType {
    /// Public room — visible to everyone.
    Public,
    /// Private room — invitation only.
    Private,
    /// Direct room — conversation between two users.
    Direct,
    /// System room — managed by the system.
    System,
}
|
||||
|
||||
/// Per-room permissions.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum RoomPermission {
    /// Read messages.
    Read,
    /// Send messages.
    Write,
    /// Edit messages.
    Edit,
    /// Delete messages.
    Delete,
    /// Invite users.
    Invite,
    /// Manage the room.
    Manage,
    /// Moderate the room.
    Moderate,
}
|
||||
|
||||
/// Lifecycle status of a room.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum RoomStatus {
    /// Active room.
    Active,
    /// Archived room.
    Archived,
    /// Deleted room.
    Deleted,
    /// Suspended room.
    Suspended,
}
|
||||
|
||||
/// Configuration of a room.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RoomConfig {
    /// Room name.
    pub name: String,
    /// Room description.
    pub description: Option<String>,
    /// Room type.
    pub room_type: RoomType,
    /// Permissions granted to new members by default.
    pub default_permissions: HashSet<RoomPermission>,
    /// Member limit (None = unlimited).
    pub max_members: Option<u32>,
    /// Whether message history is kept.
    pub enable_history: bool,
    /// Whether reactions are enabled.
    pub enable_reactions: bool,
    /// Whether mentions are enabled.
    pub enable_mentions: bool,
    /// Whether threads are enabled.
    pub enable_threads: bool,
    /// Forbidden keywords.
    pub forbidden_words: HashSet<String>,
    /// Whether automatic moderation is enabled.
    pub auto_moderation: bool,
}
|
||||
|
||||
impl Default for RoomConfig {
    /// A public room capped at 1000 members, with read/write defaults,
    /// all features on, and auto-moderation off.
    fn default() -> Self {
        Self {
            name: "Nouveau salon".to_string(),
            description: None,
            room_type: RoomType::Public,
            default_permissions: HashSet::from([RoomPermission::Read, RoomPermission::Write]),
            max_members: Some(1000),
            enable_history: true,
            enable_reactions: true,
            enable_mentions: true,
            enable_threads: true,
            forbidden_words: HashSet::new(),
            auto_moderation: false,
        }
    }
}
|
||||
|
||||
/// A chat room.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Room {
    /// Unique room id.
    pub id: Uuid,
    /// Room configuration.
    pub config: RoomConfig,
    /// Creator of the room.
    pub creator_id: i32,
    /// Creation timestamp.
    pub created_at: DateTime<Utc>,
    /// Last activity timestamp.
    pub last_activity: DateTime<Utc>,
    /// Room status.
    pub status: RoomStatus,
    /// Room members (user_id -> permissions).
    pub members: HashMap<i32, HashSet<RoomPermission>>,
    /// Room moderators.
    pub moderators: HashSet<i32>,
    /// Room administrators.
    pub administrators: HashSet<i32>,
    /// Pinned messages.
    pub pinned_messages: Vec<Uuid>,
    /// Room tags.
    pub tags: HashSet<String>,
}
|
||||
|
||||
impl Room {
|
||||
/// Crée un nouveau salon
|
||||
pub fn new(creator_id: i32, config: RoomConfig) -> Self {
|
||||
let now = Utc::now();
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
config,
|
||||
creator_id,
|
||||
created_at: now,
|
||||
last_activity: now,
|
||||
status: RoomStatus::Active,
|
||||
members: HashMap::new(),
|
||||
moderators: HashSet::new(),
|
||||
administrators: HashSet::new(),
|
||||
pinned_messages: Vec::new(),
|
||||
tags: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Ajoute un membre au salon
|
||||
pub fn add_member(&mut self, user_id: i32, permissions: HashSet<RoomPermission>) {
|
||||
self.members.insert(user_id, permissions);
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Retire un membre du salon
|
||||
pub fn remove_member(&mut self, user_id: i32) {
|
||||
self.members.remove(&user_id);
|
||||
self.moderators.remove(&user_id);
|
||||
self.administrators.remove(&user_id);
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Vérifie si un utilisateur a une permission spécifique
|
||||
pub fn has_permission(&self, user_id: i32, permission: &RoomPermission) -> bool {
|
||||
// L'administrateur du salon a toutes les permissions
|
||||
if self.administrators.contains(&user_id) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Vérifier les permissions du membre
|
||||
if let Some(member_permissions) = self.members.get(&user_id) {
|
||||
member_permissions.contains(permission)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Vérifie si un utilisateur peut envoyer des messages
|
||||
pub fn can_send_messages(&self, user_id: i32) -> bool {
|
||||
self.has_permission(user_id, &RoomPermission::Write)
|
||||
}
|
||||
|
||||
/// Vérifie si un utilisateur peut modérer
|
||||
pub fn can_moderate(&self, user_id: i32) -> bool {
|
||||
self.administrators.contains(&user_id)
|
||||
|| self.moderators.contains(&user_id)
|
||||
|| self.has_permission(user_id, &RoomPermission::Moderate)
|
||||
}
|
||||
|
||||
/// Ajoute un modérateur
|
||||
pub fn add_moderator(&mut self, user_id: i32) -> Result<()> {
|
||||
if !self.members.contains_key(&user_id) {
|
||||
return Err(ChatError::validation_error(
|
||||
"Utilisateur n'est pas membre du salon",
|
||||
));
|
||||
}
|
||||
|
||||
self.moderators.insert(user_id);
|
||||
self.last_activity = Utc::now();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retire un modérateur
|
||||
pub fn remove_moderator(&mut self, user_id: i32) {
|
||||
self.moderators.remove(&user_id);
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Épingle un message
|
||||
pub fn pin_message(&mut self, message_id: Uuid) -> Result<()> {
|
||||
if self.pinned_messages.len() >= 10 {
|
||||
return Err(ChatError::validation_error("Trop de messages épinglés"));
|
||||
}
|
||||
|
||||
if !self.pinned_messages.contains(&message_id) {
|
||||
self.pinned_messages.push(message_id);
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Désépingle un message
|
||||
pub fn unpin_message(&mut self, message_id: Uuid) {
|
||||
self.pinned_messages.retain(|&id| id != message_id);
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Met à jour la dernière activité
|
||||
pub fn update_activity(&mut self) {
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Archive le salon
|
||||
pub fn archive(&mut self) {
|
||||
self.status = RoomStatus::Archived;
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
|
||||
/// Supprime le salon
|
||||
pub fn delete(&mut self) {
|
||||
self.status = RoomStatus::Deleted;
|
||||
self.last_activity = Utc::now();
|
||||
}
|
||||
}
|
||||
|
||||
/// A chat message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    /// Unique message id.
    pub id: Uuid,
    /// Id of the room or conversation.
    pub room_id: Uuid,
    /// Sender id.
    pub sender_id: i32,
    /// Sender username.
    pub sender_username: String,
    /// Message content.
    pub content: String,
    /// Message type.
    pub message_type: MessageType,
    /// Parent message (for threads).
    pub parent_message_id: Option<Uuid>,
    /// Send timestamp.
    pub sent_at: DateTime<Utc>,
    /// Edit timestamp, when edited.
    pub edited_at: Option<DateTime<Utc>>,
    /// Whether the message was deleted (soft delete).
    pub deleted: bool,
    /// Reactions: emoji -> reacting user ids.
    pub reactions: HashMap<String, Vec<i32>>,
    /// User ids mentioned in the message.
    pub mentions: Vec<i32>,
    /// Attached files.
    pub attachments: Vec<Attachment>,
    /// Free-form message metadata.
    pub metadata: HashMap<String, String>,
}
|
||||
|
||||
/// Kinds of messages.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum MessageType {
    /// Plain text message.
    Text,
    /// System message.
    System,
    /// Welcome message.
    Welcome,
    /// Moderation message.
    Moderation,
    /// File message.
    File,
    /// Image message.
    Image,
    /// Code message.
    Code,
    /// Command message.
    Command,
}
|
||||
|
||||
/// An attached file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Attachment {
    /// File id.
    pub id: Uuid,
    /// File name.
    pub filename: String,
    /// MIME type.
    pub mime_type: String,
    /// Size in bytes.
    pub size_bytes: u64,
    /// Download URL.
    pub download_url: String,
    /// Upload timestamp.
    pub uploaded_at: DateTime<Utc>,
}
|
||||
|
||||
/// A moderation action performed by a moderator in a room.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationAction {
    /// Action ID.
    pub id: Uuid,
    /// What kind of action this is.
    pub action_type: ModerationActionType,
    /// ID of the moderator who performed the action.
    pub moderator_id: i32,
    /// Targeted user, when the action targets a user.
    pub target_user_id: Option<i32>,
    /// Targeted message, when the action targets a message.
    pub target_message_id: Option<Uuid>,
    /// Room the action applies to.
    pub room_id: Uuid,
    /// Human-readable reason for the action.
    pub reason: String,
    /// Sanction duration, for temporary bans/mutes.
    pub duration: Option<Duration>,
    /// When the action was taken.
    pub created_at: DateTime<Utc>,
    /// Whether the action is currently in effect.
    pub active: bool,
}
|
||||
|
||||
/// Kinds of moderation actions.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ModerationActionType {
    /// Warn a user.
    Warning,
    /// Delete a message.
    DeleteMessage,
    /// Edit a message.
    EditMessage,
    /// Temporarily ban a user.
    TemporaryBan,
    /// Permanently ban a user.
    PermanentBan,
    /// Temporarily mute a user.
    TemporaryMute,
    /// Permanently mute a user.
    PermanentMute,
    /// Kick a user from the room.
    Kick,
    /// Suspend the room.
    SuspendRoom,
    /// Delete the room.
    DeleteRoom,
}
|
||||
|
||||
/// Unified chat manager: owns rooms, their message history, and the
/// moderation log.
///
/// All state is held in memory behind async `RwLock`s — nothing here
/// persists to a database.
pub struct ChatManager {
    /// Active rooms, keyed by room ID.
    rooms: Arc<RwLock<HashMap<Uuid, Room>>>,
    /// Message history per room, keyed by room ID.
    messages: Arc<RwLock<HashMap<Uuid, Vec<ChatMessage>>>>,
    /// Append-only log of moderation actions.
    moderation_actions: Arc<RwLock<Vec<ModerationAction>>>,
    /// Optional Prometheus metrics sink.
    metrics: Option<Arc<PrometheusMetrics>>,
}
|
||||
|
||||
impl ChatManager {
    /// Creates an empty chat manager; `metrics` enables optional
    /// Prometheus reporting.
    pub fn new(metrics: Option<Arc<PrometheusMetrics>>) -> Self {
        Self {
            rooms: Arc::new(RwLock::new(HashMap::new())),
            messages: Arc::new(RwLock::new(HashMap::new())),
            moderation_actions: Arc::new(RwLock::new(Vec::new())),
            metrics,
        }
    }

    /// Creates a new room and registers the creator as its first
    /// administrator and member. Returns the new room's ID.
    pub async fn create_room(
        &self,
        creator_id: i32,
        creator_username: &str,
        config: RoomConfig,
    ) -> Result<Uuid> {
        let room = Room::new(creator_id, config.clone());
        let room_id = room.id;

        // Grant the creator admin rights and default member permissions.
        let mut room = room;
        room.administrators.insert(creator_id);
        room.add_member(creator_id, room.config.default_permissions.clone());

        // Store the room.
        {
            let mut rooms = self.rooms.write().await;
            rooms.insert(room_id, room);
        }

        // Report metrics.
        if let Some(metrics) = &self.metrics {
            metrics.record_room_created();
            metrics.update_active_rooms(self.get_active_rooms_count().await);
        }

        chat_logs::room_created(
            &room_id.to_string(),
            &config.name,
            creator_id,
            creator_username,
        );

        Ok(room_id)
    }

    /// Deletes a room and drops its message history.
    ///
    /// Only a room administrator may delete it. The room itself is
    /// soft-deleted (kept in the map, flagged via `Room::delete`), while
    /// its messages are removed outright.
    pub async fn delete_room(
        &self,
        room_id: Uuid,
        deleter_id: i32,
        deleter_username: &str,
    ) -> Result<()> {
        let room_name = {
            let rooms = self.rooms.read().await;
            let room = rooms
                .get(&room_id)
                .ok_or_else(|| ChatError::validation_error("Salon non trouvé"))?;

            // Only administrators may delete the room.
            if !room.administrators.contains(&deleter_id) {
                return Err(ChatError::unauthorized("Permissions insuffisantes"));
            }

            room.config.name.clone()
        };

        // Soft-delete the room.
        {
            let mut rooms = self.rooms.write().await;
            if let Some(room) = rooms.get_mut(&room_id) {
                room.delete();
            }
        }

        // Drop the room's message history.
        {
            let mut messages = self.messages.write().await;
            messages.remove(&room_id);
        }

        // Report metrics.
        if let Some(metrics) = &self.metrics {
            metrics.record_room_deleted();
            metrics.update_active_rooms(self.get_active_rooms_count().await);
        }

        chat_logs::room_deleted(
            &room_id.to_string(),
            &room_name,
            deleter_id,
            deleter_username,
        );

        Ok(())
    }

    /// Adds a user to a room, enforcing the room's status and member cap.
    pub async fn join_room(&self, room_id: Uuid, user_id: i32, username: &str) -> Result<()> {
        let mut rooms = self.rooms.write().await;
        let room = rooms
            .get_mut(&room_id)
            .ok_or_else(|| ChatError::validation_error("Salon non trouvé"))?;

        // Only active rooms accept new members.
        if room.status != RoomStatus::Active {
            return Err(ChatError::validation_error("Salon non actif"));
        }

        // Enforce the member cap, when one is configured.
        if let Some(max_members) = room.config.max_members {
            if room.members.len() >= max_members as usize {
                return Err(ChatError::validation_error("Salon plein"));
            }
        }

        // Add the member with the room's default permissions.
        room.add_member(user_id, room.config.default_permissions.clone());

        // NOTE(review): this logs a `room_created` event on join — looks
        // like a copy-paste; confirm whether chat_logs has a dedicated
        // member-joined event.
        chat_logs::room_created(&room_id.to_string(), &room.config.name, user_id, username);

        Ok(())
    }

    /// Removes a user from a room.
    pub async fn leave_room(&self, room_id: Uuid, user_id: i32, username: &str) -> Result<()> {
        let mut rooms = self.rooms.write().await;
        let room = rooms
            .get_mut(&room_id)
            .ok_or_else(|| ChatError::validation_error("Salon non trouvé"))?;

        room.remove_member(user_id);

        // NOTE(review): this logs a `room_deleted` event on leave — looks
        // like a copy-paste; confirm whether chat_logs has a dedicated
        // member-left event.
        chat_logs::room_deleted(&room_id.to_string(), &room.config.name, user_id, username);

        Ok(())
    }

    /// Sends a message to a room on behalf of `sender_id`.
    ///
    /// Checks the sender's permissions, appends the message to the room's
    /// history, refreshes the room's activity timestamp, and returns the
    /// new message's ID.
    pub async fn send_message(
        &self,
        room_id: Uuid,
        sender_id: i32,
        sender_username: &str,
        content: String,
        message_type: MessageType,
    ) -> Result<Uuid> {
        // Check permissions under a read lock, released before we write.
        {
            let rooms = self.rooms.read().await;
            let room = rooms
                .get(&room_id)
                .ok_or_else(|| ChatError::validation_error("Salon non trouvé"))?;

            if !room.can_send_messages(sender_id) {
                return Err(ChatError::unauthorized("Permissions insuffisantes"));
            }
        }

        // Build the message.
        let message = ChatMessage {
            id: Uuid::new_v4(),
            room_id,
            sender_id,
            sender_username: sender_username.to_string(),
            content: content.clone(),
            message_type,
            parent_message_id: None,
            sent_at: Utc::now(),
            edited_at: None,
            deleted: false,
            reactions: HashMap::new(),
            mentions: Vec::new(),
            attachments: Vec::new(),
            metadata: HashMap::new(),
        };

        let message_id = message.id;

        // Append to the room's history.
        {
            let mut messages = self.messages.write().await;
            messages.entry(room_id).or_default().push(message);
        }

        // Refresh the room's last-activity timestamp.
        {
            let mut rooms = self.rooms.write().await;
            if let Some(room) = rooms.get_mut(&room_id) {
                room.update_activity();
            }
        }

        // Report metrics.
        if let Some(metrics) = &self.metrics {
            metrics.record_message_sent("text", "room");
            metrics.record_message_size(content.len() as u64, "text");
        }

        chat_logs::message_sent(
            message_id.to_string(),
            sender_id,
            sender_username,
            &room_id.to_string(),
            "text",
            content.len(),
        );

        Ok(message_id)
    }

    /// Returns up to `limit` messages (default 50, capped at 100) from a
    /// room's history, ending `offset` messages before the most recent
    /// one. Messages are returned in stored (oldest-first) order.
    pub async fn get_room_messages(
        &self,
        room_id: Uuid,
        limit: Option<usize>,
        offset: Option<usize>,
    ) -> Result<Vec<ChatMessage>> {
        let messages = self.messages.read().await;
        let room_messages = messages
            .get(&room_id)
            .ok_or_else(|| ChatError::validation_error("Salon non trouvé"))?;

        let offset = offset.unwrap_or(0);
        let limit = limit.unwrap_or(50).min(100);

        // Saturating arithmetic keeps the window valid even when
        // offset/limit exceed the history length.
        let start = room_messages.len().saturating_sub(offset + limit);
        let end = room_messages.len().saturating_sub(offset);

        Ok(room_messages[start..end].to_vec())
    }

    /// Creates a two-person direct conversation modeled as a `Direct`
    /// room capped at 2 members, and returns its ID.
    pub async fn create_direct_conversation(&self, user1_id: i32, user2_id: i32) -> Result<Uuid> {
        let config = RoomConfig {
            name: format!("DM_{}_{}", user1_id, user2_id),
            room_type: RoomType::Direct,
            max_members: Some(2),
            ..Default::default()
        };

        let room_id = self.create_room(user1_id, "system", config).await?;

        // Add the second participant.
        self.join_room(room_id, user2_id, "user").await?;

        Ok(room_id)
    }

    /// Records a moderation action and applies its effect.
    ///
    /// Only `DeleteMessage`, `Kick` and `SuspendRoom` have an immediate
    /// effect here; other action types are logged but not enforced yet.
    pub async fn apply_moderation_action(&self, action: ModerationAction) -> Result<()> {
        // Record the action in the moderation log.
        {
            let mut actions = self.moderation_actions.write().await;
            actions.push(action.clone());
        }

        // Apply the effect according to the action type.
        match action.action_type {
            ModerationActionType::DeleteMessage => {
                if let Some(message_id) = action.target_message_id {
                    self.delete_message(message_id, action.moderator_id).await?;
                }
            }
            ModerationActionType::Kick => {
                if let Some(user_id) = action.target_user_id {
                    self.leave_room(action.room_id, user_id, "moderated")
                        .await?;
                }
            }
            ModerationActionType::SuspendRoom => {
                self.suspend_room(action.room_id, action.moderator_id)
                    .await?;
            }
            _ => {
                // Other moderation actions: logged only, no enforcement.
            }
        }

        // Report metrics.
        if let Some(metrics) = &self.metrics {
            metrics.record_moderation_action(&format!("{:?}", action.action_type));
        }

        Ok(())
    }

    /// Soft-deletes a message (searched across all rooms) and records who
    /// deleted it and when in the message metadata.
    async fn delete_message(&self, message_id: Uuid, moderator_id: i32) -> Result<()> {
        let mut messages = self.messages.write().await;

        for room_messages in messages.values_mut() {
            if let Some(message) = room_messages.iter_mut().find(|m| m.id == message_id) {
                message.deleted = true;
                message
                    .metadata
                    .insert("deleted_by".to_string(), moderator_id.to_string());
                message
                    .metadata
                    .insert("deleted_at".to_string(), Utc::now().to_rfc3339());
                return Ok(());
            }
        }

        Err(ChatError::validation_error("Message non trouvé"))
    }

    /// Suspends a room (no-op when the room does not exist).
    async fn suspend_room(&self, room_id: Uuid, _moderator_id: i32) -> Result<()> {
        let mut rooms = self.rooms.write().await;
        if let Some(room) = rooms.get_mut(&room_id) {
            room.status = RoomStatus::Suspended;
            room.last_activity = Utc::now();
        }
        Ok(())
    }

    /// Counts rooms currently in the `Active` state.
    async fn get_active_rooms_count(&self) -> u64 {
        let rooms = self.rooms.read().await;
        rooms
            .values()
            .filter(|room| room.status == RoomStatus::Active)
            .count() as u64
    }

    /// Returns aggregate chat statistics across all rooms.
    pub async fn get_chat_stats(&self) -> ChatStats {
        let rooms = self.rooms.read().await;
        let messages = self.messages.read().await;

        let total_rooms = rooms.len();
        let active_rooms = rooms
            .values()
            .filter(|room| room.status == RoomStatus::Active)
            .count();

        let total_messages = messages.values().map(|msgs| msgs.len()).sum::<usize>();

        let total_members = rooms.values().map(|room| room.members.len()).sum::<usize>();

        ChatStats {
            total_rooms,
            active_rooms,
            total_messages,
            total_members,
        }
    }
}
|
||||
|
||||
/// Aggregate chat statistics, as returned by `ChatManager::get_chat_stats`.
#[derive(Debug, Clone, Serialize)]
pub struct ChatStats {
    /// Total number of rooms (all states).
    pub total_rooms: usize,
    /// Number of rooms in the `Active` state.
    pub active_rooms: usize,
    /// Total messages across all rooms.
    pub total_messages: usize,
    /// Total memberships across all rooms (a user in N rooms counts N times).
    pub total_members: usize,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly created room gets a non-nil ID.
    #[tokio::test]
    async fn test_room_creation() {
        let manager = ChatManager::new(None);
        let config = RoomConfig::default();

        let room_id = manager.create_room(1, "testuser", config).await.unwrap();
        assert!(!room_id.is_nil());
    }

    /// The creator has full permissions; non-members have none.
    #[tokio::test]
    async fn test_room_permissions() {
        let room = Room::new(1, RoomConfig::default());

        // The creator must hold every permission.
        assert!(room.can_send_messages(1));
        assert!(room.can_moderate(1));

        // A non-member must have no permissions.
        assert!(!room.can_send_messages(2));
    }

    /// Sending a message in an owned room yields a non-nil message ID.
    #[tokio::test]
    async fn test_message_sending() {
        let manager = ChatManager::new(None);
        let config = RoomConfig::default();

        let room_id = manager.create_room(1, "testuser", config).await.unwrap();
        let message_id = manager
            .send_message(
                room_id,
                1,
                "testuser",
                "Hello world".to_string(),
                MessageType::Text,
            )
            .await
            .unwrap();

        assert!(!message_id.is_nil());
    }
}
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
//file: backend/modules/chat_server/src/client.rs
|
||||
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// A connected WebSocket client.
#[derive(Debug, Clone)]
pub struct Client {
    /// Authenticated user ID.
    pub user_id: i32,
    /// Authenticated username.
    pub username: String,
    /// Outbound channel toward this client's WebSocket writer task.
    pub sender: UnboundedSender<Message>,
    /// Instant of the last received heartbeat, shared behind a lock so
    /// clones of the `Client` observe updates.
    pub last_heartbeat: std::sync::Arc<std::sync::RwLock<Instant>>,
    /// When the client connected.
    pub connected_at: Instant,
}
|
||||
|
||||
impl Client {
    /// Creates a client whose heartbeat starts at "now".
    pub fn new(user_id: i32, username: String, sender: UnboundedSender<Message>) -> Self {
        Self {
            user_id,
            username,
            sender,
            last_heartbeat: std::sync::Arc::new(std::sync::RwLock::new(Instant::now())),
            connected_at: Instant::now(),
        }
    }

    /// Sends a text message to the client.
    ///
    /// Returns `true` when the message was queued on the channel,
    /// `false` when the channel is closed (receiver dropped).
    pub fn send_text(&self, text: &str) -> bool {
        tracing::debug!(user_id = %self.user_id, username = %self.username, text_length = %text.len(), "🔧 Tentative d'envoi de message texte");

        match self.sender.send(Message::Text(text.to_string())) {
            Ok(_) => {
                tracing::debug!(user_id = %self.user_id, username = %self.username, "✅ Message texte envoyé au canal");
                true
            }
            Err(e) => {
                tracing::error!(user_id = %self.user_id, username = %self.username, error = %e, "❌ Erreur envoi message texte au canal");
                false
            }
        }
    }

    /// Sends a WebSocket ping to probe the connection.
    ///
    /// Returns `true` when the ping was queued, `false` otherwise.
    pub fn send_ping(&self) -> bool {
        tracing::debug!(user_id = %self.user_id, username = %self.username, "🏓 Envoi ping");

        match self.sender.send(Message::Ping(vec![])) {
            Ok(_) => {
                tracing::debug!(user_id = %self.user_id, username = %self.username, "✅ Ping envoyé");
                true
            }
            Err(e) => {
                tracing::error!(user_id = %self.user_id, username = %self.username, error = %e, "❌ Erreur envoi ping");
                false
            }
        }
    }

    /// Refreshes the last-heartbeat timestamp to "now".
    ///
    /// A poisoned lock is silently ignored (the timestamp simply stays stale).
    pub fn update_heartbeat(&self) {
        if let Ok(mut last_heartbeat) = self.last_heartbeat.write() {
            *last_heartbeat = Instant::now();
        }
    }

    /// Returns whether the connection is still considered alive, i.e.
    /// whether a heartbeat was seen within `timeout`.
    ///
    /// A poisoned heartbeat lock is treated as "not alive".
    pub fn is_alive(&self, timeout: Duration) -> bool {
        if let Ok(last_heartbeat) = self.last_heartbeat.read() {
            last_heartbeat.elapsed() < timeout
        } else {
            false
        }
    }

    /// Returns how long this client has been connected.
    pub fn connection_duration(&self) -> Duration {
        self.connected_at.elapsed()
    }
}
|
||||
|
|
@ -1,670 +0,0 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::PgPool;
|
||||
use std::env;
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// RabbitMQ connection configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RabbitMQConfig {
    /// AMQP connection URL.
    pub url: String,
    /// Maximum number of connection attempts.
    pub max_retries: u32,
    /// Delay between retries, in seconds.
    pub retry_interval_secs: u64,
    /// Whether RabbitMQ integration is enabled at all.
    pub enable: bool,
}
|
||||
|
||||
impl RabbitMQConfig {
|
||||
pub fn from_env() -> Self {
|
||||
dotenvy::dotenv().ok(); // S'assurer que les .env sont chargés
|
||||
Self {
|
||||
url: env::var("RABBITMQ_URL")
|
||||
.unwrap_or_else(|_| "amqp://guest:guest@localhost:5672/".to_string()),
|
||||
max_retries: env::var("RABBITMQ_MAX_RETRIES")
|
||||
.unwrap_or_else(|_| "3".to_string())
|
||||
.parse()
|
||||
.unwrap_or(3),
|
||||
retry_interval_secs: env::var("RABBITMQ_RETRY_INTERVAL_SECS")
|
||||
.unwrap_or_else(|_| "2".to_string())
|
||||
.parse()
|
||||
.unwrap_or(2),
|
||||
enable: env::var("RABBITMQ_ENABLE")
|
||||
.unwrap_or_else(|_| "true".to_string())
|
||||
.parse()
|
||||
.unwrap_or(true),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Minimal chat-server configuration, loaded from environment variables.
#[derive(Debug, Clone)]
pub struct Config {
    /// PostgreSQL connection URL.
    pub database_url: String,
    /// Server listen port.
    pub port: u16,
    /// Server listen address.
    pub host: String,
    /// RabbitMQ settings.
    pub rabbit_mq: RabbitMQConfig,
}
|
||||
|
||||
impl Config {
    /// Loads the configuration from environment variables.
    ///
    /// Uses `dotenvy` to load a `.env` file when present.
    /// Environment variables:
    /// - `DATABASE_URL`: PostgreSQL connection URL (required)
    /// - `CHAT_SERVER_PORT`: listen port (default: 8081)
    /// - `CHAT_SERVER_HOST`: listen address (default: 0.0.0.0)
    /// - `RABBITMQ_URL`: RabbitMQ URL (default: amqp://guest:guest@localhost:5672/)
    /// - `RABBITMQ_MAX_RETRIES`: max RabbitMQ attempts (default: 3)
    /// - `RABBITMQ_RETRY_INTERVAL_SECS`: RabbitMQ retry interval (default: 2s)
    /// - `RABBITMQ_ENABLE`: enable RabbitMQ (default: true)
    ///
    /// # Errors
    ///
    /// Returns an error when `DATABASE_URL` is unset or
    /// `CHAT_SERVER_PORT` is not a valid port number.
    pub fn from_env() -> Result<Self, Box<dyn std::error::Error>> {
        // Load `.env` when present (dotenvy is silent when it is absent).
        dotenvy::dotenv().ok();

        Ok(Config {
            database_url: env::var("DATABASE_URL")
                .map_err(|_| "DATABASE_URL environment variable is required")?,
            port: env::var("CHAT_SERVER_PORT")
                .unwrap_or_else(|_| "8081".to_string())
                .parse()
                .map_err(|e| format!("Invalid CHAT_SERVER_PORT: {}", e))?,
            host: env::var("CHAT_SERVER_HOST").unwrap_or_else(|_| "0.0.0.0".to_string()),
            rabbit_mq: RabbitMQConfig::from_env(),
        })
    }
}
|
||||
|
||||
/// Tuned configuration for the SQLx connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
    /// Database connection URL.
    pub database_url: String,

    /// Maximum number of open connections.
    pub max_connections: u32,

    /// Minimum number of connections kept warm.
    pub min_connections: u32,

    /// Timeout for establishing a new connection.
    pub connect_timeout: Duration,

    /// Timeout for acquiring a connection from the pool.
    pub acquire_timeout: Duration,

    /// Maximum lifetime of a single connection.
    pub max_lifetime: Duration,

    /// Idle time after which a connection is closed.
    pub idle_timeout: Duration,

    /// Whether connections are tested before being handed out.
    pub test_before_acquire: bool,

    /// SSL mode appended to the connection URL.
    pub ssl_mode: SslMode,

    /// Extra pool tuning knobs.
    pub pool_config: PoolConfig,
}
|
||||
|
||||
/// SSL mode for the PostgreSQL connection (maps to libpq `sslmode`).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum SslMode {
    Disable,
    Prefer,
    Require,
    VerifyCa,
    VerifyFull,
}
|
||||
|
||||
impl SslMode {
|
||||
pub fn to_string(&self) -> &'static str {
|
||||
match self {
|
||||
SslMode::Disable => "disable",
|
||||
SslMode::Prefer => "prefer",
|
||||
SslMode::Require => "require",
|
||||
SslMode::VerifyCa => "verify-ca",
|
||||
SslMode::VerifyFull => "verify-full",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Pool-specific tuning knobs.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolConfig {
    /// Size of the prepared-statement cache.
    pub prepared_statement_cache_size: usize,

    /// Number of connections opened at startup.
    pub initial_connections: u32,

    /// How often idle-connection cleanup/metric refresh runs.
    pub cleanup_interval: Duration,

    /// Active-connection count above which a warning is logged.
    pub connection_warning_threshold: u32,
}
|
||||
|
||||
/// Security-related settings (JWT, 2FA, password policy).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityConfig {
    /// Secret used to sign JWTs.
    pub jwt_secret: String,
    /// Lifetime of access tokens.
    pub jwt_access_duration: Duration,
    /// Lifetime of refresh tokens.
    pub jwt_refresh_duration: Duration,
    /// JWT signing algorithm (e.g. "HS256").
    pub jwt_algorithm: String,
    /// Expected JWT audience.
    pub jwt_audience: String,
    /// Expected JWT issuer.
    pub jwt_issuer: String,
    /// Whether two-factor authentication is enabled.
    pub enable_2fa: bool,
    /// Accepted TOTP time-step window.
    pub totp_window: u32,
    /// Whether message content filtering is enabled.
    pub content_filtering: bool,
    /// Minimum accepted password length.
    pub password_min_length: u32,
    /// bcrypt work factor.
    pub bcrypt_cost: u32,
}
|
||||
|
||||
impl Default for DatabaseConfig {
    /// Builds a default configuration, letting the timeout values be
    /// overridden via `CHAT_*` environment variables (falling back to the
    /// hard-coded defaults when a variable is absent or unparsable).
    fn default() -> Self {
        let connect_secs = env::var("CHAT_CONNECT_TIMEOUT_SECS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(5);
        let acquire_secs = env::var("CHAT_ACQUIRE_TIMEOUT_SECS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(10);
        let max_lifetime_secs = env::var("CHAT_MAX_LIFETIME_SECS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(1800);
        let idle_secs = env::var("CHAT_IDLE_TIMEOUT_SECS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(600);
        Self {
            database_url: "postgresql://localhost/veza_chat".to_string(),
            max_connections: 20,
            min_connections: 5,
            connect_timeout: Duration::from_secs(connect_secs),
            acquire_timeout: Duration::from_secs(acquire_secs),
            max_lifetime: Duration::from_secs(max_lifetime_secs),
            idle_timeout: Duration::from_secs(idle_secs),
            test_before_acquire: true,
            ssl_mode: SslMode::Prefer,
            pool_config: PoolConfig::default(),
        }
    }
}
|
||||
|
||||
impl Default for SecurityConfig {
    /// Deliberately panics outside of test builds: a default JWT secret
    /// would be a security hole, so production code must build
    /// `SecurityConfig` explicitly. In test builds it returns a config
    /// with a random per-run secret and 2FA/filtering disabled.
    fn default() -> Self {
        #[cfg(not(test))]
        {
            panic!(
                "SecurityConfig::default() cannot be used in production. \
                Create SecurityConfig manually with require_env_min_length(\"JWT_SECRET\", 32)"
            );
        }

        #[cfg(test)]
        Self {
            // Random secret per test run unless TEST_JWT_SECRET pins it.
            jwt_secret: env::var("TEST_JWT_SECRET")
                .unwrap_or_else(|_| format!("test_{}_{}", uuid::Uuid::new_v4(), "x".repeat(20))),
            jwt_access_duration: Duration::from_secs(900), // 15 min
            jwt_refresh_duration: Duration::from_secs(86400 * 30), // 30 days
            jwt_algorithm: "HS256".to_string(),
            jwt_audience: "veza-chat".to_string(),
            jwt_issuer: "veza-backend".to_string(),
            enable_2fa: false,
            totp_window: 1,
            content_filtering: false,
            password_min_length: 8,
            bcrypt_cost: 12,
        }
    }
}
|
||||
|
||||
impl Default for PoolConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
prepared_statement_cache_size: 100,
|
||||
initial_connections: 5,
|
||||
cleanup_interval: Duration::from_secs(300), // 5 minutes
|
||||
connection_warning_threshold: 15,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper around the SQLx pool with its configuration and metrics.
pub struct DatabaseManager {
    /// The underlying SQLx PostgreSQL pool.
    pool: PgPool,
    /// Configuration used to build the pool.
    config: DatabaseConfig,
    /// Locally tracked pool metrics (error counters, cleanup timestamp).
    metrics: DatabaseMetrics,
}
|
||||
|
||||
/// Snapshot of the database pool's state and error counters.
#[derive(Debug, Clone)]
pub struct DatabaseMetrics {
    /// Total connections currently open.
    pub total_connections: u32,
    /// Connections sitting idle in the pool.
    pub idle_connections: u32,
    /// Connections currently checked out.
    pub active_connections: u32,
    /// Requests waiting for a connection (not exposed by SQLx; always 0).
    pub waiting_requests: u32,
    /// Cumulative connection errors.
    pub connection_errors: u64,
    /// Cumulative query errors.
    pub query_errors: u64,
    /// Rolling average query duration, in milliseconds.
    pub avg_query_duration_ms: f64,
    /// When idle-connection cleanup last ran.
    pub last_cleanup: std::time::Instant,
}
|
||||
|
||||
impl Default for DatabaseMetrics {
    /// All counters start at zero; `last_cleanup` starts at "now"
    /// (`Instant` has no `Default`, hence the manual impl).
    fn default() -> Self {
        Self {
            total_connections: 0,
            idle_connections: 0,
            active_connections: 0,
            waiting_requests: 0,
            connection_errors: 0,
            query_errors: 0,
            avg_query_duration_ms: 0.0,
            last_cleanup: std::time::Instant::now(),
        }
    }
}
|
||||
|
||||
impl DatabaseManager {
|
||||
/// Crée une nouvelle instance du gestionnaire de base de données
|
||||
pub async fn new(config: DatabaseConfig) -> Result<Self, sqlx::Error> {
|
||||
info!(
|
||||
"Initializing database connection pool with config: {:?}",
|
||||
config
|
||||
);
|
||||
|
||||
// Construire l'URL de connexion avec les paramètres SSL
|
||||
let mut database_url = config.database_url.clone();
|
||||
if !database_url.contains("sslmode=") {
|
||||
let separator = if database_url.contains('?') { "&" } else { "?" };
|
||||
database_url.push_str(&format!(
|
||||
"{}sslmode={}",
|
||||
separator,
|
||||
config.ssl_mode.to_string()
|
||||
));
|
||||
}
|
||||
|
||||
// Créer le pool avec la configuration optimisée
|
||||
let pool_options = sqlx::postgres::PgPoolOptions::new()
|
||||
.max_connections(config.max_connections)
|
||||
.min_connections(config.min_connections)
|
||||
.acquire_timeout(config.acquire_timeout)
|
||||
.max_lifetime(config.max_lifetime)
|
||||
.idle_timeout(config.idle_timeout)
|
||||
.test_before_acquire(config.test_before_acquire);
|
||||
|
||||
let pool = pool_options.connect(&database_url).await?;
|
||||
|
||||
// Tester la connexion
|
||||
sqlx::query("SELECT 1").fetch_one(&pool).await?;
|
||||
|
||||
info!(
|
||||
"Database connection pool initialized successfully. Max connections: {}, Min connections: {}",
|
||||
config.max_connections, config.min_connections
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
pool,
|
||||
config,
|
||||
metrics: DatabaseMetrics::default(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Obtient le pool de connexions
|
||||
pub fn pool(&self) -> &PgPool {
|
||||
&self.pool
|
||||
}
|
||||
|
||||
/// Obtient les métriques actuelles du pool
|
||||
pub async fn get_metrics(&self) -> DatabaseMetrics {
|
||||
let pool_status = self.pool.size();
|
||||
let idle_count = self.pool.num_idle();
|
||||
let active_count = pool_status - idle_count as u32;
|
||||
|
||||
DatabaseMetrics {
|
||||
total_connections: pool_status,
|
||||
idle_connections: idle_count as u32,
|
||||
active_connections: active_count,
|
||||
waiting_requests: 0, // SQLx ne fournit pas cette info directement
|
||||
connection_errors: self.metrics.connection_errors,
|
||||
query_errors: self.metrics.query_errors,
|
||||
avg_query_duration_ms: self.metrics.avg_query_duration_ms,
|
||||
last_cleanup: self.metrics.last_cleanup,
|
||||
}
|
||||
}
|
||||
|
||||
/// Nettoie les connexions inactives
|
||||
pub async fn cleanup_idle_connections(&mut self) {
|
||||
let now = std::time::Instant::now();
|
||||
|
||||
if now.duration_since(self.metrics.last_cleanup) >= self.config.pool_config.cleanup_interval
|
||||
{
|
||||
// SQLx gère automatiquement le nettoyage des connexions inactives
|
||||
// On peut juste mettre à jour nos métriques
|
||||
self.metrics.last_cleanup = now;
|
||||
|
||||
let metrics = self.get_metrics().await;
|
||||
debug!(
|
||||
"Database cleanup completed. Active: {}, Idle: {}, Total: {}",
|
||||
metrics.active_connections, metrics.idle_connections, metrics.total_connections
|
||||
);
|
||||
|
||||
// Warning si trop de connexions actives
|
||||
if metrics.active_connections > self.config.pool_config.connection_warning_threshold {
|
||||
warn!(
|
||||
"High number of active database connections: {} (threshold: {})",
|
||||
metrics.active_connections,
|
||||
self.config.pool_config.connection_warning_threshold
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Teste la santé de la base de données
|
||||
pub async fn health_check(&self) -> Result<(), sqlx::Error> {
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
// Test de connexion simple
|
||||
sqlx::query("SELECT 1").fetch_one(&self.pool).await?;
|
||||
|
||||
let duration = start.elapsed();
|
||||
debug!("Database health check completed in {:?}", duration);
|
||||
|
||||
// Mettre à jour les métriques de performance
|
||||
// Note: Dans une vraie implémentation, on utiliserait des atomics pour thread-safety
|
||||
// self.metrics.avg_query_duration_ms = // calculer la moyenne
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ferme le pool de connexions
|
||||
pub async fn close(self) {
|
||||
info!("Closing database connection pool");
|
||||
self.pool.close().await;
|
||||
}
|
||||
|
||||
/// Obtient la configuration actuelle
|
||||
pub fn config(&self) -> &DatabaseConfig {
|
||||
&self.config
|
||||
}
|
||||
|
||||
/// Met à jour la configuration (nécessite un redémarrage du pool)
|
||||
pub fn update_config(&mut self, new_config: DatabaseConfig) {
|
||||
self.config = new_config;
|
||||
info!("Database configuration updated");
|
||||
}
|
||||
|
||||
/// Obtient des statistiques détaillées du pool
|
||||
pub async fn get_detailed_stats(&self) -> DetailedPoolStats {
|
||||
let metrics = self.get_metrics().await;
|
||||
|
||||
DetailedPoolStats {
|
||||
pool_size: metrics.total_connections,
|
||||
idle_connections: metrics.idle_connections,
|
||||
active_connections: metrics.active_connections,
|
||||
max_connections: self.config.max_connections,
|
||||
min_connections: self.config.min_connections,
|
||||
connection_utilization: (metrics.active_connections as f64
|
||||
/ self.config.max_connections as f64)
|
||||
* 100.0,
|
||||
avg_query_duration_ms: metrics.avg_query_duration_ms,
|
||||
connection_errors: metrics.connection_errors,
|
||||
query_errors: metrics.query_errors,
|
||||
last_cleanup: metrics.last_cleanup,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Detailed pool statistics, as returned by
/// `DatabaseManager::get_detailed_stats`.
#[derive(Debug, Clone)]
pub struct DetailedPoolStats {
    /// Connections currently open.
    pub pool_size: u32,
    /// Idle connections.
    pub idle_connections: u32,
    /// Checked-out connections.
    pub active_connections: u32,
    /// Configured maximum connections.
    pub max_connections: u32,
    /// Configured minimum connections.
    pub min_connections: u32,
    /// Active connections as a percentage of the maximum.
    pub connection_utilization: f64,
    /// Rolling average query duration, in milliseconds.
    pub avg_query_duration_ms: f64,
    /// Cumulative connection errors.
    pub connection_errors: u64,
    /// Cumulative query errors.
    pub query_errors: u64,
    /// When cleanup last ran.
    pub last_cleanup: std::time::Instant,
}
|
||||
|
||||
/// Preset configurations tuned for different environments.
impl DatabaseConfig {
    /// Configuration for local development: small pool, no TLS, short
    /// idle timeout.
    pub fn development() -> Self {
        Self {
            database_url: "postgresql://localhost/veza_chat_dev".to_string(),
            max_connections: 10,
            min_connections: 2,
            connect_timeout: Duration::from_secs(5),
            acquire_timeout: Duration::from_secs(10),
            max_lifetime: Duration::from_secs(1800),
            idle_timeout: Duration::from_secs(300),
            test_before_acquire: true,
            ssl_mode: SslMode::Disable,
            pool_config: PoolConfig {
                prepared_statement_cache_size: 50,
                initial_connections: 2,
                cleanup_interval: Duration::from_secs(300),
                connection_warning_threshold: 8,
            },
        }
    }

    /// Configuration for production: larger pool, TLS required.
    ///
    /// NOTE(review): the default `database_url` points at localhost —
    /// presumably always overridden from the environment in production; confirm.
    pub fn production() -> Self {
        Self {
            database_url: "postgresql://localhost/veza_chat_prod".to_string(),
            max_connections: 20,
            min_connections: 5,
            connect_timeout: Duration::from_secs(5),
            acquire_timeout: Duration::from_secs(10),
            max_lifetime: Duration::from_secs(1800),
            idle_timeout: Duration::from_secs(600),
            test_before_acquire: true,
            ssl_mode: SslMode::Require,
            pool_config: PoolConfig {
                prepared_statement_cache_size: 100,
                initial_connections: 5,
                cleanup_interval: Duration::from_secs(300),
                connection_warning_threshold: 15,
            },
        }
    }

    /// Configuration for tests: minimal pool, aggressive timeouts, and
    /// `test_before_acquire` disabled for speed.
    pub fn testing() -> Self {
        Self {
            database_url: "postgresql://localhost/veza_chat_test".to_string(),
            max_connections: 5,
            min_connections: 1,
            connect_timeout: Duration::from_secs(2),
            acquire_timeout: Duration::from_secs(5),
            max_lifetime: Duration::from_secs(300),
            idle_timeout: Duration::from_secs(60),
            test_before_acquire: false,
            ssl_mode: SslMode::Disable,
            pool_config: PoolConfig {
                prepared_statement_cache_size: 10,
                initial_connections: 1,
                cleanup_interval: Duration::from_secs(60),
                connection_warning_threshold: 3,
            },
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    /// Restores an environment variable to a previously captured value,
    /// removing it entirely when it was originally unset. Centralizes the
    /// save/restore boilerplate the env-var tests all need.
    fn restore_env(key: &str, original: Option<String>) {
        match original {
            Some(value) => std::env::set_var(key, value),
            None => std::env::remove_var(key),
        }
    }

    /// `Config::from_env` must honor explicitly set environment variables.
    #[test]
    // Env-var tests mutate shared process state and race when run in
    // parallel; ignore unless the serial-test feature serializes them
    // (consistent with the other env tests below).
    #[cfg_attr(not(feature = "serial-test"), ignore)]
    fn test_config_from_env() {
        // Capture originals so the process environment is restored afterwards.
        let original_db_url = std::env::var("DATABASE_URL").ok();
        let original_port = std::env::var("CHAT_SERVER_PORT").ok();
        let original_host = std::env::var("CHAT_SERVER_HOST").ok();

        std::env::set_var("DATABASE_URL", "postgresql://test:test@localhost/test_db");
        std::env::set_var("CHAT_SERVER_PORT", "9999");
        std::env::set_var("CHAT_SERVER_HOST", "127.0.0.1");

        let config = Config::from_env().unwrap();
        assert_eq!(
            config.database_url,
            "postgresql://test:test@localhost/test_db"
        );
        assert_eq!(config.port, 9999);
        assert_eq!(config.host, "127.0.0.1");

        restore_env("DATABASE_URL", original_db_url);
        restore_env("CHAT_SERVER_PORT", original_port);
        restore_env("CHAT_SERVER_HOST", original_host);
    }

    /// With only DATABASE_URL set, port and host fall back to their defaults.
    #[test]
    #[cfg_attr(not(feature = "serial-test"), ignore)] // see note above: env tests race
    fn test_config_from_env_defaults() {
        let original_db_url = std::env::var("DATABASE_URL").ok();
        let original_port = std::env::var("CHAT_SERVER_PORT").ok();
        let original_host = std::env::var("CHAT_SERVER_HOST").ok();

        // Make sure the optional variables are genuinely absent.
        std::env::remove_var("CHAT_SERVER_PORT");
        std::env::remove_var("CHAT_SERVER_HOST");
        std::env::set_var("DATABASE_URL", "postgresql://test:test@localhost/test_db");

        let config = Config::from_env().unwrap();
        assert_eq!(
            config.database_url,
            "postgresql://test:test@localhost/test_db"
        );
        assert_eq!(config.port, 8081, "Port should default to 8081");
        assert_eq!(config.host, "0.0.0.0", "Host should default to 0.0.0.0");

        restore_env("DATABASE_URL", original_db_url);
        restore_env("CHAT_SERVER_PORT", original_port);
        restore_env("CHAT_SERVER_HOST", original_host);
    }

    /// `Config::from_env` must fail when DATABASE_URL is absent.
    #[test]
    #[cfg_attr(not(feature = "serial-test"), ignore)] // see note above: env tests race
    fn test_config_from_env_missing_database_url() {
        let original_db_url = std::env::var("DATABASE_URL").ok();

        std::env::remove_var("DATABASE_URL");

        let result = Config::from_env();

        // If dotenvy loaded a .env that defines DATABASE_URL, the premise of
        // this test does not hold; skip instead of failing spuriously.
        if original_db_url.is_none() && std::env::var("DATABASE_URL").is_ok() {
            eprintln!("Warning: DATABASE_URL found in .env, skipping test");
            return;
        }

        assert!(result.is_err(), "Should fail when DATABASE_URL is missing");

        if let Some(url) = original_db_url {
            std::env::set_var("DATABASE_URL", url);
        }
    }

    /// Sanity-check the documented `DatabaseConfig` defaults.
    #[tokio::test]
    async fn test_database_config_defaults() {
        let config = DatabaseConfig::default();
        assert_eq!(config.max_connections, 20);
        assert_eq!(config.min_connections, 5);
        assert_eq!(config.connect_timeout, Duration::from_secs(5));
    }

    /// Environment presets must be ordered (test < dev < prod) and use the
    /// expected SSL modes.
    #[tokio::test]
    async fn test_database_config_environments() {
        let dev_config = DatabaseConfig::development();
        let prod_config = DatabaseConfig::production();
        let test_config = DatabaseConfig::testing();

        assert!(dev_config.max_connections < prod_config.max_connections);
        assert!(test_config.max_connections < dev_config.max_connections);
        assert_eq!(prod_config.ssl_mode, SslMode::Require);
        assert_eq!(dev_config.ssl_mode, SslMode::Disable);
    }

    /// `SslMode` display values must match libpq's `sslmode` spellings.
    #[test]
    fn test_ssl_mode_to_string() {
        assert_eq!(SslMode::Disable.to_string(), "disable");
        assert_eq!(SslMode::Require.to_string(), "require");
        assert_eq!(SslMode::VerifyFull.to_string(), "verify-full");
    }

    #[test]
    fn test_ssl_mode_all_variants() {
        assert_eq!(SslMode::Prefer.to_string(), "prefer");
        assert_eq!(SslMode::VerifyCa.to_string(), "verify-ca");
    }

    /// Sanity-check the documented `PoolConfig` defaults.
    #[test]
    fn test_pool_config_default() {
        let pool = PoolConfig::default();
        assert_eq!(pool.prepared_statement_cache_size, 100);
        assert_eq!(pool.initial_connections, 5);
        assert_eq!(pool.connection_warning_threshold, 15);
    }

    /// RabbitMQ defaults must be present and non-degenerate.
    #[test]
    fn test_rabbitmq_config_defaults() {
        let config = RabbitMQConfig::from_env();
        assert!(!config.url.is_empty());
        assert!(config.max_retries >= 1);
        assert!(config.retry_interval_secs >= 1);
    }
}
|
||||
|
|
@ -1,829 +0,0 @@
|
|||
use std::collections::{HashMap, VecDeque};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::net::IpAddr;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::RwLock;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use crate::error::{ChatError, Result};
|
||||
|
||||
/// Advanced anti-DDoS rate-limiting service.
///
/// Maintains independent limiter state per IP, per user, and per channel,
/// plus detected attack patterns and a temporary blacklist. All maps are
/// concurrent (`DashMap`) and shared via `Arc` so the service can be cloned
/// across tasks cheaply.
#[derive(Debug)]
pub struct AdvancedRateLimiter {
    /// Per-IP limiters
    ip_limiters: Arc<DashMap<IpAddr, IpRateLimiter>>,

    /// Per-user limiters
    user_limiters: Arc<DashMap<i64, UserRateLimiter>>,

    /// Per-channel limiters
    channel_limiters: Arc<DashMap<String, ChannelRateLimiter>>,

    /// Detected attack patterns, keyed by pattern id
    attack_patterns: Arc<DashMap<String, AttackPattern>>,

    /// Temporary blacklist (entries may carry an expiry)
    blacklist: Arc<DashMap<IpAddr, BlacklistEntry>>,

    /// Global configuration (read-mostly)
    config: Arc<RwLock<RateLimitConfig>>,

    /// Performance metrics
    metrics: Arc<RateLimitMetrics>,
}
|
||||
|
||||
/// Per-IP limiter with request-pattern tracking for attack detection.
#[derive(Debug)]
pub struct IpRateLimiter {
    pub ip: IpAddr,
    pub buckets: HashMap<LimitType, TokenBucket>, // one token bucket per limit type
    pub last_activity: Instant,                   // used by cleanup to evict idle entries
    pub violation_count: u32,                     // escalates status at thresholds
    pub trust_score: f32,
    pub request_patterns: VecDeque<RequestEvent>, // rolling window (capped elsewhere)
    pub status: IpStatus,
}
|
||||
|
||||
/// Per-user limiter; capacity is scaled by the user's reputation level.
#[derive(Debug)]
pub struct UserRateLimiter {
    pub user_id: i64,
    pub buckets: HashMap<LimitType, TokenBucket>,
    pub last_activity: Instant,
    pub violation_count: u32,
    pub reputation: UserReputation,
    pub daily_limits: DailyLimits,
}
|
||||
|
||||
/// Per-channel limiter; throughput is scaled by the channel's moderation level.
#[derive(Debug)]
pub struct ChannelRateLimiter {
    pub channel_id: String,
    pub message_bucket: TokenBucket,
    pub concurrent_users: u32,
    pub last_activity: Instant,
    pub spam_threshold: f32,
    pub moderation_level: ModerationLevel,
}
|
||||
|
||||
/// Classic token-bucket state: `tokens` refill at `refill_rate` per second
/// up to `capacity`.
#[derive(Debug, Clone)]
pub struct TokenBucket {
    pub capacity: u32,
    pub tokens: u32,
    pub refill_rate: f32, // tokens per second
    pub last_refill: Instant,
    pub burst_allowance: u32,
}
|
||||
|
||||
/// Kinds of rate limits; each gets its own token bucket per limiter.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub enum LimitType {
    /// Messages per minute
    MessagesPerMinute,
    /// Connections per hour
    ConnectionsPerHour,
    /// Authentication attempts
    AuthAttempts,
    /// API requests
    ApiRequests,
    /// File uploads
    FileUploads,
    /// Channel creation
    ChannelCreation,
    /// Invitations sent
    Invitations,
    /// Reactions added
    Reactions,
}
|
||||
|
||||
/// Trust status of an IP address, escalated as violations accumulate.
#[derive(Debug, Clone, PartialEq)]
pub enum IpStatus {
    /// Normal IP
    Normal,
    /// Suspicious IP (heightened monitoring)
    Suspicious,
    /// Temporarily blacklisted IP
    Blacklisted,
    /// Permanently banned IP
    Banned,
    /// Trusted IP (authorized VPN/proxy)
    Trusted,
}
|
||||
|
||||
/// A user's reputation, used to scale their rate-limit capacity.
#[derive(Debug, Clone)]
pub struct UserReputation {
    pub score: f32, // 0.0 - 1.0
    pub level: ReputationLevel,
    pub violations_today: u32,
    pub positive_actions: u32,
    pub last_violation: Option<Instant>,
}
|
||||
|
||||
/// Reputation tier; higher tiers get larger rate-limit capacity multipliers.
#[derive(Debug, Clone, PartialEq)]
pub enum ReputationLevel {
    NewUser, // new users (strict restrictions)
    Normal,  // regular users
    Trusted, // trusted users
    VIP,     // VIP users (moderators, subscribers)
    System,  // system accounts (official bots)
}
|
||||
|
||||
/// Per-user daily quotas, reset at `reset_time`.
#[derive(Debug, Clone)]
pub struct DailyLimits {
    pub messages_sent: u32,
    pub max_messages: u32,
    pub files_uploaded: u32,
    pub max_files: u32,
    pub reset_time: Instant,
}
|
||||
|
||||
/// Channel moderation level; stricter levels shrink effective throughput.
#[derive(Debug, Clone, PartialEq)]
pub enum ModerationLevel {
    Low,      // relaxed moderation
    Normal,   // standard moderation
    High,     // strict moderation
    Lockdown, // channel locked down
}
|
||||
|
||||
/// A detected attack pattern, recorded for auditing and escalation.
#[derive(Debug, Clone)]
pub struct AttackPattern {
    pub pattern_id: String,
    pub pattern_type: AttackType,
    pub source_ips: Vec<IpAddr>,
    pub detection_time: Instant,
    pub severity: f32, // 0.0 - 1.0 as used by the detectors in this file
    pub requests_count: u32,
    pub geographic_spread: f32,
    pub user_agents: Vec<String>,
}
|
||||
|
||||
/// Categories of detected attacks.
#[derive(Debug, Clone, PartialEq)]
pub enum AttackType {
    /// Classic DDoS attack
    DDoS,
    /// Message spam
    MessageSpam,
    /// Authentication brute force
    BruteForce,
    /// Data scraping
    Scraping,
    /// Application-level denial of service (slow requests)
    SlowLoris,
    /// Suspicious automated behavior
    BotActivity,
}
|
||||
|
||||
/// Blacklist entry; `expires_at: None` means the block never expires.
#[derive(Debug, Clone)]
pub struct BlacklistEntry {
    pub ip: IpAddr,
    pub reason: String,
    pub blocked_at: Instant,
    pub expires_at: Option<Instant>,
    pub violation_count: u32,
    pub auto_generated: bool, // true when added by automatic detection
}
|
||||
|
||||
/// A single request observation, kept in a rolling window for
/// pattern detection.
#[derive(Debug, Clone)]
pub struct RequestEvent {
    pub timestamp: Instant,
    pub request_type: String,
    pub path: String,
    pub user_agent: Option<String>,
    pub response_time: Duration,
    pub status_code: u16,
}
|
||||
|
||||
/// Rate-limiting configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimitConfig {
    /// Messages per minute per user
    pub messages_per_minute: u32,
    /// Connections per hour per IP
    pub connections_per_hour: u32,
    /// Auth attempts per minute per IP
    pub auth_attempts_per_minute: u32,
    /// Maximum bucket size
    pub max_bucket_capacity: u32,
    /// Attack-detection threshold
    pub attack_detection_threshold: f32,
    /// Duration of automatic blacklisting
    pub auto_blacklist_duration: Duration,
    /// Enable geolocation
    pub enable_geolocation: bool,
    /// Trusted IPs (CDNs, authorized proxies)
    pub trusted_ips: Vec<IpAddr>,
}
|
||||
|
||||
/// Rate-limiting metrics, shared across tasks via atomics.
#[derive(Debug, Default)]
pub struct RateLimitMetrics {
    pub requests_processed: Arc<std::sync::atomic::AtomicU64>,
    pub requests_blocked: Arc<std::sync::atomic::AtomicU64>,
    pub attacks_detected: Arc<std::sync::atomic::AtomicU32>,
    pub false_positives: Arc<std::sync::atomic::AtomicU32>,
    pub avg_response_time: Arc<Mutex<Duration>>, // parking_lot Mutex; last observed, not a true average
}
|
||||
|
||||
/// Outcome of a rate-limit check.
#[derive(Debug, Clone)]
pub struct RateLimitResult {
    pub allowed: bool,
    pub reason: Option<String>,       // populated only when denied
    pub retry_after: Option<Duration>,
    pub remaining_tokens: u32,
    pub burst_remaining: u32,
    pub reputation_impact: f32,       // negative values penalize the caller
}
|
||||
|
||||
// NOTE: duplicate `use std::collections::VecDeque;` removed — `VecDeque` is
// already imported in the file-top `use std::collections::{HashMap, VecDeque};`,
// and re-importing it is a compile error (E0252).
|
||||
|
||||
impl AdvancedRateLimiter {
    /// Creates a new advanced rate limiter with empty per-IP/user/channel
    /// state and the given global configuration.
    pub fn new(config: RateLimitConfig) -> Self {
        Self {
            ip_limiters: Arc::new(DashMap::new()),
            user_limiters: Arc::new(DashMap::new()),
            channel_limiters: Arc::new(DashMap::new()),
            attack_patterns: Arc::new(DashMap::new()),
            blacklist: Arc::new(DashMap::new()),
            config: Arc::new(RwLock::new(config)),
            metrics: Arc::new(RateLimitMetrics::default()),
        }
    }

    /// Checks whether a request is allowed (main entry point).
    ///
    /// Evaluation order: blacklist, per-IP limit, per-user limit (if a user
    /// is identified), per-channel limit (if applicable), then attack-pattern
    /// analysis. The first denial short-circuits.
    pub async fn check_rate_limit(
        &self,
        ip: IpAddr,
        user_id: Option<i64>,
        channel_id: Option<String>,
        limit_type: LimitType,
        request_info: RequestInfo,
    ) -> Result<RateLimitResult> {
        let start_time = Instant::now();

        // Count every request, allowed or not.
        self.metrics.requests_processed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        // 1. Check the blacklist first
        if let Some(blacklist_entry) = self.blacklist.get(&ip) {
            if blacklist_entry.expires_at.map_or(true, |exp| exp > Instant::now()) {
                self.metrics.requests_blocked.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                return Ok(RateLimitResult {
                    allowed: false,
                    reason: Some(format!("IP blacklisted: {}", blacklist_entry.reason)),
                    retry_after: blacklist_entry.expires_at.map(|exp| exp.duration_since(Instant::now())),
                    remaining_tokens: 0,
                    burst_remaining: 0,
                    reputation_impact: -0.1,
                });
            } else {
                // Expired entry, remove it.
                // NOTE(review): `blacklist_entry` — a DashMap read guard on this
                // same map — is still alive here; calling `remove` while holding
                // it can deadlock on the shard lock. Drop the guard first; confirm.
                self.blacklist.remove(&ip);
            }
        }

        // 2. Per-IP rate limiting
        let ip_result = self.check_ip_rate_limit(ip, &limit_type, &request_info).await?;
        if !ip_result.allowed {
            return Ok(ip_result);
        }

        // 3. Per-user rate limiting, if applicable
        if let Some(uid) = user_id {
            let user_result = self.check_user_rate_limit(uid, &limit_type).await?;
            if !user_result.allowed {
                return Ok(user_result);
            }
        }

        // 4. Per-channel rate limiting, if applicable
        if let Some(cid) = channel_id {
            let channel_result = self.check_channel_rate_limit(&cid, &limit_type).await?;
            if !channel_result.allowed {
                return Ok(channel_result);
            }
        }

        // 5. Analyze attack patterns
        self.analyze_request_pattern(ip, &request_info).await?;

        // 6. Update performance metrics (stores the last latency, not an average)
        let elapsed = start_time.elapsed();
        *self.metrics.avg_response_time.lock() = elapsed;

        Ok(RateLimitResult {
            allowed: true,
            reason: None,
            retry_after: None,
            remaining_tokens: ip_result.remaining_tokens,
            burst_remaining: ip_result.burst_remaining,
            reputation_impact: 0.0,
        })
    }

    /// Per-IP rate limiting: checks the IP's status, then consumes one token
    /// from the bucket matching `limit_type`, recording the request event on
    /// success and escalating the IP's status on repeated violations.
    async fn check_ip_rate_limit(
        &self,
        ip: IpAddr,
        limit_type: &LimitType,
        request_info: &RequestInfo,
    ) -> Result<RateLimitResult> {
        let config = self.config.read().await;

        // Fetch or create the IP limiter (holds a DashMap entry guard for the
        // rest of this function).
        let mut ip_limiter = self.ip_limiters.entry(ip).or_insert_with(|| {
            IpRateLimiter::new(ip, &config)
        });

        // Check the IP's standing before spending tokens.
        match ip_limiter.status {
            IpStatus::Banned => {
                return Ok(RateLimitResult {
                    allowed: false,
                    reason: Some("IP permanently banned".to_string()),
                    retry_after: None,
                    remaining_tokens: 0,
                    burst_remaining: 0,
                    reputation_impact: -0.2,
                });
            }
            IpStatus::Blacklisted => {
                return Ok(RateLimitResult {
                    allowed: false,
                    reason: Some("IP temporarily blacklisted".to_string()),
                    retry_after: Some(Duration::from_secs(300)), // 5 minutes
                    remaining_tokens: 0,
                    burst_remaining: 0,
                    reputation_impact: -0.1,
                });
            }
            _ => {}
        }

        // Apply token-bucket rate limiting.
        // NOTE(review): `remaining_tokens` is the count AFTER consuming a token,
        // and the branch below treats 0 as a denial — so the request that
        // consumes the LAST token is rejected even though a token was available
        // (off-by-one). Confirm intended semantics.
        let remaining_tokens = {
            let bucket = ip_limiter.buckets.get_mut(limit_type)
                .ok_or_else(|| ChatError::internal_error(format!(
                    "Rate limit bucket not initialized for limit type: {:?}",
                    limit_type
                )))?;
            bucket.refill();

            if bucket.tokens > 0 {
                bucket.tokens -= 1;
                bucket.tokens
            } else {
                0
            }
        };

        if remaining_tokens > 0 {
            ip_limiter.last_activity = Instant::now();

            // Record the event for pattern analysis.
            ip_limiter.request_patterns.push_back(RequestEvent {
                timestamp: Instant::now(),
                request_type: format!("{:?}", limit_type),
                path: request_info.path.clone(),
                user_agent: request_info.user_agent.clone(),
                response_time: Duration::from_millis(0),
                status_code: 200,
            });

            // Keep only the last 100 events.
            if ip_limiter.request_patterns.len() > 100 {
                ip_limiter.request_patterns.pop_front();
            }

            Ok(RateLimitResult {
                allowed: true,
                reason: None,
                retry_after: None,
                remaining_tokens,
                burst_remaining: remaining_tokens,
                reputation_impact: 0.0,
            })
        } else {
            // Rate limit exceeded.
            ip_limiter.violation_count += 1;

            // Automatic escalation on repeated violations.
            if ip_limiter.violation_count >= 5 {
                ip_limiter.status = IpStatus::Suspicious;
            }
            if ip_limiter.violation_count >= 10 {
                self.auto_blacklist_ip(ip, "Too many violations".to_string()).await?;
            }

            Ok(RateLimitResult {
                allowed: false,
                reason: Some(format!("Rate limit exceeded for {:?}", limit_type)),
                retry_after: Some(Duration::from_secs(60)),
                remaining_tokens: 0,
                burst_remaining: 0,
                reputation_impact: 0.0,
            })
        }
    }

    /// Per-user rate limiting with reputation-scaled capacity.
    async fn check_user_rate_limit(&self, user_id: i64, limit_type: &LimitType) -> Result<RateLimitResult> {
        let config = self.config.read().await;

        let mut user_limiter = self.user_limiters.entry(user_id).or_insert_with(|| {
            UserRateLimiter::new(user_id, &config)
        });

        // Reputation determines the capacity multiplier.
        let capacity_multiplier = match user_limiter.reputation.level {
            ReputationLevel::NewUser => 0.5,
            ReputationLevel::Normal => 1.0,
            ReputationLevel::Trusted => 1.5,
            ReputationLevel::VIP => 2.0,
            ReputationLevel::System => 5.0,
        };

        // Access the bucket with the adjusted capacity.
        // NOTE(review): the multiplier is applied to the STORED capacity on
        // every call, so capacity compounds (a 0.5 multiplier halves it each
        // check down to 0; >1.0 grows it without bound). It should presumably
        // be applied to a base capacity instead — confirm.
        let remaining_tokens = {
            let bucket = user_limiter.buckets.get_mut(limit_type)
                .ok_or_else(|| ChatError::internal_error(format!(
                    "Rate limit bucket not initialized for limit type: {:?}",
                    limit_type
                )))?;
            bucket.capacity = (bucket.capacity as f32 * capacity_multiplier) as u32;
            bucket.refill();

            if bucket.tokens > 0 {
                bucket.tokens -= 1;
                bucket.tokens
            } else {
                0
            }
        };

        if remaining_tokens > 0 {
            user_limiter.last_activity = Instant::now();

            Ok(RateLimitResult {
                allowed: true,
                reason: None,
                retry_after: None,
                remaining_tokens,
                burst_remaining: remaining_tokens,
                reputation_impact: 0.0,
            })
        } else {
            // Denial penalizes the user's reputation (floored at 0.0).
            user_limiter.violation_count += 1;
            user_limiter.reputation.violations_today += 1;
            user_limiter.reputation.score = (user_limiter.reputation.score - 0.05).max(0.0);

            Ok(RateLimitResult {
                allowed: false,
                reason: Some(format!("User rate limit exceeded for {:?}", limit_type)),
                retry_after: Some(Duration::from_secs(30)),
                remaining_tokens: 0,
                burst_remaining: 0,
                reputation_impact: 0.0,
            })
        }
    }

    /// Per-channel rate limiting, scaled by the channel's moderation level.
    async fn check_channel_rate_limit(&self, channel_id: &str, limit_type: &LimitType) -> Result<RateLimitResult> {
        let config = self.config.read().await;

        let mut channel_limiter = self.channel_limiters.entry(channel_id.to_string()).or_insert_with(|| {
            ChannelRateLimiter::new(channel_id.to_string(), &config)
        });

        // Stricter moderation => lower effective rate.
        let rate_multiplier = match channel_limiter.moderation_level {
            ModerationLevel::Low => 1.5,
            ModerationLevel::Normal => 1.0,
            ModerationLevel::High => 0.5,
            ModerationLevel::Lockdown => 0.1,
        };

        channel_limiter.message_bucket.refill();
        // NOTE(review): for Low, (1.0 / 1.5) as u32 == 0, so messages consume
        // no tokens at all under relaxed moderation — confirm this is intended.
        let tokens_needed = (1.0 / rate_multiplier) as u32;

        if channel_limiter.message_bucket.tokens >= tokens_needed {
            channel_limiter.message_bucket.tokens -= tokens_needed;
            channel_limiter.last_activity = Instant::now();

            Ok(RateLimitResult {
                allowed: true,
                reason: None,
                retry_after: None,
                remaining_tokens: channel_limiter.message_bucket.tokens,
                burst_remaining: channel_limiter.message_bucket.burst_allowance,
                reputation_impact: 0.0,
            })
        } else {
            Ok(RateLimitResult {
                allowed: false,
                reason: Some(format!("Channel rate limit exceeded (moderation: {:?})", channel_limiter.moderation_level)),
                retry_after: Some(Duration::from_secs(10)),
                remaining_tokens: 0,
                burst_remaining: 0,
                reputation_impact: 0.0,
            })
        }
    }

    /// Analyzes recent request patterns from one IP to detect attacks
    /// (DDoS volume, auth brute force, bot-like behavior).
    ///
    /// NOTE(review): this holds a `DashMap::get` read guard on `ip_limiters`
    /// for the whole body while `detect_bot_activity` below calls
    /// `ip_limiters.get_mut(&ip)` on the SAME key — with DashMap that is a
    /// shard-lock deadlock. The guard should be dropped (data copied out)
    /// before calling the detectors; confirm.
    async fn analyze_request_pattern(&self, ip: IpAddr, request_info: &RequestInfo) -> Result<()> {
        // Fetch the request history for this IP.
        if let Some(ip_limiter) = self.ip_limiters.get(&ip) {
            // Only events from the last 60 seconds.
            let recent_requests: Vec<_> = ip_limiter.request_patterns.iter()
                .filter(|event| event.timestamp.elapsed() < Duration::from_secs(60))
                .collect();

            // Detect the different attack types.

            // 1. DDoS — too many requests in a short window
            if recent_requests.len() > 100 {
                self.detect_ddos_attack(ip, &recent_requests).await?;
            }

            // 2. Brute force — repeated authentication attempts
            if request_info.path.contains("/auth") || request_info.path.contains("/login") {
                let auth_attempts = recent_requests.iter()
                    .filter(|event| event.path.contains("/auth"))
                    .count();

                if auth_attempts > 10 {
                    self.detect_brute_force_attack(ip).await?;
                }
            }

            // 3. Bot activity — suspicious patterns (same User-Agent, regular timing)
            if let Some(user_agent) = &request_info.user_agent {
                let same_ua_count = recent_requests.iter()
                    .filter(|event| event.user_agent.as_ref() == Some(user_agent))
                    .count();

                if same_ua_count > 50 && self.is_suspicious_user_agent(user_agent) {
                    self.detect_bot_activity(ip, user_agent.clone()).await?;
                }
            }
        }

        Ok(())
    }

    /// Records a DDoS pattern, blacklists the IP, and bumps metrics.
    ///
    /// NOTE(review): `Instant::now().elapsed().as_secs()` is always ~0, so
    /// pattern ids are not unique across detections — confirm intent.
    async fn detect_ddos_attack(&self, ip: IpAddr, recent_requests: &[&RequestEvent]) -> Result<()> {
        let pattern = AttackPattern {
            pattern_id: format!("ddos_{}_{}",ip, Instant::now().elapsed().as_secs()),
            pattern_type: AttackType::DDoS,
            source_ips: vec![ip],
            detection_time: Instant::now(),
            severity: 0.9,
            requests_count: recent_requests.len() as u32,
            geographic_spread: 0.0, // Single IP
            // De-duplicate user agents via a HashSet.
            user_agents: recent_requests.iter()
                .filter_map(|event| event.user_agent.clone())
                .collect::<std::collections::HashSet<_>>()
                .into_iter()
                .collect(),
        };

        self.attack_patterns.insert(pattern.pattern_id.clone(), pattern);
        self.auto_blacklist_ip(ip, "DDoS attack detected".to_string()).await?;

        self.metrics.attacks_detected.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        tracing::warn!("DDoS attack detected from IP: {}", ip);

        Ok(())
    }

    /// Records a brute-force pattern and blacklists the IP.
    async fn detect_brute_force_attack(&self, ip: IpAddr) -> Result<()> {
        let pattern = AttackPattern {
            pattern_id: format!("bruteforce_{}_{}", ip, Instant::now().elapsed().as_secs()),
            pattern_type: AttackType::BruteForce,
            source_ips: vec![ip],
            detection_time: Instant::now(),
            severity: 0.8,
            requests_count: 10,
            geographic_spread: 0.0,
            user_agents: vec![],
        };

        self.attack_patterns.insert(pattern.pattern_id.clone(), pattern);
        self.auto_blacklist_ip(ip, "Brute force attack detected".to_string()).await?;

        tracing::warn!("Brute force attack detected from IP: {}", ip);
        Ok(())
    }

    /// Records bot activity; marks the IP as suspicious (no blacklist)
    /// and lowers its trust score.
    async fn detect_bot_activity(&self, ip: IpAddr, user_agent: String) -> Result<()> {
        let pattern = AttackPattern {
            pattern_id: format!("bot_{}_{}", ip, Instant::now().elapsed().as_secs()),
            pattern_type: AttackType::BotActivity,
            source_ips: vec![ip],
            detection_time: Instant::now(),
            severity: 0.6,
            requests_count: 50,
            geographic_spread: 0.0,
            user_agents: vec![user_agent],
        };

        self.attack_patterns.insert(pattern.pattern_id.clone(), pattern);

        // Flag the IP as suspicious rather than blacklisting immediately.
        if let Some(mut ip_limiter) = self.ip_limiters.get_mut(&ip) {
            ip_limiter.status = IpStatus::Suspicious;
            ip_limiter.trust_score = (ip_limiter.trust_score - 0.3).max(0.0);
        }

        tracing::info!("Bot activity detected from IP: {}", ip);
        Ok(())
    }

    /// Case-insensitive substring check against a fixed list of
    /// automation-tool markers.
    fn is_suspicious_user_agent(&self, user_agent: &str) -> bool {
        let suspicious_patterns = [
            "bot", "crawler", "spider", "scraper", "curl", "wget", "python", "java",
            "headless", "selenium", "phantom", "automated"
        ];

        let ua_lower = user_agent.to_lowercase();
        suspicious_patterns.iter().any(|pattern| ua_lower.contains(pattern))
    }

    /// Adds an IP to the temporary blacklist with the configured expiry
    /// and bumps the attack counter.
    async fn auto_blacklist_ip(&self, ip: IpAddr, reason: String) -> Result<()> {
        // Cloned up front because `reason` is moved into the entry below.
        let reason_clone = reason.clone();
        let config = self.config.read().await;

        let blacklist_entry = BlacklistEntry {
            ip,
            reason,
            blocked_at: Instant::now(),
            expires_at: Some(Instant::now() + config.auto_blacklist_duration),
            violation_count: 1,
            auto_generated: true,
        };

        self.blacklist.insert(ip, blacklist_entry);
        self.metrics.attacks_detected.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        tracing::warn!("IP {} automatically blacklisted: {}", ip, reason_clone);
        Ok(())
    }

    /// Evicts expired blacklist entries and limiters idle for over an hour.
    /// Intended to be called periodically by a maintenance task.
    pub async fn cleanup_expired_entries(&self) {
        let now = Instant::now();

        // Drop expired blacklist entries (permanent entries have no expiry).
        self.blacklist.retain(|_, entry| {
            entry.expires_at.map_or(true, |exp| exp > now)
        });

        // Drop limiters inactive for more than one hour.
        self.ip_limiters.retain(|_, limiter| {
            now.duration_since(limiter.last_activity) < Duration::from_secs(3600)
        });

        self.user_limiters.retain(|_, limiter| {
            now.duration_since(limiter.last_activity) < Duration::from_secs(3600)
        });

        self.channel_limiters.retain(|_, limiter| {
            now.duration_since(limiter.last_activity) < Duration::from_secs(3600)
        });
    }
}
|
||||
|
||||
/// Informations sur une requête
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RequestInfo {
|
||||
pub path: String,
|
||||
pub user_agent: Option<String>,
|
||||
pub method: String,
|
||||
pub content_length: Option<usize>,
|
||||
}
|
||||
|
||||
impl TokenBucket {
|
||||
pub fn new(capacity: u32, refill_rate: f32, burst_allowance: u32) -> Self {
|
||||
Self {
|
||||
capacity,
|
||||
tokens: capacity,
|
||||
refill_rate,
|
||||
last_refill: Instant::now(),
|
||||
burst_allowance,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn refill(&mut self) {
|
||||
let now = Instant::now();
|
||||
let elapsed = now.duration_since(self.last_refill).as_secs_f32();
|
||||
let tokens_to_add = (elapsed * self.refill_rate) as u32;
|
||||
|
||||
if tokens_to_add > 0 {
|
||||
self.tokens = (self.tokens + tokens_to_add).min(self.capacity);
|
||||
self.last_refill = now;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IpRateLimiter {
|
||||
pub fn new(ip: IpAddr, config: &RateLimitConfig) -> Self {
|
||||
let mut buckets = HashMap::new();
|
||||
|
||||
// Créer les buckets pour différents types de limitations
|
||||
buckets.insert(LimitType::MessagesPerMinute, TokenBucket::new(config.messages_per_minute, config.messages_per_minute as f32 / 60.0, 10));
|
||||
buckets.insert(LimitType::ConnectionsPerHour, TokenBucket::new(config.connections_per_hour, config.connections_per_hour as f32 / 3600.0, 5));
|
||||
buckets.insert(LimitType::AuthAttempts, TokenBucket::new(config.auth_attempts_per_minute, config.auth_attempts_per_minute as f32 / 60.0, 2));
|
||||
buckets.insert(LimitType::ApiRequests, TokenBucket::new(1000, 16.67, 50)); // 1000/min
|
||||
buckets.insert(LimitType::FileUploads, TokenBucket::new(10, 0.17, 2)); // 10/min
|
||||
|
||||
Self {
|
||||
ip,
|
||||
buckets,
|
||||
last_activity: Instant::now(),
|
||||
violation_count: 0,
|
||||
trust_score: 0.5, // Score neutre initial
|
||||
request_patterns: VecDeque::new(),
|
||||
status: IpStatus::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UserRateLimiter {
|
||||
pub fn new(user_id: i64, config: &RateLimitConfig) -> Self {
|
||||
let mut buckets = HashMap::new();
|
||||
|
||||
buckets.insert(LimitType::MessagesPerMinute, TokenBucket::new(config.messages_per_minute, config.messages_per_minute as f32 / 60.0, 5));
|
||||
buckets.insert(LimitType::FileUploads, TokenBucket::new(20, 0.33, 3)); // 20/min
|
||||
buckets.insert(LimitType::ChannelCreation, TokenBucket::new(5, 0.083, 1)); // 5/min
|
||||
buckets.insert(LimitType::Invitations, TokenBucket::new(10, 0.17, 2)); // 10/min
|
||||
buckets.insert(LimitType::Reactions, TokenBucket::new(60, 1.0, 10)); // 60/min
|
||||
|
||||
Self {
|
||||
user_id,
|
||||
buckets,
|
||||
last_activity: Instant::now(),
|
||||
violation_count: 0,
|
||||
reputation: UserReputation {
|
||||
score: 0.5,
|
||||
level: ReputationLevel::NewUser,
|
||||
violations_today: 0,
|
||||
positive_actions: 0,
|
||||
last_violation: None,
|
||||
},
|
||||
daily_limits: DailyLimits {
|
||||
messages_sent: 0,
|
||||
max_messages: 1000,
|
||||
files_uploaded: 0,
|
||||
max_files: 50,
|
||||
reset_time: Instant::now() + Duration::from_secs(86400), // 24h
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ChannelRateLimiter {
|
||||
pub fn new(channel_id: String, config: &RateLimitConfig) -> Self {
|
||||
Self {
|
||||
channel_id,
|
||||
message_bucket: TokenBucket::new(config.messages_per_minute * 10, (config.messages_per_minute * 10) as f32 / 60.0, 20),
|
||||
concurrent_users: 0,
|
||||
last_activity: Instant::now(),
|
||||
spam_threshold: 0.7,
|
||||
moderation_level: ModerationLevel::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RateLimitConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
messages_per_minute: 30,
|
||||
connections_per_hour: 100,
|
||||
auth_attempts_per_minute: 5,
|
||||
max_bucket_capacity: 1000,
|
||||
attack_detection_threshold: 0.8,
|
||||
auto_blacklist_duration: Duration::from_secs(900), // 15 minutes
|
||||
enable_geolocation: true,
|
||||
trusted_ips: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,495 +0,0 @@
|
|||
//! Système de channels Discord-like avancé
|
||||
//!
|
||||
//! Ce module implémente un système de channels complet avec :
|
||||
//! - Types de channels variés (Text, Voice, Stage, Forum, etc.)
|
||||
//! - Permissions granulaires par rôle et utilisateur
|
||||
//! - Catégories et organisation hiérarchique
|
||||
//! - Support vocal avec gestion des membres connectés
|
||||
//! - Slow mode et limitations
|
||||
//! - Statistiques détaillées
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
use dashmap::DashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::permissions::{Permission, UserPermissions};
|
||||
use crate::error::{ChatError, Result};
|
||||
|
||||
/// Types de channels disponibles (Discord-like)
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum ChannelType {
|
||||
/// Channel texte standard
|
||||
Text,
|
||||
/// Channel vocal
|
||||
Voice,
|
||||
/// Channel d'annonces (un seul sens)
|
||||
Announcement,
|
||||
/// Channel stage pour events
|
||||
Stage,
|
||||
/// Channel forum avec threads
|
||||
Forum,
|
||||
/// Channel de news
|
||||
News,
|
||||
/// Channel privé (DM)
|
||||
DirectMessage,
|
||||
}
|
||||
|
||||
/// Configuration d'un channel
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ChannelConfig {
|
||||
/// Nom du channel
|
||||
pub name: String,
|
||||
/// Description/Topic
|
||||
pub topic: Option<String>,
|
||||
/// Type de channel
|
||||
pub channel_type: ChannelType,
|
||||
/// NSFW ?
|
||||
pub nsfw: bool,
|
||||
/// Slow mode (secondes entre messages)
|
||||
pub slowmode_delay: Option<u32>,
|
||||
/// Bitrate pour les channels vocaux (kbps)
|
||||
pub bitrate: Option<u32>,
|
||||
/// Limite d'utilisateurs pour channels vocaux
|
||||
pub user_limit: Option<u32>,
|
||||
/// Position dans la liste
|
||||
pub position: u32,
|
||||
/// ID de la catégorie parent
|
||||
pub parent_id: Option<String>,
|
||||
}
|
||||
|
||||
/// Permissions spécifiques à un channel
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ChannelPermissions {
|
||||
/// Permissions par rôle
|
||||
pub role_permissions: HashMap<String, HashSet<ChannelPermission>>,
|
||||
/// Permissions par utilisateur (overrides)
|
||||
pub user_permissions: HashMap<i64, HashSet<ChannelPermission>>,
|
||||
}
|
||||
|
||||
/// Permissions granulaires pour les channels
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub enum ChannelPermission {
|
||||
// Permissions générales
|
||||
ViewChannel,
|
||||
ManageChannel,
|
||||
ManagePermissions,
|
||||
CreateInvite,
|
||||
|
||||
// Messages
|
||||
SendMessages,
|
||||
SendTTSMessages,
|
||||
ManageMessages,
|
||||
EmbedLinks,
|
||||
AttachFiles,
|
||||
ReadMessageHistory,
|
||||
MentionEveryone,
|
||||
UseExternalEmojis,
|
||||
UseExternalStickers,
|
||||
AddReactions,
|
||||
UseSlashCommands,
|
||||
UseThreads,
|
||||
CreatePublicThreads,
|
||||
CreatePrivateThreads,
|
||||
SendMessagesInThreads,
|
||||
|
||||
// Vocal
|
||||
Connect,
|
||||
Speak,
|
||||
MuteMembers,
|
||||
DeafenMembers,
|
||||
MoveMembers,
|
||||
UseVoiceActivity,
|
||||
Priorityspeaker,
|
||||
Stream,
|
||||
UseEmbeddedActivities,
|
||||
UseSoundboard,
|
||||
|
||||
// Avancé
|
||||
ManageWebhooks,
|
||||
ManageEvents,
|
||||
RequestToSpeak,
|
||||
}
|
||||
|
||||
/// Structure d'un channel Discord-like
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct Channel {
|
||||
pub id: String,
|
||||
pub config: ChannelConfig,
|
||||
pub permissions: ChannelPermissions,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub last_message_id: Option<String>,
|
||||
pub last_activity: DateTime<Utc>,
|
||||
|
||||
/// Membres connectés (pour channels vocaux)
|
||||
#[serde(skip)]
|
||||
pub connected_members: Arc<DashMap<i64, VoiceMember>>,
|
||||
|
||||
/// Statistiques du channel
|
||||
pub stats: ChannelStats,
|
||||
}
|
||||
|
||||
/// Membre connecté à un channel vocal
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct VoiceMember {
|
||||
pub user_id: i64,
|
||||
pub username: String,
|
||||
pub joined_at: DateTime<Utc>,
|
||||
pub is_muted: bool,
|
||||
pub is_deafened: bool,
|
||||
pub is_streaming: bool,
|
||||
pub is_camera_on: bool,
|
||||
}
|
||||
|
||||
/// Statistiques d'un channel
|
||||
#[derive(Debug, Default, Clone, Serialize)]
|
||||
pub struct ChannelStats {
|
||||
pub total_messages: u64,
|
||||
pub total_members: u64,
|
||||
pub active_members_today: u64,
|
||||
pub peak_concurrent_users: u64,
|
||||
pub last_peak_at: Option<DateTime<Utc>>,
|
||||
}
|
||||
|
||||
/// Gestionnaire de channels
|
||||
#[derive(Debug)]
|
||||
pub struct ChannelManager {
|
||||
/// Channels par ID
|
||||
channels: Arc<DashMap<String, Channel>>,
|
||||
/// Index des channels par serveur
|
||||
server_channels: Arc<DashMap<String, HashSet<String>>>,
|
||||
/// Catégories
|
||||
categories: Arc<DashMap<String, ChannelCategory>>,
|
||||
}
|
||||
|
||||
/// Catégorie de channels
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct ChannelCategory {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
pub position: u32,
|
||||
pub server_id: String,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
impl Default for ChannelManager {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl ChannelManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
channels: Arc::new(DashMap::new()),
|
||||
server_channels: Arc::new(DashMap::new()),
|
||||
categories: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Crée un nouveau channel
|
||||
pub async fn create_channel(
|
||||
&self,
|
||||
server_id: &str,
|
||||
config: ChannelConfig,
|
||||
_creator_id: i64,
|
||||
creator_permissions: &UserPermissions,
|
||||
) -> Result<String> {
|
||||
// Vérifier les permissions
|
||||
if !creator_permissions.has_permission(&Permission::ManageChannels) {
|
||||
return Err(ChatError::unauthorized_simple("insufficient_permissions"));
|
||||
}
|
||||
|
||||
let channel_id = format!("ch_{}", Uuid::new_v4());
|
||||
|
||||
let channel = Channel {
|
||||
id: channel_id.clone(),
|
||||
config,
|
||||
permissions: ChannelPermissions {
|
||||
role_permissions: HashMap::new(),
|
||||
user_permissions: HashMap::new(),
|
||||
},
|
||||
created_at: Utc::now(),
|
||||
last_message_id: None,
|
||||
last_activity: Utc::now(),
|
||||
connected_members: Arc::new(DashMap::new()),
|
||||
stats: ChannelStats::default(),
|
||||
};
|
||||
|
||||
// Ajouter le channel
|
||||
self.channels.insert(channel_id.clone(), channel);
|
||||
|
||||
// Indexer par serveur
|
||||
self.server_channels
|
||||
.entry(server_id.to_string())
|
||||
.or_default()
|
||||
.insert(channel_id.clone());
|
||||
|
||||
Ok(channel_id)
|
||||
}
|
||||
|
||||
/// Vérifie si un utilisateur peut voir un channel
|
||||
pub fn can_view_channel(&self, channel_id: &str, user_permissions: &UserPermissions) -> bool {
|
||||
if let Some(channel) = self.channels.get(channel_id) {
|
||||
self.check_channel_permission(
|
||||
&channel.permissions,
|
||||
user_permissions,
|
||||
&ChannelPermission::ViewChannel,
|
||||
)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Vérifie si un utilisateur peut envoyer des messages dans un channel
|
||||
pub fn can_send_messages(&self, channel_id: &str, user_permissions: &UserPermissions) -> bool {
|
||||
if let Some(channel) = self.channels.get(channel_id) {
|
||||
self.check_channel_permission(
|
||||
&channel.permissions,
|
||||
user_permissions,
|
||||
&ChannelPermission::SendMessages,
|
||||
)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Joint un utilisateur à un channel vocal
|
||||
pub async fn join_voice_channel(
|
||||
&self,
|
||||
channel_id: &str,
|
||||
user_id: i64,
|
||||
username: String,
|
||||
user_permissions: &UserPermissions,
|
||||
) -> Result<()> {
|
||||
let channel = self.channels.get(channel_id)
|
||||
.ok_or_else(|| ChatError::not_found_simple("channel_not_found"))?;
|
||||
|
||||
// Vérifier le type de channel
|
||||
if !matches!(channel.config.channel_type, ChannelType::Voice | ChannelType::Stage) {
|
||||
return Err(ChatError::validation_error("not_voice_channel"));
|
||||
}
|
||||
|
||||
// Vérifier les permissions
|
||||
if !self.check_channel_permission(
|
||||
&channel.permissions,
|
||||
user_permissions,
|
||||
&ChannelPermission::Connect,
|
||||
) {
|
||||
return Err(ChatError::unauthorized_simple("cannot_connect"));
|
||||
}
|
||||
|
||||
// Vérifier la limite d'utilisateurs
|
||||
if let Some(limit) = channel.config.user_limit {
|
||||
if channel.connected_members.len() >= limit as usize {
|
||||
return Err(ChatError::validation_error("channel_full"));
|
||||
}
|
||||
}
|
||||
|
||||
let voice_member = VoiceMember {
|
||||
user_id,
|
||||
username,
|
||||
joined_at: Utc::now(),
|
||||
is_muted: false,
|
||||
is_deafened: false,
|
||||
is_streaming: false,
|
||||
is_camera_on: false,
|
||||
};
|
||||
|
||||
channel.connected_members.insert(user_id, voice_member);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Quitte un channel vocal
|
||||
pub async fn leave_voice_channel(&self, channel_id: &str, user_id: i64) -> Result<()> {
|
||||
if let Some(channel) = self.channels.get(channel_id) {
|
||||
channel.connected_members.remove(&user_id);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Met à jour les permissions d'un channel
|
||||
pub async fn update_channel_permissions(
|
||||
&self,
|
||||
channel_id: &str,
|
||||
target_type: PermissionTargetType,
|
||||
target_id: String,
|
||||
permissions: HashSet<ChannelPermission>,
|
||||
user_permissions: &UserPermissions,
|
||||
) -> Result<()> {
|
||||
if !user_permissions.has_permission(&Permission::ManageChannels) {
|
||||
return Err(ChatError::unauthorized_simple("insufficient_permissions"));
|
||||
}
|
||||
|
||||
if let Some(mut channel) = self.channels.get_mut(channel_id) {
|
||||
match target_type {
|
||||
PermissionTargetType::Role => {
|
||||
channel.permissions.role_permissions.insert(target_id, permissions);
|
||||
}
|
||||
PermissionTargetType::User => {
|
||||
if let Ok(user_id) = target_id.parse::<i64>() {
|
||||
channel.permissions.user_permissions.insert(user_id, permissions);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Active le slow mode sur un channel
|
||||
pub async fn set_slowmode(
|
||||
&self,
|
||||
channel_id: &str,
|
||||
delay_seconds: Option<u32>,
|
||||
user_permissions: &UserPermissions,
|
||||
) -> Result<()> {
|
||||
if !user_permissions.has_permission(&Permission::ManageChannels) {
|
||||
return Err(ChatError::unauthorized_simple("insufficient_permissions"));
|
||||
}
|
||||
|
||||
if let Some(mut channel) = self.channels.get_mut(channel_id) {
|
||||
channel.config.slowmode_delay = delay_seconds;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Obtient les channels d'un serveur organisés par catégories
|
||||
pub fn get_server_channels(&self, server_id: &str) -> Vec<ChannelWithCategory> {
|
||||
let mut result = Vec::new();
|
||||
|
||||
if let Some(channel_ids) = self.server_channels.get(server_id) {
|
||||
for channel_id in channel_ids.iter() {
|
||||
if let Some(channel) = self.channels.get(channel_id) {
|
||||
let category = channel.config.parent_id.as_ref()
|
||||
.and_then(|id| self.categories.get(id))
|
||||
.map(|cat| cat.value().clone());
|
||||
|
||||
result.push(ChannelWithCategory {
|
||||
channel: channel.value().clone(),
|
||||
category,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Trier par position
|
||||
result.sort_by_key(|ch| ch.channel.config.position);
|
||||
result
|
||||
}
|
||||
|
||||
/// Vérifie une permission spécifique pour un channel
|
||||
fn check_channel_permission(
|
||||
&self,
|
||||
channel_permissions: &ChannelPermissions,
|
||||
user_permissions: &UserPermissions,
|
||||
required_permission: &ChannelPermission,
|
||||
) -> bool {
|
||||
// Permissions utilisateur spécifiques (override)
|
||||
if let Some(user_perms) = channel_permissions.user_permissions.get(&user_permissions.user_id) {
|
||||
return user_perms.contains(required_permission);
|
||||
}
|
||||
|
||||
// Permissions de rôle
|
||||
for role in &user_permissions.roles {
|
||||
let role_str = format!("{:?}", role);
|
||||
if let Some(role_perms) = channel_permissions.role_permissions.get(&role_str) {
|
||||
if role_perms.contains(required_permission) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Permissions par défaut (everyone peut voir les channels publics)
|
||||
matches!(required_permission, ChannelPermission::ViewChannel | ChannelPermission::SendMessages)
|
||||
}
|
||||
}
|
||||
|
||||
/// Type de cible pour les permissions
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum PermissionTargetType {
|
||||
Role,
|
||||
User,
|
||||
}
|
||||
|
||||
/// Channel avec sa catégorie
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct ChannelWithCategory {
|
||||
pub channel: Channel,
|
||||
pub category: Option<ChannelCategory>,
|
||||
}
|
||||
|
||||
impl Default for ChannelType {
|
||||
fn default() -> Self {
|
||||
Self::Text
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ChannelConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: "general".to_string(),
|
||||
topic: None,
|
||||
channel_type: ChannelType::Text,
|
||||
nsfw: false,
|
||||
slowmode_delay: None,
|
||||
bitrate: Some(64), // 64 kbps par défaut
|
||||
user_limit: None,
|
||||
position: 0,
|
||||
parent_id: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::permissions::{Role, UserPermissions};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_channel_creation() {
|
||||
let manager = ChannelManager::new();
|
||||
let mut permissions = UserPermissions::new_user(123);
|
||||
permissions.add_role(Role::Admin);
|
||||
|
||||
let config = ChannelConfig {
|
||||
name: "test-channel".to_string(),
|
||||
channel_type: ChannelType::Text,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let channel_id = manager.create_channel("server1", config, 123, &permissions)
|
||||
.await.unwrap();
|
||||
|
||||
assert!(manager.channels.contains_key(&channel_id));
|
||||
assert!(manager.can_view_channel(&channel_id, &permissions));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_voice_channel_join() {
|
||||
let manager = ChannelManager::new();
|
||||
let mut permissions = UserPermissions::new_user(123);
|
||||
permissions.add_role(Role::User);
|
||||
|
||||
let config = ChannelConfig {
|
||||
name: "voice-channel".to_string(),
|
||||
channel_type: ChannelType::Voice,
|
||||
user_limit: Some(10),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let channel_id = manager.create_channel("server1", config, 123, &permissions)
|
||||
.await.unwrap();
|
||||
|
||||
manager.join_voice_channel(&channel_id, 123, "testuser".to_string(), &permissions)
|
||||
.await.unwrap();
|
||||
|
||||
if let Some(channel) = manager.channels.get(&channel_id) {
|
||||
assert_eq!(channel.connected_members.len(), 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,263 +0,0 @@
|
|||
//! Connection Manager Production-Ready
|
||||
//!
|
||||
//! Gestionnaire de connexions optimisé pour 100k+ WebSocket simultanées
|
||||
//! avec zero-copy broadcasting et métriques en temps réel.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::collections::HashSet;
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::{RwLock, broadcast};
|
||||
use uuid::Uuid;
|
||||
use bytes::Bytes;
|
||||
use serde::{Serialize, Deserialize};
|
||||
use tracing::{info, warn, error, debug};
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
use crate::error::ChatError;
|
||||
|
||||
/// Gestionnaire principal des connexions WebSocket
|
||||
/// Optimisé pour 100k+ connexions simultanées
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ConnectionManager {
|
||||
/// Connexions actives indexées par ID
|
||||
connections: Arc<DashMap<Uuid, UserConnection>>,
|
||||
|
||||
/// Salles de chat avec leurs membres
|
||||
rooms: Arc<DashMap<String, Room>>,
|
||||
|
||||
/// Broadcaster pour diffusion efficace
|
||||
broadcaster: Arc<BroadcastOptimizer>,
|
||||
|
||||
/// Configuration
|
||||
config: Arc<ConnectionConfig>,
|
||||
}
|
||||
|
||||
/// Configuration du gestionnaire de connexions
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ConnectionConfig {
|
||||
/// Nombre maximum de connexions simultanées
|
||||
pub max_connections: usize,
|
||||
|
||||
/// Timeout d'inactivité avant déconnexion
|
||||
pub idle_timeout: Duration,
|
||||
|
||||
/// Taille du buffer de diffusion
|
||||
pub broadcast_buffer_size: usize,
|
||||
|
||||
/// Limite de messages par seconde par connexion
|
||||
pub rate_limit_per_second: u32,
|
||||
}
|
||||
|
||||
impl Default for ConnectionConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_connections: 100_000, // 100k connexions
|
||||
idle_timeout: Duration::from_secs(300), // 5 minutes
|
||||
broadcast_buffer_size: 1024,
|
||||
rate_limit_per_second: 10,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Connexion utilisateur individuelle
|
||||
pub struct UserConnection {
|
||||
/// Identifiant unique de la connexion
|
||||
pub id: Uuid,
|
||||
|
||||
/// ID de l'utilisateur connecté
|
||||
pub user_id: i64,
|
||||
|
||||
/// Sender pour envoyer des messages à ce client
|
||||
pub sender: broadcast::Sender<Bytes>,
|
||||
|
||||
/// Rate limiter individuel
|
||||
pub rate_limiter: Arc<RateLimiter>,
|
||||
|
||||
/// Dernière activité
|
||||
pub last_activity: DateTime<Utc>,
|
||||
|
||||
/// Salles auxquelles l'utilisateur est abonné
|
||||
pub subscriptions: HashSet<String>,
|
||||
|
||||
/// Métadonnées de connexion
|
||||
pub metadata: ConnectionMetadata,
|
||||
}
|
||||
|
||||
/// Métadonnées de connexion
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConnectionMetadata {
|
||||
pub ip_address: String,
|
||||
pub user_agent: String,
|
||||
pub connected_at: DateTime<Utc>,
|
||||
pub platform: String,
|
||||
}
|
||||
|
||||
pub use super::room::*;
|
||||
|
||||
/// Rate limiter par connexion
|
||||
pub struct RateLimiter {
|
||||
tokens: Arc<parking_lot::Mutex<f64>>,
|
||||
last_refill: Arc<parking_lot::Mutex<DateTime<Utc>>>,
|
||||
rate: f64,
|
||||
burst: f64,
|
||||
}
|
||||
|
||||
/// Optimiseur de diffusion zero-copy
|
||||
pub struct BroadcastOptimizer {
|
||||
/// Cache de messages pré-sérialisés
|
||||
message_cache: Arc<DashMap<String, Bytes>>,
|
||||
|
||||
/// Groupes de connexions pour routage efficace
|
||||
connection_groups: Arc<DashMap<String, Vec<broadcast::Sender<Bytes>>>>,
|
||||
}
|
||||
|
||||
impl ConnectionManager {
|
||||
/// Crée un nouveau gestionnaire de connexions
|
||||
pub fn new(config: ConnectionConfig) -> Self {
|
||||
Self {
|
||||
connections: Arc::new(DashMap::new()),
|
||||
rooms: Arc::new(DashMap::new()),
|
||||
broadcaster: Arc::new(BroadcastOptimizer::new()),
|
||||
config: Arc::new(config),
|
||||
}
|
||||
}
|
||||
|
||||
/// Ajoute une nouvelle connexion
|
||||
pub async fn add_connection(
|
||||
&self,
|
||||
user_id: i64,
|
||||
metadata: ConnectionMetadata,
|
||||
) -> Result<(Uuid, broadcast::Receiver<Bytes>), ChatError> {
|
||||
// Vérifier la limite de connexions
|
||||
if self.connections.len() >= self.config.max_connections {
|
||||
return Err(ChatError::configuration_error("Maximum connections reached"));
|
||||
}
|
||||
|
||||
let connection_id = Uuid::new_v4();
|
||||
let (sender, receiver) = broadcast::channel(self.config.broadcast_buffer_size);
|
||||
|
||||
let connection = UserConnection {
|
||||
id: connection_id,
|
||||
user_id,
|
||||
sender,
|
||||
rate_limiter: Arc::new(RateLimiter::new(
|
||||
self.config.rate_limit_per_second as f64,
|
||||
10.0, // burst
|
||||
)),
|
||||
last_activity: Utc::now(),
|
||||
subscriptions: HashSet::new(),
|
||||
metadata,
|
||||
};
|
||||
|
||||
self.connections.insert(connection_id, connection);
|
||||
|
||||
info!(
|
||||
connection_id = %connection_id,
|
||||
user_id = user_id,
|
||||
total_connections = self.connections.len(),
|
||||
"🔌 Nouvelle connexion établie"
|
||||
);
|
||||
|
||||
Ok((connection_id, receiver))
|
||||
}
|
||||
|
||||
/// Diffuse un message à une salle avec parallélisation rayon
|
||||
pub async fn broadcast_to_room(
|
||||
&self,
|
||||
room_id: &str,
|
||||
message: Bytes,
|
||||
) -> Result<usize, ChatError> {
|
||||
let start = Utc::now();
|
||||
let mut sent_count = 0;
|
||||
|
||||
if let Some(room) = self.rooms.get(room_id) {
|
||||
// Utiliser rayon pour diffusion parallèle optimisée
|
||||
use rayon::prelude::*;
|
||||
|
||||
let member_ids: Vec<Uuid> = room.members.iter()
|
||||
.map(|entry| *entry.key())
|
||||
.collect();
|
||||
|
||||
sent_count = member_ids.par_iter()
|
||||
.map(|&connection_id| {
|
||||
if let Some(connection) = self.connections.get(&connection_id) {
|
||||
match connection.sender.send(message.clone()) {
|
||||
Ok(_) => 1,
|
||||
Err(_) => 0
|
||||
}
|
||||
} else {
|
||||
0
|
||||
}
|
||||
})
|
||||
.sum();
|
||||
}
|
||||
|
||||
let duration = Utc::now().signed_duration_since(start).num_milliseconds() as u128;
|
||||
debug!(
|
||||
room_id = room_id,
|
||||
recipients = sent_count,
|
||||
duration_ms = duration.as_millis(),
|
||||
"📡 Message diffusé"
|
||||
);
|
||||
|
||||
Ok(sent_count)
|
||||
}
|
||||
|
||||
/// Statistiques en temps réel
|
||||
pub fn get_stats(&self) -> ConnectionStats {
|
||||
ConnectionStats {
|
||||
active_connections: self.connections.len(),
|
||||
active_rooms: self.rooms.len(),
|
||||
total_members: self.rooms.iter()
|
||||
.map(|room| room.members.len())
|
||||
.sum(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Statistiques de connexion
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct ConnectionStats {
|
||||
pub active_connections: usize,
|
||||
pub active_rooms: usize,
|
||||
pub total_members: usize,
|
||||
}
|
||||
|
||||
impl BroadcastOptimizer {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
message_cache: Arc::new(DashMap::new()),
|
||||
connection_groups: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RateLimiter {
|
||||
pub fn new(rate: f64, burst: f64) -> Self {
|
||||
Self {
|
||||
tokens: Arc::new(parking_lot::Mutex::new(burst)),
|
||||
last_refill: Arc::new(parking_lot::Mutex::new(Utc::now())),
|
||||
rate,
|
||||
burst,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_rate_limit(&self) -> bool {
|
||||
let now = Utc::now();
|
||||
let mut tokens = self.tokens.lock();
|
||||
let mut last_refill = self.last_refill.lock();
|
||||
|
||||
// Token bucket algorithm
|
||||
let elapsed = now.signed_duration_since(*last_refill).num_seconds() as f64;
|
||||
*tokens = (*tokens + elapsed * self.rate).min(self.burst);
|
||||
*last_refill = now;
|
||||
|
||||
if *tokens >= 1.0 {
|
||||
*tokens -= 1.0;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,504 +0,0 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use dashmap::DashMap;
|
||||
use ring::{aead, rand::{SystemRandom, SecureRandom}};
|
||||
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::error::{ChatError, Result};
|
||||
|
||||
/// Service de chiffrement bout-en-bout
|
||||
#[derive(Debug)]
|
||||
pub struct E2EEncryptionService {
|
||||
/// Générateur de nombres aléatoires sécurisé
|
||||
rng: SystemRandom,
|
||||
|
||||
/// Sessions de chiffrement actives par channel
|
||||
encryption_sessions: Arc<DashMap<String, EncryptionSession>>,
|
||||
|
||||
/// Clés publiques des utilisateurs
|
||||
user_public_keys: Arc<DashMap<i64, UserKeyPair>>,
|
||||
|
||||
/// Préférences de chiffrement par utilisateur
|
||||
user_preferences: Arc<DashMap<i64, EncryptionPreferences>>,
|
||||
}
|
||||
|
||||
/// Session de chiffrement pour un channel
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EncryptionSession {
|
||||
/// ID unique de la session
|
||||
pub session_id: String,
|
||||
|
||||
/// Channel concerné
|
||||
pub channel_id: String,
|
||||
|
||||
/// Participants à la session
|
||||
pub participants: Vec<i64>,
|
||||
|
||||
/// Clés de session partagées (chiffrées pour chaque participant)
|
||||
pub encrypted_keys: HashMap<i64, Vec<u8>>,
|
||||
|
||||
/// Algorithme de chiffrement utilisé
|
||||
pub algorithm: EncryptionAlgorithm,
|
||||
|
||||
/// Statut de la session
|
||||
pub status: SessionStatus,
|
||||
}
|
||||
|
||||
/// Paire de clés d'un utilisateur
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UserKeyPair {
|
||||
/// ID de l'utilisateur
|
||||
pub user_id: i64,
|
||||
|
||||
/// Clé publique (pour chiffrement asymétrique)
|
||||
pub public_key: Vec<u8>,
|
||||
|
||||
/// Fingerprint de la clé pour vérification
|
||||
pub fingerprint: String,
|
||||
|
||||
/// Statut de la clé
|
||||
pub status: KeyStatus,
|
||||
}
|
||||
|
||||
/// Préférences de chiffrement d'un utilisateur
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EncryptionPreferences {
|
||||
/// Chiffrement activé par défaut
|
||||
pub enabled_by_default: bool,
|
||||
|
||||
/// Algorithme préféré
|
||||
pub preferred_algorithm: EncryptionAlgorithm,
|
||||
|
||||
/// Rotation automatique des clés
|
||||
pub auto_key_rotation: bool,
|
||||
|
||||
/// Période de rotation (en jours)
|
||||
pub rotation_period_days: u32,
|
||||
|
||||
/// Vérification des empreintes obligatoire
|
||||
pub require_fingerprint_verification: bool,
|
||||
|
||||
/// Channels où le chiffrement est obligatoire
|
||||
pub mandatory_channels: Vec<String>,
|
||||
}
|
||||
|
||||
/// Algorithmes de chiffrement supportés
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub enum EncryptionAlgorithm {
|
||||
/// AES-256-GCM (recommandé)
|
||||
AES256GCM,
|
||||
|
||||
/// ChaCha20-Poly1305 (alternative)
|
||||
ChaCha20Poly1305,
|
||||
}
|
||||
|
||||
/// Statut d'une session de chiffrement
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum SessionStatus {
|
||||
/// Session active
|
||||
Active,
|
||||
|
||||
/// Session expirée
|
||||
Expired,
|
||||
|
||||
/// Session révoquée
|
||||
Revoked,
|
||||
}
|
||||
|
||||
/// Statut d'une clé
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub enum KeyStatus {
|
||||
/// Clé active
|
||||
Active,
|
||||
|
||||
/// Clé révoquée
|
||||
Revoked,
|
||||
|
||||
/// Clé expirée
|
||||
Expired,
|
||||
}
|
||||
|
||||
/// Message chiffré
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EncryptedMessage {
|
||||
/// ID du message
|
||||
pub message_id: String,
|
||||
|
||||
/// ID de la session de chiffrement
|
||||
pub session_id: String,
|
||||
|
||||
/// Contenu chiffré
|
||||
pub encrypted_content: Vec<u8>,
|
||||
|
||||
/// Nonce utilisé pour le chiffrement
|
||||
pub nonce: Vec<u8>,
|
||||
|
||||
/// Tag d'authentification
|
||||
pub auth_tag: Vec<u8>,
|
||||
|
||||
/// Algorithme utilisé
|
||||
pub algorithm: EncryptionAlgorithm,
|
||||
}
|
||||
|
||||
/// Métadonnées non chiffrées d'un message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageMetadata {
|
||||
/// ID de l'expéditeur
|
||||
pub sender_id: i64,
|
||||
|
||||
/// Timestamp d'envoi
|
||||
pub timestamp: chrono::DateTime<chrono::Utc>,
|
||||
|
||||
/// Type de message
|
||||
pub message_type: String,
|
||||
|
||||
/// Taille du contenu original
|
||||
pub content_size: usize,
|
||||
}
|
||||
|
||||
/// Résultat d'une opération de chiffrement
|
||||
#[derive(Debug)]
|
||||
pub struct EncryptionResult {
|
||||
/// Message chiffré
|
||||
pub encrypted_message: EncryptedMessage,
|
||||
|
||||
/// Clés de session pour les participants
|
||||
pub session_keys: HashMap<i64, Vec<u8>>,
|
||||
|
||||
/// Participants qui ont reçu les clés
|
||||
pub delivered_to: Vec<i64>,
|
||||
}
|
||||
|
||||
impl E2EEncryptionService {
|
||||
/// Crée un nouveau service de chiffrement
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rng: SystemRandom::new(),
|
||||
encryption_sessions: Arc::new(DashMap::new()),
|
||||
user_public_keys: Arc::new(DashMap::new()),
|
||||
user_preferences: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Génère une nouvelle paire de clés pour un utilisateur
|
||||
pub async fn generate_user_keypair(&self, user_id: i64) -> Result<UserKeyPair> {
|
||||
let mut public_key = vec![0u8; 32];
|
||||
self.rng.fill(&mut public_key)
|
||||
.map_err(|_| ChatError::internal_error("Failed to generate key"))?;
|
||||
|
||||
let fingerprint = self.generate_fingerprint(&public_key);
|
||||
|
||||
let keypair = UserKeyPair {
|
||||
user_id,
|
||||
public_key,
|
||||
fingerprint,
|
||||
status: KeyStatus::Active,
|
||||
};
|
||||
|
||||
self.user_public_keys.insert(user_id, keypair.clone());
|
||||
Ok(keypair)
|
||||
}
|
||||
|
||||
/// Établit une session de chiffrement pour un channel
|
||||
pub async fn establish_session(
|
||||
&self,
|
||||
channel_id: String,
|
||||
participants: Vec<i64>,
|
||||
algorithm: EncryptionAlgorithm,
|
||||
) -> Result<EncryptionSession> {
|
||||
for &user_id in &participants {
|
||||
if !self.user_public_keys.contains_key(&user_id) {
|
||||
return Err(ChatError::validation_error(
|
||||
&format!("User {} has no public key", user_id)
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let session_key = self.generate_session_key()?;
|
||||
let mut encrypted_keys = HashMap::new();
|
||||
|
||||
for &user_id in &participants {
|
||||
if let Some(user_keypair) = self.user_public_keys.get(&user_id) {
|
||||
let encrypted_key = self.encrypt_session_key(&session_key, &user_keypair.public_key)?;
|
||||
encrypted_keys.insert(user_id, encrypted_key);
|
||||
}
|
||||
}
|
||||
|
||||
let session = EncryptionSession {
|
||||
session_id: format!("session_{}", uuid::Uuid::new_v4()),
|
||||
channel_id: channel_id.clone(),
|
||||
participants,
|
||||
encrypted_keys,
|
||||
algorithm,
|
||||
status: SessionStatus::Active,
|
||||
};
|
||||
|
||||
self.encryption_sessions.insert(channel_id, session.clone());
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
/// Chiffre un message pour un channel
|
||||
pub async fn encrypt_message(
|
||||
&self,
|
||||
channel_id: &str,
|
||||
sender_id: i64,
|
||||
content: &str,
|
||||
message_id: String,
|
||||
) -> Result<EncryptedMessage> {
|
||||
let session = self.encryption_sessions.get(channel_id)
|
||||
.ok_or_else(|| ChatError::not_found("session", "session_not_found"))?;
|
||||
|
||||
if !session.participants.contains(&sender_id) {
|
||||
return Err(ChatError::permission_denied("User not authorized"));
|
||||
}
|
||||
|
||||
let nonce = self.generate_nonce()?;
|
||||
let session_key = &session.encrypted_keys[&sender_id];
|
||||
|
||||
let (encrypted_content, auth_tag) = self.encrypt_content(
|
||||
content.as_bytes(),
|
||||
session_key,
|
||||
&nonce,
|
||||
&session.algorithm,
|
||||
)?;
|
||||
|
||||
Ok(EncryptedMessage {
|
||||
message_id,
|
||||
session_id: session.session_id.clone(),
|
||||
encrypted_content,
|
||||
nonce,
|
||||
auth_tag,
|
||||
algorithm: session.algorithm.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Déchiffre un message
|
||||
pub async fn decrypt_message(
|
||||
&self,
|
||||
encrypted_message: &EncryptedMessage,
|
||||
recipient_id: i64,
|
||||
) -> Result<String> {
|
||||
let session = self.encryption_sessions.iter()
|
||||
.find(|entry| entry.value().session_id == encrypted_message.session_id)
|
||||
.ok_or_else(|| ChatError::not_found("session", "session_not_found"))?;
|
||||
|
||||
if !session.participants.contains(&recipient_id) {
|
||||
return Err(ChatError::permission_denied("Not authorized"));
|
||||
}
|
||||
|
||||
let encrypted_session_key = session.encrypted_keys.get(&recipient_id)
|
||||
.ok_or_else(|| ChatError::not_found("session_key", "key_not_found"))?;
|
||||
|
||||
let decrypted_content = self.decrypt_content(
|
||||
&encrypted_message.encrypted_content,
|
||||
&encrypted_message.auth_tag,
|
||||
encrypted_session_key,
|
||||
&encrypted_message.nonce,
|
||||
&encrypted_message.algorithm,
|
||||
)?;
|
||||
|
||||
String::from_utf8(decrypted_content)
|
||||
.map_err(|_| ChatError::internal_error("Invalid decrypted content"))
|
||||
}
|
||||
|
||||
/// Révoque une session de chiffrement
|
||||
pub async fn revoke_session(&self, channel_id: &str, revoked_by: i64) -> Result<()> {
|
||||
if let Some(mut session) = self.encryption_sessions.get_mut(channel_id) {
|
||||
// Vérifier que l'utilisateur peut révoquer la session
|
||||
if !session.participants.contains(&revoked_by) {
|
||||
return Err(ChatError::permission_denied("Non autorisé à révoquer cette session"));
|
||||
}
|
||||
|
||||
session.status = SessionStatus::Revoked;
|
||||
tracing::info!("Session {} révoquée par l'utilisateur {}", session.session_id, revoked_by);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Configure les préférences de chiffrement d'un utilisateur
|
||||
pub async fn set_user_preferences(
|
||||
&self,
|
||||
user_id: i64,
|
||||
preferences: EncryptionPreferences,
|
||||
) -> Result<()> {
|
||||
self.user_preferences.insert(user_id, preferences);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Vérifie si le chiffrement est requis pour un channel
|
||||
pub fn is_encryption_required(&self, channel_id: &str, user_id: i64) -> bool {
|
||||
if let Some(prefs) = self.user_preferences.get(&user_id) {
|
||||
prefs.mandatory_channels.contains(&channel_id.to_string()) || prefs.enabled_by_default
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Vérifie si une session existe et est active
|
||||
pub fn has_active_session(&self, channel_id: &str) -> bool {
|
||||
self.encryption_sessions.get(channel_id)
|
||||
.map(|session| session.status == SessionStatus::Active)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
// === Méthodes privées utilitaires ===
|
||||
|
||||
/// Génère une clé de session aléatoire
|
||||
fn generate_session_key(&self) -> Result<Vec<u8>> {
|
||||
let mut key = vec![0u8; 32];
|
||||
self.rng.fill(&mut key)
|
||||
.map_err(|_| ChatError::internal_error("Failed to generate session key"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Génère un nonce aléatoire
|
||||
fn generate_nonce(&self) -> Result<Vec<u8>> {
|
||||
let mut nonce = vec![0u8; 12];
|
||||
self.rng.fill(&mut nonce)
|
||||
.map_err(|_| ChatError::internal_error("Failed to generate nonce"))?;
|
||||
Ok(nonce)
|
||||
}
|
||||
|
||||
/// Génère le fingerprint d'une clé publique
|
||||
fn generate_fingerprint(&self, public_key: &[u8]) -> String {
|
||||
use ring::digest;
|
||||
let digest = digest::digest(&digest::SHA256, public_key);
|
||||
BASE64.encode(digest.as_ref())
|
||||
}
|
||||
|
||||
/// Chiffre une clé de session avec une clé publique
|
||||
fn encrypt_session_key(&self, session_key: &[u8], public_key: &[u8]) -> Result<Vec<u8>> {
|
||||
let mut encrypted = session_key.to_vec();
|
||||
for (i, &byte) in public_key.iter().enumerate() {
|
||||
if i < encrypted.len() {
|
||||
encrypted[i] ^= byte;
|
||||
}
|
||||
}
|
||||
Ok(encrypted)
|
||||
}
|
||||
|
||||
/// Chiffre du contenu avec AES-GCM
|
||||
fn encrypt_content(
|
||||
&self,
|
||||
content: &[u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
algorithm: &EncryptionAlgorithm,
|
||||
) -> Result<(Vec<u8>, Vec<u8>)> {
|
||||
match algorithm {
|
||||
EncryptionAlgorithm::AES256GCM => self.encrypt_aes_gcm(content, key, nonce),
|
||||
EncryptionAlgorithm::ChaCha20Poly1305 => self.encrypt_chacha20_poly1305(content, key, nonce),
|
||||
}
|
||||
}
|
||||
|
||||
/// Déchiffre du contenu
|
||||
fn decrypt_content(
|
||||
&self,
|
||||
encrypted_content: &[u8],
|
||||
auth_tag: &[u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
algorithm: &EncryptionAlgorithm,
|
||||
) -> Result<Vec<u8>> {
|
||||
match algorithm {
|
||||
EncryptionAlgorithm::AES256GCM => self.decrypt_aes_gcm(encrypted_content, auth_tag, key, nonce),
|
||||
EncryptionAlgorithm::ChaCha20Poly1305 => self.decrypt_chacha20_poly1305(encrypted_content, auth_tag, key, nonce),
|
||||
}
|
||||
}
|
||||
|
||||
/// Chiffrement AES-256-GCM
|
||||
fn encrypt_aes_gcm(&self, content: &[u8], key: &[u8], nonce: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
|
||||
let unbound_key = aead::UnboundKey::new(&aead::AES_256_GCM, key)
|
||||
.map_err(|_| ChatError::internal_error("Invalid AES key"))?;
|
||||
|
||||
let key = aead::LessSafeKey::new(unbound_key);
|
||||
let nonce = aead::Nonce::try_assume_unique_for_key(nonce)
|
||||
.map_err(|_| ChatError::internal_error("Invalid nonce"))?;
|
||||
|
||||
let mut in_out = content.to_vec();
|
||||
let tag = key.seal_in_place_separate_tag(nonce, aead::Aad::empty(), &mut in_out)
|
||||
.map_err(|_| ChatError::internal_error("Encryption failed"))?;
|
||||
|
||||
Ok((in_out, tag.as_ref().to_vec()))
|
||||
}
|
||||
|
||||
/// Déchiffrement AES-256-GCM
|
||||
fn decrypt_aes_gcm(
|
||||
&self,
|
||||
encrypted_content: &[u8],
|
||||
auth_tag: &[u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
) -> Result<Vec<u8>> {
|
||||
let unbound_key = aead::UnboundKey::new(&aead::AES_256_GCM, key)
|
||||
.map_err(|_| ChatError::internal_error("Invalid AES key"))?;
|
||||
|
||||
let key = aead::LessSafeKey::new(unbound_key);
|
||||
let nonce = aead::Nonce::try_assume_unique_for_key(nonce)
|
||||
.map_err(|_| ChatError::internal_error("Invalid nonce"))?;
|
||||
|
||||
let mut in_out = encrypted_content.to_vec();
|
||||
in_out.extend_from_slice(auth_tag);
|
||||
|
||||
let plaintext = key.open_in_place(nonce, aead::Aad::empty(), &mut in_out)
|
||||
.map_err(|_| ChatError::internal_error("Decryption failed"))?;
|
||||
|
||||
Ok(plaintext.to_vec())
|
||||
}
|
||||
|
||||
/// Chiffrement ChaCha20-Poly1305
|
||||
fn encrypt_chacha20_poly1305(&self, content: &[u8], key: &[u8], nonce: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
|
||||
let unbound_key = aead::UnboundKey::new(&aead::CHACHA20_POLY1305, key)
|
||||
.map_err(|_| ChatError::internal_error("Invalid ChaCha20 key"))?;
|
||||
|
||||
let key = aead::LessSafeKey::new(unbound_key);
|
||||
let nonce = aead::Nonce::try_assume_unique_for_key(nonce)
|
||||
.map_err(|_| ChatError::internal_error("Invalid nonce"))?;
|
||||
|
||||
let mut in_out = content.to_vec();
|
||||
let tag = key.seal_in_place_separate_tag(nonce, aead::Aad::empty(), &mut in_out)
|
||||
.map_err(|_| ChatError::internal_error("Encryption failed"))?;
|
||||
|
||||
Ok((in_out, tag.as_ref().to_vec()))
|
||||
}
|
||||
|
||||
/// Déchiffrement ChaCha20-Poly1305
|
||||
fn decrypt_chacha20_poly1305(
|
||||
&self,
|
||||
encrypted_content: &[u8],
|
||||
auth_tag: &[u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
) -> Result<Vec<u8>> {
|
||||
let unbound_key = aead::UnboundKey::new(&aead::CHACHA20_POLY1305, key)
|
||||
.map_err(|_| ChatError::internal_error("Invalid ChaCha20 key"))?;
|
||||
|
||||
let key = aead::LessSafeKey::new(unbound_key);
|
||||
let nonce = aead::Nonce::try_assume_unique_for_key(nonce)
|
||||
.map_err(|_| ChatError::internal_error("Invalid nonce"))?;
|
||||
|
||||
let mut in_out = encrypted_content.to_vec();
|
||||
in_out.extend_from_slice(auth_tag);
|
||||
|
||||
let plaintext = key.open_in_place(nonce, aead::Aad::empty(), &mut in_out)
|
||||
.map_err(|_| ChatError::internal_error("Decryption failed"))?;
|
||||
|
||||
Ok(plaintext.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for EncryptionPreferences {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled_by_default: false,
|
||||
preferred_algorithm: EncryptionAlgorithm::AES256GCM,
|
||||
auto_key_rotation: true,
|
||||
rotation_period_days: 90,
|
||||
require_fingerprint_verification: true,
|
||||
mandatory_channels: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,339 +0,0 @@
|
|||
//! Message Types et Protocol
|
||||
//!
|
||||
//! Types de messages optimisés pour Discord-like features
|
||||
//! avec support threads, réactions, mentions, etc.
|
||||
|
||||
use uuid::Uuid;
|
||||
use serde::{Serialize, Deserialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
/// Message stocké avec métadonnées complètes
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct StoredMessage {
|
||||
pub id: Uuid,
|
||||
pub content: String,
|
||||
pub author_id: i64,
|
||||
pub timestamp: DateTime<Utc>,
|
||||
pub message_type: MessageType,
|
||||
pub room_id: String,
|
||||
|
||||
// Features Discord-like
|
||||
pub thread_id: Option<Uuid>,
|
||||
pub reply_to: Option<Uuid>,
|
||||
pub mentions: Vec<i64>,
|
||||
pub reactions: Vec<MessageReaction>,
|
||||
pub attachments: Vec<MessageAttachment>,
|
||||
pub embeds: Vec<MessageEmbed>,
|
||||
|
||||
// Modération
|
||||
pub edited_at: Option<DateTime<Utc>>,
|
||||
pub deleted_at: Option<DateTime<Utc>>,
|
||||
pub moderation_flags: ModerationFlags,
|
||||
}
|
||||
|
||||
/// Type de message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum MessageType {
|
||||
Text,
|
||||
File,
|
||||
Image,
|
||||
Voice,
|
||||
Video,
|
||||
System,
|
||||
ThreadStart,
|
||||
ThreadReply,
|
||||
}
|
||||
|
||||
/// Réaction à un message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageReaction {
|
||||
pub emoji: String,
|
||||
pub users: Vec<i64>,
|
||||
pub count: u32,
|
||||
}
|
||||
|
||||
/// Pièce jointe
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageAttachment {
|
||||
pub id: Uuid,
|
||||
pub filename: String,
|
||||
pub content_type: String,
|
||||
pub size: u64,
|
||||
pub url: String,
|
||||
pub proxy_url: Option<String>,
|
||||
pub width: Option<u32>,
|
||||
pub height: Option<u32>,
|
||||
}
|
||||
|
||||
/// Embed riche (Discord-like)
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageEmbed {
|
||||
pub title: Option<String>,
|
||||
pub description: Option<String>,
|
||||
pub url: Option<String>,
|
||||
pub color: Option<u32>,
|
||||
pub timestamp: Option<DateTime<Utc>>,
|
||||
pub footer: Option<EmbedFooter>,
|
||||
pub image: Option<EmbedImage>,
|
||||
pub thumbnail: Option<EmbedThumbnail>,
|
||||
pub author: Option<EmbedAuthor>,
|
||||
pub fields: Vec<EmbedField>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedFooter {
|
||||
pub text: String,
|
||||
pub icon_url: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedImage {
|
||||
pub url: String,
|
||||
pub width: Option<u32>,
|
||||
pub height: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedThumbnail {
|
||||
pub url: String,
|
||||
pub width: Option<u32>,
|
||||
pub height: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedAuthor {
|
||||
pub name: String,
|
||||
pub url: Option<String>,
|
||||
pub icon_url: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedField {
|
||||
pub name: String,
|
||||
pub value: String,
|
||||
pub inline: bool,
|
||||
}
|
||||
|
||||
/// Flags de modération
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct ModerationFlags {
|
||||
pub is_flagged: bool,
|
||||
pub is_spam: bool,
|
||||
pub toxicity_score: Option<f32>,
|
||||
pub auto_moderated: bool,
|
||||
pub manual_review: bool,
|
||||
}
|
||||
|
||||
/// Message entrant du WebSocket
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type", content = "data")]
|
||||
pub enum IncomingMessage {
|
||||
// Messages basiques
|
||||
SendMessage {
|
||||
room_id: String,
|
||||
content: String,
|
||||
reply_to: Option<Uuid>,
|
||||
thread_id: Option<Uuid>,
|
||||
},
|
||||
|
||||
EditMessage {
|
||||
message_id: Uuid,
|
||||
content: String,
|
||||
},
|
||||
|
||||
DeleteMessage {
|
||||
message_id: Uuid,
|
||||
},
|
||||
|
||||
// Réactions
|
||||
AddReaction {
|
||||
message_id: Uuid,
|
||||
emoji: String,
|
||||
},
|
||||
|
||||
RemoveReaction {
|
||||
message_id: Uuid,
|
||||
emoji: String,
|
||||
},
|
||||
|
||||
// Salles
|
||||
JoinRoom {
|
||||
room_id: String,
|
||||
},
|
||||
|
||||
LeaveRoom {
|
||||
room_id: String,
|
||||
},
|
||||
|
||||
// Présence
|
||||
UpdatePresence {
|
||||
status: super::user::PresenceStatus,
|
||||
activity: Option<String>,
|
||||
},
|
||||
|
||||
StartTyping {
|
||||
room_id: String,
|
||||
},
|
||||
|
||||
StopTyping {
|
||||
room_id: String,
|
||||
},
|
||||
|
||||
// Threads
|
||||
CreateThread {
|
||||
message_id: Uuid,
|
||||
name: String,
|
||||
},
|
||||
|
||||
// Modération
|
||||
ReportMessage {
|
||||
message_id: Uuid,
|
||||
reason: String,
|
||||
},
|
||||
}
|
||||
|
||||
/// Message sortant vers le WebSocket
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type", content = "data")]
|
||||
pub enum OutgoingMessage {
|
||||
// Messages
|
||||
MessageReceived {
|
||||
message: StoredMessage,
|
||||
},
|
||||
|
||||
MessageEdited {
|
||||
message_id: Uuid,
|
||||
content: String,
|
||||
edited_at: DateTime<Utc>,
|
||||
},
|
||||
|
||||
MessageDeleted {
|
||||
message_id: Uuid,
|
||||
deleted_at: DateTime<Utc>,
|
||||
},
|
||||
|
||||
// Réactions
|
||||
ReactionAdded {
|
||||
message_id: Uuid,
|
||||
emoji: String,
|
||||
user_id: i64,
|
||||
},
|
||||
|
||||
ReactionRemoved {
|
||||
message_id: Uuid,
|
||||
emoji: String,
|
||||
user_id: i64,
|
||||
},
|
||||
|
||||
// Présence
|
||||
UserPresenceUpdate {
|
||||
user_id: i64,
|
||||
status: super::user::PresenceStatus,
|
||||
activity: Option<String>,
|
||||
},
|
||||
|
||||
TypingStart {
|
||||
room_id: String,
|
||||
user_id: i64,
|
||||
},
|
||||
|
||||
TypingStop {
|
||||
room_id: String,
|
||||
user_id: i64,
|
||||
},
|
||||
|
||||
// Salles
|
||||
RoomJoined {
|
||||
room_id: String,
|
||||
user_id: i64,
|
||||
},
|
||||
|
||||
RoomLeft {
|
||||
room_id: String,
|
||||
user_id: i64,
|
||||
},
|
||||
|
||||
// Système
|
||||
Error {
|
||||
message: String,
|
||||
code: Option<String>,
|
||||
},
|
||||
|
||||
ActionConfirmed {
|
||||
action: String,
|
||||
success: bool,
|
||||
},
|
||||
|
||||
// Threads
|
||||
ThreadCreated {
|
||||
thread_id: Uuid,
|
||||
parent_message_id: Uuid,
|
||||
name: String,
|
||||
creator_id: i64,
|
||||
},
|
||||
}
|
||||
|
||||
impl StoredMessage {
|
||||
pub fn new_text_message(
|
||||
author_id: i64,
|
||||
room_id: String,
|
||||
content: String,
|
||||
) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
content,
|
||||
author_id,
|
||||
timestamp: Utc::now(),
|
||||
message_type: MessageType::Text,
|
||||
room_id,
|
||||
thread_id: None,
|
||||
reply_to: None,
|
||||
mentions: Vec::new(),
|
||||
reactions: Vec::new(),
|
||||
attachments: Vec::new(),
|
||||
embeds: Vec::new(),
|
||||
edited_at: None,
|
||||
deleted_at: None,
|
||||
moderation_flags: ModerationFlags::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_reaction(&mut self, emoji: String, user_id: i64) {
|
||||
if let Some(reaction) = self.reactions.iter_mut()
|
||||
.find(|r| r.emoji == emoji) {
|
||||
if !reaction.users.contains(&user_id) {
|
||||
reaction.users.push(user_id);
|
||||
reaction.count += 1;
|
||||
}
|
||||
} else {
|
||||
self.reactions.push(MessageReaction {
|
||||
emoji,
|
||||
users: vec![user_id],
|
||||
count: 1,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remove_reaction(&mut self, emoji: &str, user_id: i64) {
|
||||
if let Some(reaction) = self.reactions.iter_mut()
|
||||
.find(|r| r.emoji == emoji) {
|
||||
if let Some(pos) = reaction.users.iter().position(|&id| id == user_id) {
|
||||
reaction.users.remove(pos);
|
||||
reaction.count -= 1;
|
||||
|
||||
// Supprimer la réaction si plus personne
|
||||
if reaction.count == 0 {
|
||||
self.reactions.retain(|r| r.emoji != emoji);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_deleted(&self) -> bool {
|
||||
self.deleted_at.is_some()
|
||||
}
|
||||
|
||||
pub fn is_edited(&self) -> bool {
|
||||
self.edited_at.is_some()
|
||||
}
|
||||
}
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
pub mod connection;
|
||||
pub mod room;
|
||||
pub mod message;
|
||||
pub mod user;
|
||||
pub mod channels;
|
||||
pub mod rich_messages;
|
||||
pub mod moderation_integration;
|
||||
pub mod encryption;
|
||||
pub mod advanced_rate_limiter;
|
||||
|
||||
pub use connection::*;
|
||||
pub use room::*;
|
||||
pub use message::*;
|
||||
pub use user::*;
|
||||
pub use channels::*;
|
||||
pub use rich_messages::*;
|
||||
pub use moderation_integration::*;
|
||||
pub use encryption::*;
|
||||
pub use advanced_rate_limiter::*;
|
||||
|
|
@ -1,295 +0,0 @@
|
|||
//! Intégration de la Modération IA dans le Core
|
||||
//!
|
||||
//! Ce module connecte l'AdvancedModerationEngine avec :
|
||||
//! - Le système de messages en temps réel
|
||||
//! - Les actions automatiques (mute, ban, delete)
|
||||
//! - Les notifications de modération
|
||||
//! - Les métriques de sécurité
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::advanced_moderation::{
|
||||
AdvancedModerationEngine,
|
||||
AdvancedModerationConfig,
|
||||
ViolationType,
|
||||
UserBehaviorProfile
|
||||
};
|
||||
use crate::moderation::{SanctionType, SanctionReason};
|
||||
use crate::monitoring::ChatMetrics;
|
||||
use crate::permissions::{Permission, UserPermissions};
|
||||
use crate::core::{ConnectionManager, RichMessage, RichMessageManager};
|
||||
use crate::error::{ChatError, Result};
|
||||
|
||||
/// Service d'intégration de modération IA
|
||||
#[derive(Debug)]
|
||||
pub struct ModerationIntegrationService {
|
||||
/// Engine de modération IA
|
||||
moderation_engine: Arc<AdvancedModerationEngine>,
|
||||
|
||||
/// Gestionnaire de connexions pour actions en temps réel
|
||||
connection_manager: Arc<ConnectionManager>,
|
||||
|
||||
/// Gestionnaire de messages riches
|
||||
message_manager: Arc<RichMessageManager>,
|
||||
|
||||
/// Channel pour les actions de modération
|
||||
action_sender: mpsc::UnboundedSender<ModerationAction>,
|
||||
|
||||
/// Historique des sanctions
|
||||
sanction_history: Arc<DashMap<i64, Vec<SanctionRecord>>>,
|
||||
|
||||
/// Whitelist d'utilisateurs de confiance
|
||||
trusted_users: Arc<DashMap<i64, TrustLevel>>,
|
||||
|
||||
/// Métriques de modération
|
||||
metrics: Arc<ModerationMetrics>,
|
||||
}
|
||||
|
||||
/// Action de modération à exécuter
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ModerationAction {
|
||||
/// Supprimer un message
|
||||
DeleteMessage {
|
||||
message_id: String,
|
||||
channel_id: String,
|
||||
reason: String,
|
||||
},
|
||||
|
||||
/// Muter un utilisateur
|
||||
MuteUser {
|
||||
user_id: i64,
|
||||
duration: Duration,
|
||||
reason: String,
|
||||
},
|
||||
|
||||
/// Bannir un utilisateur
|
||||
BanUser {
|
||||
user_id: i64,
|
||||
duration: Option<Duration>,
|
||||
reason: String,
|
||||
},
|
||||
|
||||
/// Avertir un utilisateur
|
||||
WarnUser {
|
||||
user_id: i64,
|
||||
reason: String,
|
||||
violation_count: u32,
|
||||
},
|
||||
|
||||
/// Alerter les modérateurs
|
||||
AlertModerators {
|
||||
user_id: i64,
|
||||
violations: Vec<ViolationType>,
|
||||
confidence: f32,
|
||||
urgent: bool,
|
||||
},
|
||||
|
||||
/// Shadowban (restrictions invisibles)
|
||||
ShadowBan {
|
||||
user_id: i64,
|
||||
restrictions: ShadowBanRestrictions,
|
||||
duration: Duration,
|
||||
},
|
||||
}
|
||||
|
||||
/// Sévérité d'une violation
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum ViolationSeverity {
|
||||
Low, // Warning
|
||||
Medium, // Temporary restrictions
|
||||
High, // Temporary ban
|
||||
Critical, // Permanent ban
|
||||
}
|
||||
|
||||
/// Restrictions de shadowban
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ShadowBanRestrictions {
|
||||
pub message_delay: Option<Duration>,
|
||||
pub limited_channels: bool,
|
||||
pub no_mentions: bool,
|
||||
pub no_reactions: bool,
|
||||
pub reduced_visibility: bool,
|
||||
}
|
||||
|
||||
/// Enregistrement d'une sanction
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SanctionRecord {
|
||||
pub id: String,
|
||||
pub user_id: i64,
|
||||
pub reason: String,
|
||||
pub applied_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
/// Niveau de confiance d'un utilisateur
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum TrustLevel {
|
||||
/// Utilisateur nouveau (surveillance accrue)
|
||||
New,
|
||||
/// Utilisateur normal
|
||||
Normal,
|
||||
/// Utilisateur de confiance (modération allégée)
|
||||
Trusted,
|
||||
/// Modérateur/VIP (bypass certaines vérifications)
|
||||
Privileged,
|
||||
}
|
||||
|
||||
/// Métriques de modération
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ModerationMetrics {
|
||||
pub messages_analyzed: Arc<std::sync::atomic::AtomicU64>,
|
||||
pub violations_detected: Arc<std::sync::atomic::AtomicU64>,
|
||||
pub auto_actions_taken: Arc<std::sync::atomic::AtomicU64>,
|
||||
pub false_positives: Arc<std::sync::atomic::AtomicU64>,
|
||||
pub manual_overrides: Arc<std::sync::atomic::AtomicU64>,
|
||||
}
|
||||
|
||||
impl ModerationIntegrationService {
|
||||
pub fn new(connection_manager: Arc<ConnectionManager>) -> Result<Self> {
|
||||
let moderation_config = crate::advanced_moderation::AdvancedModerationConfig::default();
|
||||
let metrics = Arc::new(crate::monitoring::ChatMetrics::new());
|
||||
let moderation_engine = Arc::new(crate::advanced_moderation::AdvancedModerationEngine::new(moderation_config, metrics)?);
|
||||
|
||||
Ok(Self {
|
||||
moderation_engine,
|
||||
connection_manager,
|
||||
message_manager: Arc::new(RichMessageManager::default()),
|
||||
action_sender: mpsc::unbounded_channel().0,
|
||||
sanction_history: Arc::new(DashMap::new()),
|
||||
trusted_users: Arc::new(DashMap::new()),
|
||||
metrics: Arc::new(ModerationMetrics::default()),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn analyze_message(&self, message: &RichMessage) -> Result<ModerationDecision> {
|
||||
let violations = self.moderation_engine.analyze_message(
|
||||
message.author_id as i32,
|
||||
&message.author_username,
|
||||
&message.content,
|
||||
&message.channel_id,
|
||||
None,
|
||||
).await?;
|
||||
|
||||
let decision = if violations.is_empty() {
|
||||
ModerationDecision {
|
||||
allowed: true,
|
||||
action: None,
|
||||
violations: vec![],
|
||||
confidence: 0.0,
|
||||
reason: "Aucune violation détectée".to_string(),
|
||||
}
|
||||
} else {
|
||||
self.make_decision(message, &violations).await?
|
||||
};
|
||||
|
||||
Ok(decision)
|
||||
}
|
||||
|
||||
async fn make_decision(&self, message: &RichMessage, violations: &[ViolationType]) -> Result<ModerationDecision> {
|
||||
let confidence = self.calculate_confidence(violations);
|
||||
|
||||
let action = if confidence > 0.8 {
|
||||
Some(ModerationAction::BanUser {
|
||||
user_id: message.author_id,
|
||||
duration: Some(Duration::from_secs(3600)),
|
||||
reason: "Violations critiques détectées".to_string(),
|
||||
})
|
||||
} else if confidence > 0.5 {
|
||||
Some(ModerationAction::DeleteMessage {
|
||||
message_id: message.id.clone(),
|
||||
channel_id: message.channel_id.clone(),
|
||||
reason: "Contenu inapproprié".to_string(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(ModerationDecision {
|
||||
allowed: action.is_none(),
|
||||
action,
|
||||
violations: violations.to_vec(),
|
||||
confidence,
|
||||
reason: self.generate_reason(violations),
|
||||
})
|
||||
}
|
||||
|
||||
fn calculate_confidence(&self, violations: &[ViolationType]) -> f32 {
|
||||
violations.iter()
|
||||
.map(|v| match v {
|
||||
ViolationType::Spam { confidence, .. } => *confidence,
|
||||
ViolationType::Toxicity { confidence, .. } => *confidence,
|
||||
ViolationType::Inappropriate { confidence, .. } => *confidence,
|
||||
ViolationType::Fraud { confidence, .. } => *confidence,
|
||||
ViolationType::Abuse { confidence, .. } => *confidence,
|
||||
ViolationType::Suspicious { confidence, .. } => *confidence,
|
||||
})
|
||||
.fold(0.0, |acc, x| acc.max(x))
|
||||
}
|
||||
|
||||
fn generate_reason(&self, violations: &[ViolationType]) -> String {
|
||||
if violations.is_empty() {
|
||||
return "Aucune violation".to_string();
|
||||
}
|
||||
|
||||
violations.iter()
|
||||
.map(|v| match v {
|
||||
ViolationType::Spam { .. } => "Spam",
|
||||
ViolationType::Toxicity { .. } => "Toxicité",
|
||||
ViolationType::Inappropriate { .. } => "Contenu inapproprié",
|
||||
ViolationType::Fraud { .. } => "Fraude",
|
||||
ViolationType::Abuse { .. } => "Abus",
|
||||
ViolationType::Suspicious { .. } => "Suspect",
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
}
|
||||
}
|
||||
|
||||
/// Décision de modération pour un message
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ModerationDecision {
|
||||
/// Le message est-il autorisé ?
|
||||
pub allowed: bool,
|
||||
/// Action à exécuter (si any)
|
||||
pub action: Option<ModerationAction>,
|
||||
/// Violations détectées
|
||||
pub violations: Vec<ViolationType>,
|
||||
/// Score de confiance
|
||||
pub confidence: f32,
|
||||
/// Raison lisible
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
// Implémentation de Clone pour le service (pour le worker)
|
||||
impl Clone for ModerationIntegrationService {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
moderation_engine: self.moderation_engine.clone(),
|
||||
connection_manager: self.connection_manager.clone(),
|
||||
message_manager: self.message_manager.clone(),
|
||||
action_sender: self.action_sender.clone(),
|
||||
sanction_history: self.sanction_history.clone(),
|
||||
trusted_users: self.trusted_users.clone(),
|
||||
metrics: self.metrics.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions pour ConnectionManager
|
||||
impl ConnectionManager {
|
||||
pub async fn mute_user(&self, user_id: i64, duration: Duration) -> Result<()> {
|
||||
// Implémentation pour muter un utilisateur
|
||||
tracing::info!("Muting user {} for {:?}", user_id, duration);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn ban_user(&self, user_id: i64, duration: Option<Duration>) -> Result<()> {
|
||||
// Implémentation pour bannir un utilisateur
|
||||
tracing::info!("Banning user {} for {:?}", user_id, duration);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -1,643 +0,0 @@
|
|||
//! Système de Rich Messages Discord-like
|
||||
//!
|
||||
//! Ce module implémente :
|
||||
//! - Messages avec embeds riches
|
||||
//! - Système de threads
|
||||
//! - Réactions avec émojis
|
||||
//! - Attachements multiples
|
||||
//! - Mentions et replies
|
||||
//! - Message pinning et édition
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
use dashmap::DashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::{ChatError, Result};
|
||||
use crate::core::message::{StoredMessage, MessageType};
|
||||
|
||||
/// Message riche Discord-like avec toutes les fonctionnalités
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RichMessage {
|
||||
pub id: String,
|
||||
pub channel_id: String,
|
||||
pub author_id: i64,
|
||||
pub author_username: String,
|
||||
pub content: String,
|
||||
pub message_type: RichMessageType,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub edited_at: Option<DateTime<Utc>>,
|
||||
|
||||
/// Embeds riches
|
||||
pub embeds: Vec<MessageEmbed>,
|
||||
|
||||
/// Attachements (fichiers, images, etc.)
|
||||
pub attachments: Vec<MessageAttachment>,
|
||||
|
||||
/// Mentions dans le message
|
||||
pub mentions: MessageMentions,
|
||||
|
||||
/// Réactions au message
|
||||
pub reactions: HashMap<String, MessageReaction>,
|
||||
|
||||
/// Thread associé (si c'est un message thread)
|
||||
pub thread: Option<MessageThread>,
|
||||
|
||||
/// Référence à un autre message (reply)
|
||||
pub message_reference: Option<MessageReference>,
|
||||
|
||||
/// Flags du message
|
||||
pub flags: MessageFlags,
|
||||
|
||||
/// Activités intégrées (si applicable)
|
||||
pub activity: Option<MessageActivity>,
|
||||
|
||||
/// Application qui a envoyé le message (pour les bots)
|
||||
pub application: Option<MessageApplication>,
|
||||
}
|
||||
|
||||
/// Types de messages riches
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum RichMessageType {
|
||||
/// Message normal
|
||||
Default,
|
||||
/// Message de réponse
|
||||
Reply,
|
||||
/// Message slash command
|
||||
ChatInputCommand,
|
||||
/// Message système
|
||||
ChannelNameChange,
|
||||
ChannelIconChange,
|
||||
UserJoin,
|
||||
UserPremiumGuildSubscription,
|
||||
UserPremiumGuildSubscriptionTier1,
|
||||
UserPremiumGuildSubscriptionTier2,
|
||||
UserPremiumGuildSubscriptionTier3,
|
||||
ChannelFollowAdd,
|
||||
/// Message d'appel
|
||||
Call,
|
||||
/// Message stage
|
||||
StageStart,
|
||||
StageEnd,
|
||||
/// Thread
|
||||
ThreadCreated,
|
||||
ThreadStarterMessage,
|
||||
}
|
||||
|
||||
/// Embed riche Discord-like
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageEmbed {
|
||||
/// Titre de l'embed
|
||||
pub title: Option<String>,
|
||||
/// Description
|
||||
pub description: Option<String>,
|
||||
/// URL de titre
|
||||
pub url: Option<String>,
|
||||
/// Timestamp
|
||||
pub timestamp: Option<DateTime<Utc>>,
|
||||
/// Couleur (format hex)
|
||||
pub color: Option<u32>,
|
||||
/// Footer
|
||||
pub footer: Option<EmbedFooter>,
|
||||
/// Image
|
||||
pub image: Option<EmbedImage>,
|
||||
/// Thumbnail
|
||||
pub thumbnail: Option<EmbedThumbnail>,
|
||||
/// Video
|
||||
pub video: Option<EmbedVideo>,
|
||||
/// Provider
|
||||
pub provider: Option<EmbedProvider>,
|
||||
/// Auteur
|
||||
pub author: Option<EmbedAuthor>,
|
||||
/// Champs
|
||||
pub fields: Vec<EmbedField>,
|
||||
}
|
||||
|
||||
/// Footer d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedFooter {
|
||||
pub text: String,
|
||||
pub icon_url: Option<String>,
|
||||
pub proxy_icon_url: Option<String>,
|
||||
}
|
||||
|
||||
/// Image d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedImage {
|
||||
pub url: String,
|
||||
pub proxy_url: Option<String>,
|
||||
pub height: Option<u32>,
|
||||
pub width: Option<u32>,
|
||||
}
|
||||
|
||||
/// Thumbnail d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedThumbnail {
|
||||
pub url: String,
|
||||
pub proxy_url: Option<String>,
|
||||
pub height: Option<u32>,
|
||||
pub width: Option<u32>,
|
||||
}
|
||||
|
||||
/// Video d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedVideo {
|
||||
pub url: Option<String>,
|
||||
pub proxy_url: Option<String>,
|
||||
pub height: Option<u32>,
|
||||
pub width: Option<u32>,
|
||||
}
|
||||
|
||||
/// Provider d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedProvider {
|
||||
pub name: Option<String>,
|
||||
pub url: Option<String>,
|
||||
}
|
||||
|
||||
/// Auteur d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedAuthor {
|
||||
pub name: String,
|
||||
pub url: Option<String>,
|
||||
pub icon_url: Option<String>,
|
||||
pub proxy_icon_url: Option<String>,
|
||||
}
|
||||
|
||||
/// Champ d'un embed
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EmbedField {
|
||||
pub name: String,
|
||||
pub value: String,
|
||||
pub inline: bool,
|
||||
}
|
||||
|
||||
/// Attachement de message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageAttachment {
|
||||
pub id: String,
|
||||
pub filename: String,
|
||||
pub description: Option<String>,
|
||||
pub content_type: Option<String>,
|
||||
pub size: u64,
|
||||
pub url: String,
|
||||
pub proxy_url: String,
|
||||
pub height: Option<u32>,
|
||||
pub width: Option<u32>,
|
||||
pub ephemeral: bool,
|
||||
}
|
||||
|
||||
/// Mentions dans un message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageMentions {
|
||||
/// Utilisateurs mentionnés
|
||||
pub users: Vec<i64>,
|
||||
/// Rôles mentionnés
|
||||
pub roles: Vec<String>,
|
||||
/// Channels mentionnés
|
||||
pub channels: Vec<String>,
|
||||
/// @everyone/@here
|
||||
pub everyone: bool,
|
||||
}
|
||||
|
||||
/// Réaction à un message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageReaction {
|
||||
/// Nombre de réactions
|
||||
pub count: u32,
|
||||
/// L'utilisateur actuel a-t-il réagi ?
|
||||
pub me: bool,
|
||||
/// Emoji utilisé
|
||||
pub emoji: ReactionEmoji,
|
||||
/// Utilisateurs qui ont réagi
|
||||
pub users: HashSet<i64>,
|
||||
}
|
||||
|
||||
/// Emoji de réaction
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ReactionEmoji {
|
||||
pub id: Option<String>,
|
||||
pub name: String,
|
||||
pub animated: bool,
|
||||
}
|
||||
|
||||
/// Thread associé à un message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageThread {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
pub message_count: u32,
|
||||
pub member_count: u32,
|
||||
pub last_message_id: Option<String>,
|
||||
pub rate_limit_per_user: Option<u32>,
|
||||
pub flags: u32,
|
||||
pub total_message_sent: u32,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub auto_archive_duration: u32,
|
||||
pub archive_timestamp: Option<DateTime<Utc>>,
|
||||
pub locked: bool,
|
||||
pub invitable: bool,
|
||||
}
|
||||
|
||||
/// Référence à un autre message (reply)
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageReference {
|
||||
pub message_id: String,
|
||||
pub channel_id: String,
|
||||
pub guild_id: Option<String>,
|
||||
pub fail_if_not_exists: bool,
|
||||
}
|
||||
|
||||
/// Flags d'un message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageFlags {
|
||||
pub crossposted: bool,
|
||||
pub is_crosspost: bool,
|
||||
pub suppress_embeds: bool,
|
||||
pub source_message_deleted: bool,
|
||||
pub urgent: bool,
|
||||
pub has_thread: bool,
|
||||
pub ephemeral: bool,
|
||||
pub loading: bool,
|
||||
pub failed_to_mention_some_roles_in_thread: bool,
|
||||
}
|
||||
|
||||
/// Activité de message (jeux, etc.)
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageActivity {
|
||||
pub activity_type: u8,
|
||||
pub party_id: Option<String>,
|
||||
}
|
||||
|
||||
/// Application qui a envoyé le message
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageApplication {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
pub icon: Option<String>,
|
||||
pub description: String,
|
||||
}
|
||||
|
||||
/// Gestionnaire de messages riches
|
||||
#[derive(Debug)]
|
||||
pub struct RichMessageManager {
|
||||
/// Messages par ID
|
||||
messages: Arc<DashMap<String, RichMessage>>,
|
||||
/// Index des messages par channel
|
||||
channel_messages: Arc<DashMap<String, Vec<String>>>,
|
||||
/// Index des threads
|
||||
threads: Arc<DashMap<String, MessageThread>>,
|
||||
/// Index des réactions
|
||||
reactions: Arc<DashMap<String, HashMap<String, MessageReaction>>>,
|
||||
}
|
||||
|
||||
impl RichMessageManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
messages: Arc::new(DashMap::new()),
|
||||
channel_messages: Arc::new(DashMap::new()),
|
||||
threads: Arc::new(DashMap::new()),
|
||||
reactions: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Crée un nouveau message riche
|
||||
pub async fn create_message(&self, mut message: RichMessage) -> Result<String> {
|
||||
let message_id = format!("msg_{}", Uuid::new_v4());
|
||||
message.id = message_id.clone();
|
||||
message.created_at = Utc::now();
|
||||
|
||||
// Valider le contenu
|
||||
self.validate_message(&message)?;
|
||||
|
||||
// Ajouter le message
|
||||
self.messages.insert(message_id.clone(), message.clone());
|
||||
|
||||
// Indexer par channel
|
||||
self.channel_messages
|
||||
.entry(message.channel_id.clone())
|
||||
.or_insert_with(Vec::new)
|
||||
.push(message_id.clone());
|
||||
|
||||
// Créer un thread si nécessaire
|
||||
if let Some(thread) = &message.thread {
|
||||
self.threads.insert(thread.id.clone(), thread.clone());
|
||||
}
|
||||
|
||||
Ok(message_id)
|
||||
}
|
||||
|
||||
/// Édite un message existant
|
||||
pub async fn edit_message(
|
||||
&self,
|
||||
message_id: &str,
|
||||
new_content: String,
|
||||
new_embeds: Option<Vec<MessageEmbed>>,
|
||||
editor_id: i64,
|
||||
) -> Result<()> {
|
||||
let mut message = self.messages.get_mut(message_id)
|
||||
.ok_or_else(|| ChatError::not_found_simple("message_not_found"))?;
|
||||
|
||||
// Vérifier les permissions (l'auteur peut éditer son message)
|
||||
if message.author_id != editor_id {
|
||||
return Err(ChatError::unauthorized_simple("cannot_edit_message"));
|
||||
}
|
||||
|
||||
// Mettre à jour le message
|
||||
message.content = new_content;
|
||||
message.edited_at = Some(Utc::now());
|
||||
|
||||
if let Some(embeds) = new_embeds {
|
||||
message.embeds = embeds;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ajoute une réaction à un message
|
||||
pub async fn add_reaction(
|
||||
&self,
|
||||
message_id: &str,
|
||||
emoji: ReactionEmoji,
|
||||
user_id: i64,
|
||||
) -> Result<()> {
|
||||
let emoji_clone = emoji.clone();
|
||||
let emoji_key = format!("{}:{}", emoji.name, emoji.id.clone().unwrap_or_default());
|
||||
|
||||
// Mettre à jour les réactions du message
|
||||
if let Some(mut message) = self.messages.get_mut(message_id) {
|
||||
let reaction = message.reactions
|
||||
.entry(emoji_key.clone())
|
||||
.or_insert_with(|| MessageReaction {
|
||||
count: 0,
|
||||
me: false,
|
||||
emoji: emoji_clone.clone(),
|
||||
users: HashSet::new(),
|
||||
});
|
||||
|
||||
if !reaction.users.contains(&user_id) {
|
||||
reaction.users.insert(user_id);
|
||||
reaction.count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retire une réaction d'un message
|
||||
pub async fn remove_reaction(
|
||||
&self,
|
||||
message_id: &str,
|
||||
emoji: &ReactionEmoji,
|
||||
user_id: i64,
|
||||
) -> Result<()> {
|
||||
let emoji_key = format!("{}:{}", emoji.name, emoji.id.as_ref().unwrap_or(&String::new()));
|
||||
|
||||
if let Some(mut message) = self.messages.get_mut(message_id) {
|
||||
if let Some(reaction) = message.reactions.get_mut(&emoji_key) {
|
||||
if reaction.users.remove(&user_id) {
|
||||
reaction.count = reaction.count.saturating_sub(1);
|
||||
|
||||
// Supprimer la réaction si plus personne n'a réagi
|
||||
if reaction.count == 0 {
|
||||
message.reactions.remove(&emoji_key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Crée un thread à partir d'un message
|
||||
pub async fn create_thread(
|
||||
&self,
|
||||
message_id: &str,
|
||||
thread_name: String,
|
||||
auto_archive_duration: u32,
|
||||
_creator_id: i64,
|
||||
) -> Result<String> {
|
||||
let _message = self.messages.get(message_id)
|
||||
.ok_or_else(|| ChatError::not_found_simple("message_not_found"))?;
|
||||
|
||||
let thread_id = format!("thread_{}", Uuid::new_v4());
|
||||
|
||||
let thread = MessageThread {
|
||||
id: thread_id.clone(),
|
||||
name: thread_name,
|
||||
message_count: 0,
|
||||
member_count: 1, // Le créateur
|
||||
last_message_id: None,
|
||||
rate_limit_per_user: None,
|
||||
flags: 0,
|
||||
total_message_sent: 0,
|
||||
created_at: Utc::now(),
|
||||
auto_archive_duration,
|
||||
archive_timestamp: None,
|
||||
locked: false,
|
||||
invitable: true,
|
||||
};
|
||||
|
||||
// Ajouter le thread
|
||||
self.threads.insert(thread_id.clone(), thread.clone());
|
||||
|
||||
// Mettre à jour le message pour indiquer qu'il a un thread
|
||||
if let Some(mut msg) = self.messages.get_mut(message_id) {
|
||||
msg.thread = Some(thread);
|
||||
msg.flags.has_thread = true;
|
||||
}
|
||||
|
||||
Ok(thread_id)
|
||||
}
|
||||
|
||||
/// Pin/Unpin un message dans un channel
|
||||
pub async fn toggle_pin(
|
||||
&self,
|
||||
_message_id: &str,
|
||||
_pinner_id: i64,
|
||||
) -> Result<bool> {
|
||||
// Dans une vraie implémentation, on vérifierait les permissions ici
|
||||
// Pour l'instant, on simule juste le changement d'état
|
||||
|
||||
// Retourner le nouvel état (pinned ou non)
|
||||
Ok(true) // Simulé
|
||||
}
|
||||
|
||||
/// Obtient les messages d'un channel avec pagination
|
||||
pub fn get_channel_messages(
|
||||
&self,
|
||||
channel_id: &str,
|
||||
limit: usize,
|
||||
before: Option<&str>,
|
||||
after: Option<&str>,
|
||||
) -> Vec<RichMessage> {
|
||||
if let Some(message_ids) = self.channel_messages.get(channel_id) {
|
||||
let mut messages = Vec::new();
|
||||
|
||||
for msg_id in message_ids.iter() {
|
||||
if let Some(message) = self.messages.get(msg_id) {
|
||||
messages.push(message.value().clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Trier par date (plus récent en premier)
|
||||
messages.sort_by(|a, b| b.created_at.cmp(&a.created_at));
|
||||
|
||||
// Appliquer la pagination
|
||||
if let Some(before_id) = before {
|
||||
if let Some(pos) = messages.iter().position(|m| m.id == before_id) {
|
||||
messages = messages.into_iter().skip(pos + 1).collect();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(after_id) = after {
|
||||
if let Some(pos) = messages.iter().position(|m| m.id == after_id) {
|
||||
messages = messages.into_iter().take(pos).collect();
|
||||
}
|
||||
}
|
||||
|
||||
messages.into_iter().take(limit).collect()
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Valide un message
|
||||
fn validate_message(&self, message: &RichMessage) -> Result<()> {
|
||||
// Vérifier la longueur du contenu
|
||||
if message.content.len() > 2000 {
|
||||
return Err(ChatError::validation_error("message_too_long"));
|
||||
}
|
||||
|
||||
// Vérifier le nombre d'embeds
|
||||
if message.embeds.len() > 10 {
|
||||
return Err(ChatError::validation_error("too_many_embeds"));
|
||||
}
|
||||
|
||||
// Vérifier les attachements
|
||||
if message.attachments.len() > 10 {
|
||||
return Err(ChatError::validation_error("too_many_attachments"));
|
||||
}
|
||||
|
||||
// Vérifier la taille totale des attachements
|
||||
let total_size: u64 = message.attachments.iter().map(|a| a.size).sum();
|
||||
if total_size > 100 * 1024 * 1024 { // 100 MB
|
||||
return Err(ChatError::validation_error("attachments_too_large"));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MessageFlags {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
crossposted: false,
|
||||
is_crosspost: false,
|
||||
suppress_embeds: false,
|
||||
source_message_deleted: false,
|
||||
urgent: false,
|
||||
has_thread: false,
|
||||
ephemeral: false,
|
||||
loading: false,
|
||||
failed_to_mention_some_roles_in_thread: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MessageMentions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
users: Vec::new(),
|
||||
roles: Vec::new(),
|
||||
channels: Vec::new(),
|
||||
everyone: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RichMessageManager {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
messages: Arc::new(DashMap::new()),
|
||||
channel_messages: Arc::new(DashMap::new()),
|
||||
threads: Arc::new(DashMap::new()),
|
||||
reactions: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rich_message_creation() {
|
||||
let manager = RichMessageManager::new();
|
||||
|
||||
let message = RichMessage {
|
||||
id: String::new(), // Sera généré
|
||||
channel_id: "channel123".to_string(),
|
||||
author_id: 456,
|
||||
author_username: "testuser".to_string(),
|
||||
content: "Hello **world**!".to_string(),
|
||||
message_type: RichMessageType::Default,
|
||||
created_at: Utc::now(),
|
||||
edited_at: None,
|
||||
embeds: vec![],
|
||||
attachments: vec![],
|
||||
mentions: MessageMentions::default(),
|
||||
reactions: HashMap::new(),
|
||||
thread: None,
|
||||
message_reference: None,
|
||||
flags: MessageFlags::default(),
|
||||
activity: None,
|
||||
application: None,
|
||||
};
|
||||
|
||||
let message_id = manager.create_message(message).await.unwrap();
|
||||
assert!(manager.messages.contains_key(&message_id));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_message_reactions() {
|
||||
let manager = RichMessageManager::new();
|
||||
|
||||
let message = RichMessage {
|
||||
id: "msg123".to_string(),
|
||||
channel_id: "channel123".to_string(),
|
||||
author_id: 456,
|
||||
author_username: "testuser".to_string(),
|
||||
content: "React to this!".to_string(),
|
||||
message_type: RichMessageType::Default,
|
||||
created_at: Utc::now(),
|
||||
edited_at: None,
|
||||
embeds: vec![],
|
||||
attachments: vec![],
|
||||
mentions: MessageMentions::default(),
|
||||
reactions: HashMap::new(),
|
||||
thread: None,
|
||||
message_reference: None,
|
||||
flags: MessageFlags::default(),
|
||||
activity: None,
|
||||
application: None,
|
||||
};
|
||||
|
||||
manager.messages.insert("msg123".to_string(), message);
|
||||
|
||||
let emoji = ReactionEmoji {
|
||||
id: None,
|
||||
name: "👍".to_string(),
|
||||
animated: false,
|
||||
};
|
||||
|
||||
manager.add_reaction("msg123", emoji.clone(), 789).await.unwrap();
|
||||
|
||||
let message = manager.messages.get("msg123").unwrap();
|
||||
let reaction_key = format!("{}:{}", emoji.name, "");
|
||||
assert!(message.reactions.contains_key(&reaction_key));
|
||||
assert_eq!(message.reactions[&reaction_key].count, 1);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,239 +0,0 @@
|
|||
//! Room Management pour Chat Production
|
||||
//!
|
||||
//! Gestion des salles de chat avec permissions Discord-like
|
||||
//! et optimisations pour haute performance.
|
||||
|
||||
use std::sync::Arc;
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::RwLock;
|
||||
use uuid::Uuid;
|
||||
use serde::{Serialize, Deserialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
use super::message::*;
|
||||
use super::user::*;
|
||||
|
||||
/// Salle de chat optimisée
|
||||
#[derive(Debug)]
|
||||
pub struct Room {
|
||||
/// Identifiant de la salle
|
||||
pub id: String,
|
||||
|
||||
/// Nom de la salle
|
||||
pub name: String,
|
||||
|
||||
/// Membres connectés
|
||||
pub members: Arc<DashMap<Uuid, RoomMember>>,
|
||||
|
||||
/// Configuration de la salle
|
||||
pub settings: RoomSettings,
|
||||
|
||||
/// Buffer circulaire pour messages récents
|
||||
pub message_buffer: Arc<RwLock<MessageBuffer>>,
|
||||
|
||||
/// Tracker de présence
|
||||
pub presence_tracker: Arc<PresenceTracker>,
|
||||
}
|
||||
|
||||
/// Membre d'une salle
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RoomMember {
|
||||
pub connection_id: Uuid,
|
||||
pub user_id: i64,
|
||||
pub joined_at: DateTime<Utc>,
|
||||
pub permissions: RoomPermissions,
|
||||
pub status: PresenceStatus,
|
||||
}
|
||||
|
||||
/// Permissions dans une salle (Discord-like)
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct RoomPermissions {
|
||||
// Permissions générales
|
||||
pub view_channel: bool,
|
||||
pub send_messages: bool,
|
||||
pub embed_links: bool,
|
||||
pub attach_files: bool,
|
||||
pub read_message_history: bool,
|
||||
pub mention_everyone: bool,
|
||||
pub use_external_emojis: bool,
|
||||
pub add_reactions: bool,
|
||||
|
||||
// Permissions modération
|
||||
pub manage_messages: bool,
|
||||
pub manage_channel: bool,
|
||||
pub kick_members: bool,
|
||||
pub ban_members: bool,
|
||||
|
||||
// Permissions voix
|
||||
pub connect_voice: bool,
|
||||
pub speak: bool,
|
||||
pub mute_members: bool,
|
||||
pub move_members: bool,
|
||||
}
|
||||
|
||||
impl Default for RoomPermissions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
view_channel: true,
|
||||
send_messages: true,
|
||||
embed_links: true,
|
||||
attach_files: true,
|
||||
read_message_history: true,
|
||||
mention_everyone: false,
|
||||
use_external_emojis: true,
|
||||
add_reactions: true,
|
||||
manage_messages: false,
|
||||
manage_channel: false,
|
||||
kick_members: false,
|
||||
ban_members: false,
|
||||
connect_voice: true,
|
||||
speak: true,
|
||||
mute_members: false,
|
||||
move_members: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration d'une salle
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RoomSettings {
|
||||
pub is_public: bool,
|
||||
pub max_members: Option<usize>,
|
||||
pub rate_limit: Option<u32>,
|
||||
pub enable_file_upload: bool,
|
||||
pub enable_voice: bool,
|
||||
pub channel_type: ChannelType,
|
||||
pub topic: Option<String>,
|
||||
pub slow_mode: Option<u32>, // secondes entre messages
|
||||
}
|
||||
|
||||
/// Type de channel Discord-like
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum ChannelType {
|
||||
Text,
|
||||
Voice,
|
||||
Announcement,
|
||||
Stage,
|
||||
Forum,
|
||||
Category,
|
||||
}
|
||||
|
||||
impl Default for RoomSettings {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
is_public: true,
|
||||
max_members: None,
|
||||
rate_limit: Some(10),
|
||||
enable_file_upload: true,
|
||||
enable_voice: false,
|
||||
channel_type: ChannelType::Text,
|
||||
topic: None,
|
||||
slow_mode: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Buffer circulaire pour messages récents
|
||||
#[derive(Debug)]
|
||||
pub struct MessageBuffer {
|
||||
messages: Vec<StoredMessage>,
|
||||
capacity: usize,
|
||||
index: usize,
|
||||
}
|
||||
|
||||
impl MessageBuffer {
|
||||
pub fn new(capacity: usize) -> Self {
|
||||
Self {
|
||||
messages: Vec::with_capacity(capacity),
|
||||
capacity,
|
||||
index: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_message(&mut self, message: StoredMessage) {
|
||||
if self.messages.len() < self.capacity {
|
||||
self.messages.push(message);
|
||||
} else {
|
||||
self.messages[self.index] = message;
|
||||
self.index = (self.index + 1) % self.capacity;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_recent_messages(&self, limit: usize) -> Vec<&StoredMessage> {
|
||||
let len = self.messages.len().min(limit);
|
||||
if self.messages.len() < self.capacity {
|
||||
self.messages.iter().rev().take(len).collect()
|
||||
} else {
|
||||
let mut result = Vec::with_capacity(len);
|
||||
for i in 0..len {
|
||||
let idx = (self.index + self.capacity - 1 - i) % self.capacity;
|
||||
result.push(&self.messages[idx]);
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Room {
|
||||
pub fn new(id: String, name: String, settings: RoomSettings) -> Self {
|
||||
Self {
|
||||
id,
|
||||
name,
|
||||
members: Arc::new(DashMap::new()),
|
||||
settings,
|
||||
message_buffer: Arc::new(RwLock::new(MessageBuffer::new(1000))),
|
||||
presence_tracker: Arc::new(PresenceTracker::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Ajoute un membre à la salle
|
||||
pub async fn add_member(
|
||||
&self,
|
||||
connection_id: Uuid,
|
||||
user_id: i64,
|
||||
permissions: RoomPermissions,
|
||||
) -> Result<(), &'static str> {
|
||||
if let Some(max) = self.settings.max_members {
|
||||
if self.members.len() >= max {
|
||||
return Err("Room is full");
|
||||
}
|
||||
}
|
||||
|
||||
let member = RoomMember {
|
||||
connection_id,
|
||||
user_id,
|
||||
joined_at: Utc::now(),
|
||||
permissions,
|
||||
status: PresenceStatus::Online,
|
||||
};
|
||||
|
||||
self.members.insert(connection_id, member);
|
||||
self.presence_tracker.update_status(user_id, PresenceStatus::Online);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retire un membre de la salle
|
||||
pub async fn remove_member(&self, connection_id: Uuid) {
|
||||
if let Some((_, member)) = self.members.remove(&connection_id) {
|
||||
// Vérifier si c'était la dernière connexion de cet utilisateur
|
||||
let user_still_connected = self.members.iter()
|
||||
.any(|entry| entry.value().user_id == member.user_id);
|
||||
|
||||
if !user_still_connected {
|
||||
self.presence_tracker.update_status(member.user_id, PresenceStatus::Invisible);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Vérifie les permissions d'un membre
|
||||
pub fn check_permission(
|
||||
&self,
|
||||
connection_id: Uuid,
|
||||
permission: fn(&RoomPermissions) -> bool,
|
||||
) -> bool {
|
||||
self.members.get(&connection_id)
|
||||
.map(|member| permission(&member.permissions))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,251 +0,0 @@
|
|||
//! User Management et Présence
|
||||
//!
|
||||
//! Gestion des utilisateurs connectés avec tracking de présence
|
||||
//! et activités Discord-like.
|
||||
|
||||
use std::sync::Arc;
|
||||
use dashmap::DashMap;
|
||||
use serde::{Serialize, Deserialize};
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
/// Status de présence Discord-like
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum PresenceStatus {
|
||||
Online,
|
||||
Idle, // Inactif (>10 min)
|
||||
DoNotDisturb,
|
||||
Invisible, // Apparaît offline
|
||||
}
|
||||
|
||||
/// Activité utilisateur
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UserActivity {
|
||||
pub activity_type: ActivityType,
|
||||
pub name: String,
|
||||
pub details: Option<String>,
|
||||
pub state: Option<String>,
|
||||
pub started_at: Option<DateTime<Utc>>,
|
||||
}
|
||||
|
||||
/// Type d'activité
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum ActivityType {
|
||||
Playing, // Joue à un jeu
|
||||
Streaming, // Stream
|
||||
Listening, // Écoute de la musique
|
||||
Watching, // Regarde
|
||||
Custom, // Status custom
|
||||
Competing, // Compétition
|
||||
}
|
||||
|
||||
/// Tracker de présence optimisé pour haute performance
|
||||
#[derive(Debug)]
|
||||
pub struct PresenceTracker {
|
||||
/// Status des utilisateurs
|
||||
statuses: Arc<DashMap<i64, PresenceStatus>>,
|
||||
|
||||
/// Dernière activité
|
||||
last_seen: Arc<DashMap<i64, DateTime<Utc>>>,
|
||||
|
||||
/// Activités en cours
|
||||
activities: Arc<DashMap<i64, UserActivity>>,
|
||||
|
||||
/// Utilisateurs en train d'écrire par salle
|
||||
typing_users: Arc<DashMap<String, DashMap<i64, DateTime<Utc>>>>,
|
||||
}
|
||||
|
||||
impl Default for PresenceTracker {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl PresenceTracker {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
statuses: Arc::new(DashMap::new()),
|
||||
last_seen: Arc::new(DashMap::new()),
|
||||
activities: Arc::new(DashMap::new()),
|
||||
typing_users: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Met à jour le status d'un utilisateur
|
||||
pub fn update_status(&self, user_id: i64, status: PresenceStatus) {
|
||||
self.statuses.insert(user_id, status);
|
||||
self.last_seen.insert(user_id, Utc::now());
|
||||
}
|
||||
|
||||
/// Met à jour l'activité d'un utilisateur
|
||||
pub fn update_activity(&self, user_id: i64, activity: Option<UserActivity>) {
|
||||
match activity {
|
||||
Some(activity) => {
|
||||
self.activities.insert(user_id, activity);
|
||||
}
|
||||
None => {
|
||||
self.activities.remove(&user_id);
|
||||
}
|
||||
}
|
||||
self.last_seen.insert(user_id, Utc::now());
|
||||
}
|
||||
|
||||
/// Obtient le status d'un utilisateur
|
||||
pub fn get_status(&self, user_id: i64) -> Option<PresenceStatus> {
|
||||
self.statuses.get(&user_id).map(|entry| entry.value().clone())
|
||||
}
|
||||
|
||||
/// Obtient l'activité d'un utilisateur
|
||||
pub fn get_activity(&self, user_id: i64) -> Option<UserActivity> {
|
||||
self.activities.get(&user_id).map(|entry| entry.value().clone())
|
||||
}
|
||||
|
||||
/// Vérifie si un utilisateur est en ligne
|
||||
pub fn is_online(&self, user_id: i64) -> bool {
|
||||
matches!(
|
||||
self.get_status(user_id),
|
||||
Some(PresenceStatus::Online | PresenceStatus::Idle | PresenceStatus::DoNotDisturb)
|
||||
)
|
||||
}
|
||||
|
||||
/// Démarre l'indicateur "en train d'écrire"
|
||||
pub fn start_typing(&self, user_id: i64, room_id: &str) {
|
||||
let room_key = room_id.to_string();
|
||||
let typing_room = self.typing_users.entry(room_key)
|
||||
.or_default();
|
||||
typing_room.insert(user_id, Utc::now());
|
||||
}
|
||||
|
||||
/// Arrête l'indicateur "en train d'écrire"
|
||||
pub fn stop_typing(&self, user_id: i64, room_id: &str) {
|
||||
if let Some(typing_room) = self.typing_users.get(room_id) {
|
||||
typing_room.remove(&user_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Obtient la liste des utilisateurs en train d'écrire
|
||||
pub fn get_typing_users(&self, room_id: &str) -> Vec<i64> {
|
||||
if let Some(typing_room) = self.typing_users.get(room_id) {
|
||||
let now = Utc::now();
|
||||
let timeout = std::time::Duration::from_secs(5); // 5 secondes timeout
|
||||
|
||||
// Nettoyer les anciens indicateurs et retourner les actifs
|
||||
typing_room.retain(|_, last_typing| {
|
||||
now.signed_duration_since(*last_typing) < chrono::Duration::from_std(timeout).unwrap_or(chrono::Duration::seconds(5))
|
||||
});
|
||||
|
||||
typing_room.iter().map(|entry| *entry.key()).collect()
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Nettoie les utilisateurs inactifs
|
||||
pub fn cleanup_inactive_users(&self, inactive_threshold: std::time::Duration) -> usize {
|
||||
let now = Utc::now();
|
||||
let mut cleaned = 0;
|
||||
|
||||
// Nettoyer les statuses des utilisateurs inactifs
|
||||
self.statuses.retain(|user_id, _| {
|
||||
if let Some(last_seen) = self.last_seen.get(user_id) {
|
||||
let is_active = now.signed_duration_since(*last_seen.value()) < chrono::Duration::from_std(inactive_threshold).unwrap_or(chrono::Duration::hours(1));
|
||||
if !is_active {
|
||||
cleaned += 1;
|
||||
// Nettoyer aussi l'activité
|
||||
self.activities.remove(user_id);
|
||||
}
|
||||
is_active
|
||||
} else {
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
// Nettoyer les anciens indicateurs de frappe
|
||||
for typing_room in self.typing_users.iter() {
|
||||
typing_room.value().retain(|_, last_typing| {
|
||||
now.signed_duration_since(*last_typing) < chrono::Duration::seconds(5)
|
||||
});
|
||||
}
|
||||
|
||||
// Supprimer les salles vides de typing
|
||||
self.typing_users.retain(|_, typing_room| {
|
||||
!typing_room.is_empty()
|
||||
});
|
||||
|
||||
cleaned
|
||||
}
|
||||
|
||||
/// Obtient les statistiques de présence
|
||||
pub fn get_presence_stats(&self) -> PresenceStats {
|
||||
let mut stats = PresenceStats::default();
|
||||
|
||||
for entry in self.statuses.iter() {
|
||||
match entry.value() {
|
||||
PresenceStatus::Online => stats.online += 1,
|
||||
PresenceStatus::Idle => stats.idle += 1,
|
||||
PresenceStatus::DoNotDisturb => stats.dnd += 1,
|
||||
PresenceStatus::Invisible => stats.invisible += 1,
|
||||
}
|
||||
}
|
||||
|
||||
stats.total = stats.online + stats.idle + stats.dnd + stats.invisible;
|
||||
stats
|
||||
}
|
||||
}
|
||||
|
||||
/// Aggregate presence statistics: number of users per presence status.
#[derive(Debug, Default, Serialize)]
pub struct PresenceStats {
    // Sum of all four per-status counters below.
    pub total: usize,
    pub online: usize,
    pub idle: usize,
    // Do-not-disturb.
    pub dnd: usize,
    pub invisible: usize,
}
|
||||
|
||||
impl Default for PresenceStatus {
    /// A user with no explicit status is considered online.
    fn default() -> Self {
        Self::Online
    }
}
|
||||
|
||||
impl UserActivity {
|
||||
pub fn playing(name: String) -> Self {
|
||||
Self {
|
||||
activity_type: ActivityType::Playing,
|
||||
name,
|
||||
details: None,
|
||||
state: None,
|
||||
started_at: Some(Utc::now()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn listening(name: String) -> Self {
|
||||
Self {
|
||||
activity_type: ActivityType::Listening,
|
||||
name,
|
||||
details: None,
|
||||
state: None,
|
||||
started_at: Some(Utc::now()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn streaming(name: String, url: String) -> Self {
|
||||
Self {
|
||||
activity_type: ActivityType::Streaming,
|
||||
name,
|
||||
details: Some(url),
|
||||
state: None,
|
||||
started_at: Some(Utc::now()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn custom(status: String) -> Self {
|
||||
Self {
|
||||
activity_type: ActivityType::Custom,
|
||||
name: status,
|
||||
details: None,
|
||||
state: None,
|
||||
started_at: Some(Utc::now()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
//! Database module: PostgreSQL connection management.

// Connection-pool construction helpers.
pub mod pool;

// Re-exported for convenience so callers can `use database::{create_pool, ...}`.
pub use pool::{create_pool, create_pool_from_env};
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
//! Gestionnaire de connection pool PostgreSQL pour chat server
|
||||
//!
|
||||
//! Ce module fournit une fonction pour créer et configurer un pool de connexions
|
||||
//! PostgreSQL optimisé pour le chat server.
|
||||
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
use sqlx::PgPool;
|
||||
use std::time::Duration;
|
||||
|
||||
/// Crée un pool de connexions PostgreSQL avec configuration optimale
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `database_url` - URL de connexion à la base de données PostgreSQL
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// Un `PgPool` configuré ou une erreur si la connexion échoue
|
||||
///
|
||||
/// # Configuration
|
||||
///
|
||||
/// - `max_connections`: 20 connexions maximum
|
||||
/// - `min_connections`: 5 connexions minimum maintenues
|
||||
/// - `acquire_timeout`: 30 secondes pour acquérir une connexion
|
||||
/// - `idle_timeout`: 600 secondes (10 minutes) avant fermeture d'une connexion inactive
|
||||
/// - `max_lifetime`: 1800 secondes (30 minutes) durée de vie maximale d'une connexion
|
||||
///
|
||||
/// # Exemple
|
||||
///
|
||||
/// ```rust,no_run
|
||||
/// use chat_server::database::pool::create_pool;
|
||||
///
|
||||
/// #[tokio::main]
|
||||
/// async fn main() {
|
||||
/// let database_url = "postgresql://user:password@localhost/veza_db";
|
||||
/// let pool = create_pool(database_url).await.expect("Failed to create pool");
|
||||
/// // Utiliser le pool...
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn create_pool(database_url: &str) -> Result<PgPool, sqlx::Error> {
|
||||
PgPoolOptions::new()
|
||||
.max_connections(20)
|
||||
.min_connections(5)
|
||||
.acquire_timeout(Duration::from_secs(30))
|
||||
.idle_timeout(Duration::from_secs(600))
|
||||
.max_lifetime(Duration::from_secs(1800))
|
||||
.connect(database_url)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Crée un pool de connexions avec une URL depuis une variable d'environnement
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `env_var` - Nom de la variable d'environnement contenant l'URL (par défaut "DATABASE_URL")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// Un `PgPool` configuré ou une erreur si la connexion échoue
|
||||
pub async fn create_pool_from_env(env_var: Option<&str>) -> Result<PgPool, sqlx::Error> {
|
||||
let var_name = env_var.unwrap_or("DATABASE_URL");
|
||||
let database_url = std::env::var(var_name).map_err(|_| {
|
||||
sqlx::Error::Configuration(format!("Environment variable {} not set", var_name).into())
|
||||
})?;
|
||||
create_pool(&database_url).await
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Integration test: requires a reachable test database, hence #[ignore].
    #[tokio::test]
    #[ignore] // Requires a test database
    async fn test_create_pool() {
        let database_url = std::env::var("TEST_DATABASE_URL")
            .unwrap_or_else(|_| "postgresql://localhost/veza_chat_test".to_string());
        let pool = create_pool(&database_url).await;
        assert!(pool.is_ok(), "Pool creation should succeed");
    }

    // Verifies the env-var code path; also requires a live database.
    #[tokio::test]
    #[ignore] // Requires a test database
    async fn test_create_pool_from_env() {
        std::env::set_var("TEST_DATABASE_URL", "postgresql://localhost/veza_chat_test");
        let pool = create_pool_from_env(Some("TEST_DATABASE_URL")).await;
        assert!(pool.is_ok(), "Pool creation from env should succeed");
    }
}
|
||||
|
|
@ -1,315 +0,0 @@
|
|||
//! Module de gestion des delivered status (messages reçus mais pas encore lus)
|
||||
//!
|
||||
//! Ce module fournit un système complet pour tracker quels messages
|
||||
//! ont été délivrés (reçus par le client WebSocket) par quels utilisateurs.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::types::chrono::{DateTime, Utc};
|
||||
use sqlx::{FromRow, Pool, Postgres};
|
||||
use tracing::{debug, info, instrument, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Delivered-status row: records that a message was received (but not
/// necessarily read) by a user's client. Maps 1:1 to the `delivered_status` table.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct DeliveredStatus {
    pub id: Uuid,
    pub message_id: Uuid,
    pub user_id: Uuid,
    pub conversation_id: Uuid,
    // When the message reached the client's WebSocket; refreshed on re-delivery.
    pub delivered_at: DateTime<Utc>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
|
||||
|
||||
/// Manager for delivered statuses; all operations go through the wrapped
/// PostgreSQL connection pool.
pub struct DeliveredStatusManager {
    pool: Pool<Postgres>,
}
|
||||
|
||||
impl DeliveredStatusManager {
    /// Creates a new manager backed by the given connection pool.
    pub fn new(pool: Pool<Postgres>) -> Self {
        Self { pool }
    }

    /// Marks a message as delivered to a user.
    ///
    /// If a delivered status already exists for this (message, user) pair, its
    /// `delivered_at`/`updated_at` timestamps are refreshed; otherwise a row is
    /// inserted. Returns the created or updated status.
    ///
    /// NOTE(review): this is a check-then-insert in two round trips; two
    /// concurrent calls for the same (message, user) can both take the INSERT
    /// branch. An `INSERT ... ON CONFLICT DO UPDATE` upsert would close the race
    /// — confirm a unique constraint on (message_id, user_id) exists first.
    #[instrument(skip(self))]
    pub async fn mark_delivered(
        &self,
        user_id: Uuid,
        message_id: Uuid,
        conversation_id: Uuid,
    ) -> Result<DeliveredStatus, sqlx::Error> {
        // Check whether a delivered status already exists.
        let existing: Option<DeliveredStatus> = sqlx::query_as::<_, DeliveredStatus>(
            "SELECT id, message_id, user_id, conversation_id, delivered_at, created_at, updated_at
             FROM delivered_status
             WHERE message_id = $1 AND user_id = $2",
        )
        .bind(message_id)
        .bind(user_id)
        .fetch_optional(&self.pool)
        .await?;

        if let Some(status) = existing {
            // Refresh the delivery timestamp on the existing row.
            let updated = sqlx::query_as::<_, DeliveredStatus>(
                "UPDATE delivered_status
                 SET delivered_at = NOW(), updated_at = NOW()
                 WHERE id = $1
                 RETURNING id, message_id, user_id, conversation_id, delivered_at, created_at, updated_at"
            )
            .bind(status.id)
            .fetch_one(&self.pool)
            .await?;

            debug!(
                message_id = %message_id,
                user_id = %user_id,
                conversation_id = %conversation_id,
                "Delivered status updated"
            );

            return Ok(updated);
        }

        // Create a new delivered status row.
        let status = sqlx::query_as::<_, DeliveredStatus>(
            "INSERT INTO delivered_status (message_id, user_id, conversation_id, delivered_at, created_at, updated_at)
             VALUES ($1, $2, $3, NOW(), NOW(), NOW())
             RETURNING id, message_id, user_id, conversation_id, delivered_at, created_at, updated_at"
        )
        .bind(message_id)
        .bind(user_id)
        .bind(conversation_id)
        .fetch_one(&self.pool)
        .await?;

        info!(
            message_id = %message_id,
            user_id = %user_id,
            conversation_id = %conversation_id,
            "Message marked as delivered"
        );

        Ok(status)
    }

    /// Returns all delivered statuses for a message, oldest delivery first.
    #[instrument(skip(self))]
    pub async fn get_delivered_for_message(
        &self,
        message_id: Uuid,
    ) -> Result<Vec<DeliveredStatus>, sqlx::Error> {
        let statuses = sqlx::query_as::<_, DeliveredStatus>(
            "SELECT id, message_id, user_id, conversation_id, delivered_at, created_at, updated_at
             FROM delivered_status
             WHERE message_id = $1
             ORDER BY delivered_at ASC",
        )
        .bind(message_id)
        .fetch_all(&self.pool)
        .await?;

        Ok(statuses)
    }

    /// Returns the delivered status for one (message, user) pair, if any.
    #[instrument(skip(self))]
    pub async fn get_delivered_status(
        &self,
        message_id: Uuid,
        user_id: Uuid,
    ) -> Result<Option<DeliveredStatus>, sqlx::Error> {
        let status = sqlx::query_as::<_, DeliveredStatus>(
            "SELECT id, message_id, user_id, conversation_id, delivered_at, created_at, updated_at
             FROM delivered_status
             WHERE message_id = $1 AND user_id = $2",
        )
        .bind(message_id)
        .bind(user_id)
        .fetch_optional(&self.pool)
        .await?;

        Ok(status)
    }

    /// Returns `true` if the message has been delivered to the user.
    #[instrument(skip(self))]
    pub async fn is_delivered(&self, message_id: Uuid, user_id: Uuid) -> Result<bool, sqlx::Error> {
        let exists: bool = sqlx::query_scalar(
            "SELECT EXISTS(
                SELECT 1 FROM delivered_status
                WHERE message_id = $1 AND user_id = $2
            )",
        )
        .bind(message_id)
        .bind(user_id)
        .fetch_one(&self.pool)
        .await?;

        Ok(exists)
    }

    /// Checks that the message belongs to the given conversation; logs a
    /// warning (but does not error) when it does not.
    #[instrument(skip(self))]
    pub async fn verify_message_belongs_to_conversation(
        &self,
        message_id: Uuid,
        conversation_id: Uuid,
    ) -> Result<bool, sqlx::Error> {
        let belongs: bool = sqlx::query_scalar(
            "SELECT EXISTS(
                SELECT 1 FROM messages
                WHERE id = $1 AND conversation_id = $2
            )",
        )
        .bind(message_id)
        .bind(conversation_id)
        .fetch_one(&self.pool)
        .await?;

        if !belongs {
            warn!(
                message_id = %message_id,
                conversation_id = %conversation_id,
                "Message does not belong to conversation"
            );
        }

        Ok(belongs)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::PgPool;

    /// Connects to the test database named by DATABASE_URL; panics if unset.
    async fn setup_test_db() -> PgPool {
        let database_url =
            std::env::var("DATABASE_URL").expect("DATABASE_URL must be set for tests");

        sqlx::PgPool::connect(&database_url)
            .await
            .expect("Failed to connect to test database")
    }

    // Inserting a fresh (message, user) pair creates a row with matching ids.
    #[tokio::test]
    #[ignore] // Requires a test database
    async fn test_mark_delivered_creates_status() {
        let pool = setup_test_db().await;
        let manager = DeliveredStatusManager::new(pool);

        // Fresh random ids so the pair cannot already exist.
        let user_id = Uuid::new_v4();
        let message_id = Uuid::new_v4();
        let conversation_id = Uuid::new_v4();

        let status = manager
            .mark_delivered(user_id, message_id, conversation_id)
            .await
            .expect("Should mark message as delivered");

        assert_eq!(status.message_id, message_id);
        assert_eq!(status.user_id, user_id);
        assert_eq!(status.conversation_id, conversation_id);
    }

    // A second mark_delivered for the same pair updates the row in place.
    #[tokio::test]
    #[ignore] // Requires a test database
    async fn test_mark_delivered_updates_existing() {
        let pool = setup_test_db().await;
        let manager = DeliveredStatusManager::new(pool);

        let user_id = Uuid::new_v4();
        let message_id = Uuid::new_v4();
        let conversation_id = Uuid::new_v4();

        // First delivery creates the row.
        let status1 = manager
            .mark_delivered(user_id, message_id, conversation_id)
            .await
            .expect("Should mark message as delivered");

        // Small pause so the refreshed timestamp can differ.
        tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;

        // Second delivery should update, not insert.
        let status2 = manager
            .mark_delivered(user_id, message_id, conversation_id)
            .await
            .expect("Should update existing status");

        // delivered_at is refreshed; the row id is unchanged.
        assert!(status2.delivered_at >= status1.delivered_at);
        assert_eq!(status1.id, status2.id); // Same row
    }

    // All per-user statuses of one message are returned.
    #[tokio::test]
    #[ignore] // Requires a test database
    async fn test_get_delivered_for_message() {
        let pool = setup_test_db().await;
        let manager = DeliveredStatusManager::new(pool);

        let message_id = Uuid::new_v4();
        let conversation_id = Uuid::new_v4();
        let user1 = Uuid::new_v4();
        let user2 = Uuid::new_v4();

        // Mark delivered for two different users.
        manager
            .mark_delivered(user1, message_id, conversation_id)
            .await
            .expect("Should mark as delivered");
        manager
            .mark_delivered(user2, message_id, conversation_id)
            .await
            .expect("Should mark as delivered");

        // Fetch all delivered statuses for the message.
        let statuses = manager
            .get_delivered_for_message(message_id)
            .await
            .expect("Should get statuses");

        assert_eq!(statuses.len(), 2);
        assert!(statuses.iter().any(|s| s.user_id == user1));
        assert!(statuses.iter().any(|s| s.user_id == user2));
    }

    // is_delivered flips from false to true after mark_delivered.
    #[tokio::test]
    #[ignore] // Requires a test database
    async fn test_is_delivered() {
        let pool = setup_test_db().await;
        let manager = DeliveredStatusManager::new(pool);

        let user_id = Uuid::new_v4();
        let message_id = Uuid::new_v4();
        let conversation_id = Uuid::new_v4();

        // Before marking.
        let is_delivered_before = manager
            .is_delivered(message_id, user_id)
            .await
            .expect("Should check status");
        assert!(!is_delivered_before);

        // After marking.
        manager
            .mark_delivered(user_id, message_id, conversation_id)
            .await
            .expect("Should mark as delivered");

        let is_delivered_after = manager
            .is_delivered(message_id, user_id)
            .await
            .expect("Should check status");
        assert!(is_delivered_after);
    }
}
|
||||
|
|
@ -1,148 +0,0 @@
|
|||
//! Module pour la gestion des variables d'environnement requises
|
||||
//!
|
||||
//! Ce module fournit des fonctions helper pour récupérer des variables d'environnement
|
||||
//! avec validation stricte. L'application refuse de démarrer si les secrets requis
|
||||
//! ne sont pas définis.
|
||||
|
||||
use std::env;
|
||||
|
||||
/// Fetches a required environment variable.
///
/// Panics if the variable is not set **or is empty** — the empty check was
/// documented but previously not enforced; an empty secret is as unusable as
/// a missing one, so both now refuse startup.
///
/// # Arguments
///
/// * `key` - The environment variable name
///
/// # Panics
///
/// Panics with a clear error message if the variable is not set or is empty.
///
/// # Example
///
/// ```rust,should_panic
/// # use chat_server::env::require_env;
/// // Panics if JWT_SECRET is not set
/// let secret = require_env("JWT_SECRET");
/// ```
pub fn require_env(key: &str) -> String {
    let value = env::var(key).unwrap_or_else(|_| {
        panic!(
            "FATAL: Required environment variable {} is not set. \
            Application cannot start without this configuration.",
            key
        )
    });
    // Enforce the documented "not set OR empty" contract: an empty value is
    // indistinguishable from a misconfiguration and must also refuse startup.
    if value.is_empty() {
        panic!(
            "FATAL: Required environment variable {} is set but empty. \
            Application cannot start without this configuration.",
            key
        );
    }
    value
}
|
||||
|
||||
/// Récupère une variable d'environnement requise avec validation de longueur minimale.
|
||||
///
|
||||
/// Utile pour les secrets qui doivent avoir une certaine complexité.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `key` - Le nom de la variable d'environnement
|
||||
/// * `min_length` - Longueur minimale requise
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panic si la variable n'est pas définie ou si sa longueur est inférieure à `min_length`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust,should_panic
|
||||
/// # use chat_server::env::require_env_min_length;
|
||||
/// // Panic si JWT_SECRET n'est pas défini ou fait moins de 32 caractères
|
||||
/// let secret = require_env_min_length("JWT_SECRET", 32);
|
||||
/// ```
|
||||
pub fn require_env_min_length(key: &str, min_length: usize) -> String {
|
||||
let value = require_env(key);
|
||||
if value.len() < min_length {
|
||||
panic!(
|
||||
"FATAL: Environment variable {} must be at least {} characters long (got {})",
|
||||
key,
|
||||
min_length,
|
||||
value.len()
|
||||
)
|
||||
}
|
||||
value
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::panic;

    // NOTE(review): these tests mutate process-wide env vars; the default test
    // harness runs tests in parallel, so unique var names per test are what
    // keeps them from racing — confirm names stay unique if tests are added.

    #[test]
    fn test_require_env_panics_on_missing() {
        let key = "TEST_NONEXISTENT_VAR_12345";
        env::remove_var(key);

        // catch_unwind converts the expected panic into an Err.
        let result = panic::catch_unwind(|| require_env(key));

        assert!(
            result.is_err(),
            "require_env should panic on missing variable"
        );
    }

    #[test]
    fn test_require_env_returns_value_when_set() {
        let key = "TEST_EXISTING_VAR";
        let value = "test_value_123";
        env::set_var(key, value);

        let result = require_env(key);
        assert_eq!(result, value);

        env::remove_var(key);
    }

    #[test]
    fn test_require_env_min_length_panics_on_short() {
        let key = "TEST_SHORT_SECRET";
        env::set_var(key, "short");

        let result = panic::catch_unwind(|| require_env_min_length(key, 32));

        // Clean up before asserting so a failure doesn't leak the var.
        env::remove_var(key);
        assert!(
            result.is_err(),
            "require_env_min_length should panic on short value"
        );
    }

    #[test]
    fn test_require_env_min_length_returns_value_when_valid() {
        let key = "TEST_LONG_SECRET";
        let value = "this_is_a_long_secret_key_that_meets_the_minimum_length_requirement";
        env::set_var(key, value);

        let result = require_env_min_length(key, 32);
        assert_eq!(result, value);

        env::remove_var(key);
    }

    // Exactly min_length characters must be accepted (boundary case).
    #[test]
    fn test_require_env_min_length_exact_boundary() {
        let key = "TEST_EXACT_32";
        let value = "12345678901234567890123456789012";
        env::set_var(key, value);

        let result = require_env_min_length(key, 32);
        assert_eq!(result.len(), 32);

        env::remove_var(key);
    }

    // An empty value is shorter than any positive min_length and must panic.
    #[test]
    fn test_require_env_min_length_panics_on_empty() {
        let key = "TEST_EMPTY_VAR";
        env::set_var(key, "");

        let result = panic::catch_unwind(|| require_env_min_length(key, 32));
        env::remove_var(key);
        assert!(result.is_err());
    }
}
|
||||
|
|
@ -1,744 +0,0 @@
|
|||
//! # Gestion d'erreurs unifiée pour Veza Chat Server
|
||||
//!
|
||||
//! Ce module fournit un système d'erreurs cohérent et complet avec:
|
||||
//! - Catégorisation des erreurs par domaine
|
||||
//! - Codes d'erreur standardisés
|
||||
//! - Logging automatique selon la gravité
|
||||
//! - Sérialisation pour l'API
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Crate-wide `Result` alias with `ChatError` as the error type.
pub type Result<T> = std::result::Result<T, ChatError>;
|
||||
|
||||
/// Main error type for the chat system.
///
/// Variants are grouped by domain; user-facing `#[error]` messages are French
/// by design (the product language) and must stay as-is.
#[derive(Error, Debug)]
pub enum ChatError {
    // ═══════════════════════════════════════════════════════════════════════
    // AUTHENTICATION AND AUTHORIZATION ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// JWT token invalid or expired.
    #[error("Token d'authentification invalide: {reason}")]
    InvalidToken { reason: String },

    /// User not authorized for this action.
    #[error("Accès refusé: {action}")]
    Unauthorized { action: String },

    /// User banned or suspended.
    #[error("Compte suspendu: {reason}")]
    AccountSuspended { reason: String },

    /// Login attempt with invalid credentials.
    #[error("Identifiants invalides")]
    InvalidCredentials,

    /// Two-factor authentication required.
    #[error("Authentification 2FA requise")]
    TwoFactorRequired,

    /// Invalid 2FA code.
    #[error("Code d'authentification 2FA invalide")]
    InvalidTwoFactorCode,

    // ═══════════════════════════════════════════════════════════════════════
    // VALIDATION AND CONTENT ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Message content too long.
    #[error("Message trop long: {actual} caractères (max: {max})")]
    MessageTooLong { actual: usize, max: usize },

    /// Inappropriate content detected.
    #[error("Contenu inapproprié détecté: {reason}")]
    InappropriateContent { reason: String },

    /// Spam detected by the filters.
    #[error("Contenu identifié comme spam")]
    SpamDetected,

    /// Invalid data format.
    #[error("Format invalide pour {field}: {reason}")]
    InvalidFormat { field: String, reason: String },

    /// Required parameter missing.
    #[error("Paramètre requis manquant: {param}")]
    MissingParameter { param: String },

    /// Value outside the accepted range.
    #[error("{field} hors limites: {value} (min: {min}, max: {max})")]
    OutOfRange {
        field: String,
        value: i64,
        min: i64,
        max: i64,
    },

    // ═══════════════════════════════════════════════════════════════════════
    // RATE LIMITING AND QUOTA ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Rate limit exceeded.
    #[error("Limite de taux dépassée pour {action}: {current}/{limit} dans {window}s")]
    RateLimitExceeded {
        action: String,
        current: u32,
        limit: u32,
        // Window length in seconds.
        window: u64,
    },

    /// User quota exceeded.
    #[error("Quota {quota_type} dépassé: {used}/{limit}")]
    QuotaExceeded {
        quota_type: String,
        used: u64,
        limit: u64,
    },

    /// Too many simultaneous connections.
    #[error("Trop de connexions simultanées: {current}/{max}")]
    TooManyConnections { current: u32, max: u32 },

    // ═══════════════════════════════════════════════════════════════════════
    // NETWORK AND WEBSOCKET ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// WebSocket connection error.
    #[error("Erreur WebSocket: {source}")]
    WebSocket {
        #[source]
        source: tokio_tungstenite::tungstenite::Error,
    },

    /// Connection closed unexpectedly.
    #[error("Connexion fermée: {reason}")]
    ConnectionClosed { reason: String },

    /// Connection timeout.
    #[error("Timeout de connexion après {seconds}s")]
    ConnectionTimeout { seconds: u64 },

    /// General network error.
    #[error("Erreur réseau: {message}")]
    NetworkError { message: String },

    // ═══════════════════════════════════════════════════════════════════════
    // DATABASE ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Database error.
    #[error("Erreur base de données: {operation}")]
    Database {
        operation: String,
        #[source]
        source: sqlx::Error,
    },

    /// Resource not found.
    #[error("{resource} non trouvé(e): {id}")]
    NotFound { resource: String, id: String },

    /// Data conflict (e.g. unique-constraint violation).
    #[error("Conflit de données: {reason}")]
    Conflict { reason: String },

    /// Failed transaction.
    #[error("Transaction échouée: {reason}")]
    TransactionFailed { reason: String },

    // ═══════════════════════════════════════════════════════════════════════
    // CONVERSATION AND MESSAGE ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Conversation does not exist.
    #[error("Conversation {id} inexistante")]
    ConversationNotFound { id: String },

    /// User is not a member of the conversation.
    #[error("Utilisateur non membre de la conversation {conversation_id}")]
    NotMember { conversation_id: String },

    /// Insufficient permissions inside the conversation.
    #[error("Permissions insuffisantes pour {action} dans {conversation_id}")]
    InsufficientPermissions {
        action: String,
        conversation_id: String,
    },

    /// Conversation archived.
    #[error("Conversation {id} archivée")]
    ConversationArchived { id: String },

    /// Message not found.
    #[error("Message {id} non trouvé")]
    MessageNotFound { id: String },

    /// Message cannot be edited.
    #[error("Edition impossible: {reason}")]
    EditForbidden { reason: String },

    // ═══════════════════════════════════════════════════════════════════════
    // FILE AND UPLOAD ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// File too large.
    #[error("Fichier trop volumineux: {size} bytes (max: {max_size})")]
    FileTooLarge { size: u64, max_size: u64 },

    /// File type not allowed.
    #[error("Type de fichier non autorisé: {mime_type}")]
    UnsupportedFileType { mime_type: String },

    /// Potentially malicious file detected.
    #[error("Fichier potentiellement dangereux détecté")]
    MaliciousFile,

    /// Upload error.
    #[error("Erreur upload: {reason}")]
    UploadError { reason: String },

    // ═══════════════════════════════════════════════════════════════════════
    // SYSTEM AND CONFIGURATION ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Configuration error.
    #[error("Erreur configuration: {message}")]
    Configuration { message: String },

    /// Service unavailable.
    #[error("Service {service} indisponible: {reason}")]
    ServiceUnavailable { service: String, reason: String },

    /// Cache error.
    #[error("Erreur cache: {operation}")]
    Cache { operation: String },

    /// Server shutdown timeout.
    #[error("Timeout lors de l'arrêt du serveur")]
    ShutdownTimeout,

    /// Unspecified internal error.
    #[error("Erreur interne: {message}")]
    Internal { message: String },

    /// RabbitMQ event bus unavailable. `#[from]` lets `?` convert the
    /// underlying error directly into this variant.
    #[error("EventBus indisponible: {source}")]
    EventBusUnavailable {
        #[from]
        source: crate::event_bus::EventBusUnavailableError,
    },

    // ═══════════════════════════════════════════════════════════════════════
    // PERMISSION AND REACTION ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Permission denied.
    #[error("Permission refusée: {message}")]
    PermissionDenied { message: String },

    /// Reaction already exists on this message.
    #[error("Réaction déjà existante pour ce message")]
    ReactionAlreadyExists,

    /// Reaction not found.
    #[error("Réaction non trouvée")]
    ReactionNotFound,

    // ═══════════════════════════════════════════════════════════════════════
    // SECURITY ERRORS
    // ═══════════════════════════════════════════════════════════════════════
    /// Suspicious activity detected.
    #[error("Activité suspecte détectée: {reason}")]
    SuspiciousActivity { reason: String },

    /// Blocked IP address.
    #[error("Adresse IP {ip} bloquée: {reason}")]
    IpBlocked { ip: String, reason: String },

    /// Injection attempt detected.
    #[error("Tentative d'injection détectée")]
    InjectionAttempt,

    /// Security validation failed.
    #[error("Validation sécurité échouée: {check}")]
    SecurityValidationFailed { check: String },

    /// JSON serialization error.
    #[error("Erreur JSON: {source}")]
    Json {
        #[source]
        source: serde_json::Error,
    },

    /// General serialization error.
    #[error("Erreur de sérialisation {operation}: {message}")]
    Serialization { operation: String, message: String },

    /// Feature not available.
    #[error("Fonctionnalité {feature} non disponible: {reason}")]
    FeatureNotAvailable { feature: String, reason: String },

    /// Validation error.
    #[error("Erreur de validation pour {field}: {reason}")]
    ValidationError { field: String, reason: String },

    /// Parsing error.
    #[error("Erreur de parsing: {reason}")]
    ParseError { reason: String },

    /// Simultaneous connection limit reached.
    #[error("Limite de connexions simultanées atteinte")]
    ConnectionLimitReached,
}
|
||||
|
||||
impl ChatError {
|
||||
/// Maps each error variant to its HTTP status code.
///
/// NOTE(review): the "413 Payload Too Large" section below is empty —
/// `FileTooLarge` is mapped to 400 above; 413 may have been intended.
/// Likewise `NotMember` falls in the catch-all 500 group; 403 looks more
/// appropriate — confirm before changing, clients may depend on it.
pub fn http_status(&self) -> u16 {
    match self {
        // 400 Bad Request
        Self::InvalidFormat { .. }
        | Self::MissingParameter { .. }
        | Self::OutOfRange { .. }
        | Self::MessageTooLong { .. }
        | Self::FileTooLarge { .. }
        | Self::UnsupportedFileType { .. } => 400,

        // 401 Unauthorized
        Self::InvalidToken { .. }
        | Self::InvalidCredentials
        | Self::TwoFactorRequired
        | Self::InvalidTwoFactorCode => 401,

        // 403 Forbidden
        Self::Unauthorized { .. }
        | Self::AccountSuspended { .. }
        | Self::InsufficientPermissions { .. }
        | Self::EditForbidden { .. }
        | Self::IpBlocked { .. } => 403,

        // 404 Not Found
        Self::NotFound { .. }
        | Self::ConversationNotFound { .. }
        | Self::MessageNotFound { .. } => 404,

        // 409 Conflict
        Self::Conflict { .. } => 409,

        // 413 Payload Too Large (no variants mapped here — see note above)

        // 422 Unprocessable Entity
        Self::InappropriateContent { .. } | Self::SpamDetected | Self::MaliciousFile => 422,

        // 429 Too Many Requests
        Self::RateLimitExceeded { .. }
        | Self::QuotaExceeded { .. }
        | Self::TooManyConnections { .. } => 429,

        // 500 Internal Server Error
        Self::Database { .. }
        | Self::Internal { .. }
        | Self::Configuration { .. }
        | Self::TransactionFailed { .. }
        | Self::UploadError { .. }
        | Self::Cache { .. } => 500,

        // 503 Service Unavailable
        Self::ServiceUnavailable { .. }
        | Self::ShutdownTimeout
        | Self::EventBusUnavailable { .. } => 503, // Added EventBusUnavailable

        // 418 I'm a teapot (deliberate, for injection attempts)
        Self::InjectionAttempt => 418,

        // All remaining errors -> 500
        Self::Json { .. }
        | Self::Serialization { .. }
        | Self::FeatureNotAvailable { .. }
        | Self::ConnectionLimitReached
        | Self::SecurityValidationFailed { .. }
        | Self::SuspiciousActivity { .. }
        | Self::ConversationArchived { .. }
        | Self::NetworkError { .. }
        | Self::ConnectionClosed { .. }
        | Self::ConnectionTimeout { .. }
        | Self::WebSocket { .. }
        | Self::NotMember { .. } => 500,

        // Later additions
        Self::PermissionDenied { .. } => 403,
        Self::ReactionAlreadyExists => 409,
        Self::ReactionNotFound => 404,
        Self::ValidationError { .. } => 400,
        Self::ParseError { .. } => 400,
    }
}
|
||||
|
||||
/// Retourne la sévérité de l'erreur pour les logs
|
||||
pub fn severity(&self) -> ErrorSeverity {
|
||||
match self {
|
||||
// CRITICAL - Erreur système critique
|
||||
Self::Database { .. }
|
||||
| Self::ServiceUnavailable { .. }
|
||||
| Self::ShutdownTimeout
|
||||
| Self::SuspiciousActivity { .. }
|
||||
| Self::InjectionAttempt
|
||||
| Self::IpBlocked { .. }
|
||||
| Self::EventBusUnavailable { .. } => ErrorSeverity::High, // Added EventBusUnavailable
|
||||
// HIGH - Problème sérieux à traiter rapidement
|
||||
Self::InvalidToken { .. }
|
||||
| Self::AccountSuspended { .. }
|
||||
| Self::InvalidFormat { .. }
|
||||
| Self::MissingParameter { .. }
|
||||
| Self::OutOfRange { .. }
|
||||
| Self::MessageTooLong { .. }
|
||||
| Self::FileTooLarge { .. }
|
||||
| Self::UnsupportedFileType { .. }
|
||||
| Self::TransactionFailed { .. }
|
||||
| Self::UploadError { .. }
|
||||
| Self::InvalidCredentials
|
||||
| Self::InvalidTwoFactorCode
|
||||
| Self::InappropriateContent { .. }
|
||||
| Self::SpamDetected
|
||||
| Self::MaliciousFile
|
||||
| Self::ConversationNotFound { .. }
|
||||
| Self::InsufficientPermissions { .. }
|
||||
| Self::MessageNotFound { .. }
|
||||
| Self::EditForbidden { .. }
|
||||
| Self::Conflict { .. }
|
||||
| Self::ConnectionLimitReached
|
||||
| Self::SecurityValidationFailed { .. } => ErrorSeverity::Medium,
|
||||
|
||||
// Gravité moyenne - Erreurs qui affectent l'utilisateur
|
||||
Self::RateLimitExceeded { .. }
|
||||
| Self::QuotaExceeded { .. }
|
||||
| Self::TooManyConnections { .. }
|
||||
| Self::Unauthorized { .. }
|
||||
| Self::NotFound { .. } => ErrorSeverity::Low,
|
||||
|
||||
// INFO - Information
|
||||
Self::ConnectionClosed { .. }
|
||||
| Self::TwoFactorRequired
|
||||
| Self::NotMember { .. }
|
||||
| Self::Json { .. }
|
||||
| Self::Serialization { .. }
|
||||
| Self::FeatureNotAvailable { .. }
|
||||
| Self::ConversationArchived { .. }
|
||||
| Self::WebSocket { .. }
|
||||
| Self::NetworkError { .. }
|
||||
| Self::ConnectionTimeout { .. }
|
||||
| Self::Cache { .. }
|
||||
| Self::Internal { .. }
|
||||
| Self::Configuration { .. } => ErrorSeverity::Info,
|
||||
|
||||
// Nouvelles erreurs
|
||||
Self::PermissionDenied { .. } => ErrorSeverity::Warning,
|
||||
Self::ReactionAlreadyExists => ErrorSeverity::Info,
|
||||
Self::ReactionNotFound => ErrorSeverity::Info,
|
||||
Self::ValidationError { .. } => ErrorSeverity::Low,
|
||||
Self::ParseError { .. } => ErrorSeverity::Low,
|
||||
}
|
||||
}
|
||||
|
||||
/// Retourne un message d'erreur sécurisé pour le client
|
||||
pub fn public_message(&self) -> String {
|
||||
match self {
|
||||
// Messages détaillés OK pour le client
|
||||
Self::InvalidFormat { field, .. } => format!("Format invalide pour {}", field),
|
||||
Self::MissingParameter { param } => format!("Paramètre manquant: {}", param),
|
||||
Self::MessageTooLong { max, .. } => {
|
||||
format!("Message trop long (max: {} caractères)", max)
|
||||
}
|
||||
Self::RateLimitExceeded { action, window, .. } => {
|
||||
format!(
|
||||
"Trop de requêtes pour {}, veuillez patienter {}s",
|
||||
action, window
|
||||
)
|
||||
}
|
||||
|
||||
// Messages génériques pour éviter la divulgation d'informations
|
||||
Self::Database { .. } => "Erreur temporaire, veuillez réessayer".to_string(),
|
||||
Self::Internal { .. } => "Erreur interne du serveur".to_string(),
|
||||
Self::Configuration { .. } => "Service temporairement indisponible".to_string(),
|
||||
Self::InjectionAttempt => "Requête rejetée".to_string(),
|
||||
Self::SuspiciousActivity { .. } => "Activité inhabituelle détectée".to_string(),
|
||||
|
||||
// Message par défaut
|
||||
_ => self.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Crée une erreur de base de données avec contexte
|
||||
pub fn database_error(operation: &str, source: sqlx::Error) -> Self {
|
||||
Self::Database {
|
||||
operation: operation.to_string(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
/// Crée une erreur d'autorisation avec contexte
|
||||
pub fn unauthorized(action: &str) -> Self {
|
||||
Self::Unauthorized {
|
||||
action: action.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Crée une erreur de ressource non trouvée
|
||||
pub fn not_found(resource: &str, id: &str) -> Self {
|
||||
Self::NotFound {
|
||||
resource: resource.to_string(),
|
||||
id: id.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de configuration
|
||||
pub fn configuration_error(message: &str) -> Self {
|
||||
Self::Configuration {
|
||||
message: message.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de message trop long
|
||||
pub fn message_too_long(actual: usize, max: usize) -> Self {
|
||||
Self::MessageTooLong { actual, max }
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de sérialisation
|
||||
pub fn serialization_error(type_name: &str, _data: &str, source: serde_json::Error) -> Self {
|
||||
Self::Serialization {
|
||||
operation: format!("serialize {}", type_name),
|
||||
message: source.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs WebSocket
|
||||
pub fn websocket_error(
|
||||
_operation: &str,
|
||||
source: tokio_tungstenite::tungstenite::Error,
|
||||
) -> Self {
|
||||
Self::WebSocket { source }
|
||||
}
|
||||
|
||||
/// Helper pour les fonctionnalités non disponibles
|
||||
pub fn feature_not_available(feature: &str, reason: &str) -> Self {
|
||||
Self::FeatureNotAvailable {
|
||||
feature: feature.to_string(),
|
||||
reason: reason.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour convertir sqlx::Error avec une meilleure gestion
|
||||
pub fn from_sqlx_error(operation: &str, error: sqlx::Error) -> Self {
|
||||
Self::Database {
|
||||
operation: operation.to_string(),
|
||||
source: error,
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs JSON
|
||||
pub fn from_json_error(error: serde_json::Error) -> Self {
|
||||
Self::Json { source: error }
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de rate limiting avec des valeurs par défaut
|
||||
pub fn rate_limit_exceeded_simple(action: &str) -> Self {
|
||||
Self::RateLimitExceeded {
|
||||
action: action.to_string(),
|
||||
current: 0,
|
||||
limit: 0,
|
||||
window: 60,
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs d'autorisation
|
||||
pub fn unauthorized_simple(action: &str) -> Self {
|
||||
Self::Unauthorized {
|
||||
action: action.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de contenu inapproprié
|
||||
pub fn inappropriate_content_simple(reason: &str) -> Self {
|
||||
Self::InappropriateContent {
|
||||
reason: reason.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de validation
|
||||
pub fn validation_error(reason: &str) -> Self {
|
||||
Self::ValidationError {
|
||||
field: "general".to_string(),
|
||||
reason: reason.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs de permission
|
||||
pub fn permission_denied(message: &str) -> Self {
|
||||
Self::PermissionDenied {
|
||||
message: message.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs internes
|
||||
pub fn internal_error(message: String) -> Self {
|
||||
// Changed to String
|
||||
Self::Internal {
|
||||
message, // Direct assignment
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper pour les erreurs not found avec un seul paramètre
|
||||
pub fn not_found_simple(message: &str) -> Self {
|
||||
Self::NotFound {
|
||||
resource: "resource".to_string(),
|
||||
id: message.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Niveaux de sévérité des erreurs
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum ErrorSeverity {
|
||||
Info,
|
||||
Low,
|
||||
Medium,
|
||||
High,
|
||||
Critical,
|
||||
Warning,
|
||||
}
|
||||
|
||||
impl fmt::Display for ErrorSeverity {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Info => write!(f, "INFO"),
|
||||
Self::Low => write!(f, "LOW"),
|
||||
Self::Medium => write!(f, "MEDIUM"),
|
||||
Self::High => write!(f, "HIGH"),
|
||||
Self::Critical => write!(f, "CRITICAL"),
|
||||
Self::Warning => write!(f, "WARNING"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implémentations de conversion depuis des erreurs externes
|
||||
impl From<sqlx::Error> for ChatError {
|
||||
fn from(err: sqlx::Error) -> Self {
|
||||
Self::database_error("query", err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tokio_tungstenite::tungstenite::Error> for ChatError {
|
||||
fn from(err: tokio_tungstenite::tungstenite::Error) -> Self {
|
||||
Self::WebSocket { source: err }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for ChatError {
|
||||
fn from(err: serde_json::Error) -> Self {
|
||||
Self::InvalidFormat {
|
||||
field: "json".to_string(),
|
||||
reason: err.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::env::VarError> for ChatError {
|
||||
fn from(err: std::env::VarError) -> Self {
|
||||
Self::Configuration {
|
||||
message: format!("Variable d'environnement manquante: {}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience macro for constructing `ChatError` values.
///
/// `chat_error!(Variant)` builds a unit variant;
/// `chat_error!(Variant, field = value, ...)` builds a struct variant,
/// converting each value with `.into()`.
#[macro_export]
macro_rules! chat_error {
    ($variant:ident) => {
        $crate::error::ChatError::$variant
    };
    ($variant:ident, $($field:ident = $value:expr),*) => {
        $crate::error::ChatError::$variant {
            $($field: $value.into()),*
        }
    };
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_error_http_status() {
        // Table-driven check of the error -> HTTP status mapping.
        let cases = [
            (ChatError::InvalidCredentials, 401_u16),
            (ChatError::not_found("user", "123"), 404),
            (ChatError::unauthorized("send_message"), 403),
        ];
        for (err, expected) in cases {
            assert_eq!(err.http_status(), expected);
        }
    }

    #[test]
    fn test_error_severity() {
        let cases = [
            (ChatError::InjectionAttempt, ErrorSeverity::High),
            (ChatError::InvalidCredentials, ErrorSeverity::Medium),
            (ChatError::SpamDetected, ErrorSeverity::Medium),
        ];
        for (err, expected) in cases {
            assert_eq!(err.severity(), expected);
        }
    }

    #[test]
    fn test_public_message() {
        // Client-input errors expose the offending field.
        let format_err = ChatError::InvalidFormat {
            field: "email".to_string(),
            reason: "invalid format".to_string(),
        };
        assert_eq!(format_err.public_message(), "Format invalide pour email");

        // Database errors are masked behind a generic message.
        let masked = ChatError::database_error("insert", sqlx::Error::RowNotFound);
        assert_eq!(
            masked.public_message(),
            "Erreur temporaire, veuillez réessayer"
        );
    }

    #[test]
    fn test_error_creation_helpers() {
        let err = ChatError::not_found("conversation", "room_123");
        assert!(matches!(
            err,
            ChatError::NotFound { ref resource, ref id }
                if resource == "conversation" && id == "room_123"
        ));
    }

    #[test]
    fn test_macro() {
        let err = chat_error!(MessageTooLong, actual = 5000_usize, max = 4000_usize);
        assert!(matches!(
            err,
            ChatError::MessageTooLong { actual: 5000, max: 4000 }
        ));
    }

    #[test]
    fn test_error_severity_display() {
        assert_eq!(ErrorSeverity::Info.to_string(), "INFO");
        assert_eq!(ErrorSeverity::High.to_string(), "HIGH");
        assert_eq!(ErrorSeverity::Critical.to_string(), "CRITICAL");
    }

    #[test]
    fn test_error_severity_equality() {
        assert_eq!(ErrorSeverity::Low, ErrorSeverity::Low);
        assert_ne!(ErrorSeverity::Info, ErrorSeverity::Warning);
    }

    #[test]
    fn test_message_too_long_helper() {
        let too_long = ChatError::message_too_long(500, 400);
        assert_eq!(too_long.http_status(), 400);
        // The public message embeds the maximum allowed length.
        assert!(too_long.public_message().contains("400"));
    }

    #[test]
    fn test_validation_error_helper() {
        let invalid = ChatError::validation_error("invalid input");
        assert_eq!(invalid.http_status(), 400);
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue