Triple cleanup, landed together because the changes share the same cleanup intent and touch non-overlapping trees. 1. 38× tracked .playwright-mcp/*.yml stage-deleted MCP session recordings that had been inadvertently committed. .gitignore already covers .playwright-mcp/ (post-audit J2 block added in d12b901de). Working-tree copies removed separately. 2. 19× disabled CI workflows moved to docs/archive/workflows/. Legacy .yml.disabled files in .github/workflows/ were 1676 LOC of dead config (backend-ci, cd, staging-validation, accessibility, chromatic, visual-regression, storybook-audit, contract-testing, zap-dast, container-scan, semgrep, sast, mutation-testing, rust-mutation, load-test-nightly, flaky-report, openapi-lint, commitlint, performance). Preserved in docs/archive/workflows/ for historical reference; `.github/workflows/` now only lists the 5 actually-running pipelines. 3. Orphan code removed (0 consumers confirmed via grep) - veza-backend-api/internal/repository/user_repository.go In-memory UserRepository mock, never imported anywhere. - proto/chat/chat.proto The Rust chat server was deleted 2026-02-22 (commit 279a10d31); this proto file was its orphaned spec. Chat now lives 100% in the Go backend. - veza-common/src/types/chat.rs (Conversation, Message, MessageType, Attachment, Reaction) - veza-common/src/types/websocket.rs (WebSocketMessage, PresenceStatus, CallType — depended on chat::MessageType) - veza-common/src/types/mod.rs updated: removed `pub mod chat;`, `pub mod websocket;`, and their re-exports. Only `veza_common::logging` is consumed by veza-stream-server (verified with `grep -r "veza_common::"`). `cargo check` on veza-common passes post-removal. Refs: AUDIT_REPORT.md §8.2 "Code mort / orphelin" + §9.1. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
50 lines
1.6 KiB
JavaScript
50 lines
1.6 KiB
JavaScript
/**
 * Compare k6 summary JSON against baseline thresholds.
 * Usage: node compare.mjs <summary.json>
 * Exit 1 if p95/p99 latency or the error rate exceeds the absolute
 * baseline thresholds defined below.
 */
|
|
import { readFileSync } from 'fs';
|
|
|
|
/**
 * Absolute baseline thresholds the k6 summary must stay under.
 * Frozen so a stray assignment elsewhere cannot silently loosen the gate.
 */
const baselineThresholds = Object.freeze({
  http_req_duration_p95: 500, // p95 latency ceiling, ms
  http_req_duration_p99: 1000, // p99 latency ceiling, ms
  http_req_failed_rate: 0.01, // max request error rate (1%)
});
// First positional CLI argument: path to the k6 end-of-test summary JSON.
const [, , summaryPath] = process.argv;

// argv entries are either strings or undefined, so this guard matches the
// original truthiness check exactly (rejects undefined and '').
if (summaryPath === undefined || summaryPath === '') {
  console.error('Usage: node compare.mjs <k6-summary.json>');
  process.exit(1);
}
// Load the k6 summary and compare each tracked metric against its baseline.
// Exits 1 when any threshold is exceeded or the summary cannot be loaded.
try {
  const summary = JSON.parse(readFileSync(summaryPath, 'utf8'));
  const metrics = summary.metrics || {};
  let failed = false;

  // Returns true when `value` is a number above `limit`. Warns when the
  // metric is absent so a truncated or empty summary cannot silently pass
  // (the original `value && value > limit` guard skipped missing metrics
  // with no signal at all).
  const exceeds = (value, limit, label) => {
    if (typeof value !== 'number') {
      console.warn(`WARN: metric ${label} missing from summary; skipping check`);
      return false;
    }
    return value > limit;
  };

  const p95 = metrics.http_req_duration?.values?.['p(95)'];
  if (exceeds(p95, baselineThresholds.http_req_duration_p95, 'http_req_duration p(95)')) {
    console.error(`FAIL: p95 latency ${p95.toFixed(0)}ms > baseline ${baselineThresholds.http_req_duration_p95}ms`);
    failed = true;
  }

  const p99 = metrics.http_req_duration?.values?.['p(99)'];
  if (exceeds(p99, baselineThresholds.http_req_duration_p99, 'http_req_duration p(99)')) {
    console.error(`FAIL: p99 latency ${p99.toFixed(0)}ms > baseline ${baselineThresholds.http_req_duration_p99}ms`);
    failed = true;
  }

  const failRate = metrics.http_req_failed?.values?.rate;
  if (exceeds(failRate, baselineThresholds.http_req_failed_rate, 'http_req_failed rate')) {
    console.error(`FAIL: error rate ${(failRate * 100).toFixed(2)}% > baseline ${baselineThresholds.http_req_failed_rate * 100}%`);
    failed = true;
  }

  if (failed) {
    process.exit(1);
  }
  console.log('PASS: All performance metrics within baseline thresholds');
} catch (e) {
  // Covers both file-read failures (missing path, permissions) and JSON
  // parse failures; the previous message claimed only "parse".
  console.error('Failed to read or parse summary:', e.message);
  process.exit(1);
}