brew tap mack-overflow/tap
brew install benchmarkr
echo "deb [trusted=yes] https://apt.fury.io/mack-overflow/ /" \
  | sudo tee /etc/apt/sources.list.d/benchmarkr.list
sudo apt update
sudo apt install benchmarkr
version: 1
endpoints:
  - name: list-users
    method: GET
    url: ${API_BASE:-http://localhost:8080}/users
    headers:
      Authorization: Bearer ${API_TOKEN}
    defaults:
      concurrency: 10
      duration_seconds: 30
  - name: search-users
    method: GET
    url: ${API_BASE}/users/search
    params:
      q: "test"
      limit: "50"
    defaults:
      concurrency: 5
      duration_seconds: 15
  - name: create-order
    method: POST
    url: ${API_BASE}/orders
    headers:
      Authorization: Bearer ${API_TOKEN}
      Content-Type: application/json
    body:
      sku: "ABC-123"
      quantity: 1
    defaults:
      concurrency: 2
      duration_seconds: 10
benchmarkr run -e list-users
benchmarkr run -e list-users \
  --header "X-Trace: debug-2026-04-28" \
  --concurrency 50
benchmarkr run --all
# .github/workflows/perf.yml
- name: Benchmark every endpoint
  env:
    API_BASE: https://api.staging.example.com
    API_TOKEN: ${{ secrets.STAGING_API_TOKEN }}
    BENCH_CLOUD_TOKEN: ${{ secrets.BENCHMARKR_TOKEN }}
  run: benchmarkr run --all --store --json > perf-results.json
[
  {
    "name": "list-users",
    "stop_reason": "completed",
    "duration": "30.001s",
    "stored": true,
    "result": { "requests": 12483, "p50_ms": 4, "p95_ms": 12, "p99_ms": 23, "errors_total": 0 }
  },
  {
    "name": "search-users",
    "stop_reason": "completed",
    "duration": "15.002s",
    "stored": true,
    "result": { "requests": 4127, "p50_ms": 18, "p95_ms": 47, "p99_ms": 92, "errors_total": 0 }
  },
  {
    "name": "create-order",
    "stop_reason": "completed",
    "duration": "10.001s",
    "stored": true,
    "result": { "requests": 312, "p50_ms": 41, "p95_ms": 88, "p99_ms": 121, "errors_total": 0 }
  }
]
- Env var substitution. ${API_BASE} and ${API_BASE:-default} work the way they do in shell. A sibling .env file is auto-loaded but never overrides what's already in the environment, so the same file works on a laptop, in CI, and in staging.
- Defaults travel with the endpoint. create-order runs at concurrency 2 for 10 seconds because that's what makes sense for a write path. list-users runs at concurrency 10. You set this once in the file you already review.
- Discovery walks up from CWD. Run the CLI from any subdirectory and it finds the file, like git does.
- Export from the dashboard. Open any endpoint and click Export for YAML or JSON. Or click Export all in the endpoints nav to dump every endpoint to one file you can drop into a fresh repo.
- Import to the dashboard. Click Import, pick a benchmarkr.yaml, and endpoints upsert by (user, name). If the config changed, a new version is recorded — so you get a history of how each endpoint's load shape evolved.
- Define endpoints in benchmarkr.yaml, commit them.
- CI runs the loop above on every PR with --store and the cloud token, persisting results to the dashboard.
- Open the endpoint in the dashboard to see the trend line for that endpoint across the last N PRs.
- If somebody adds an endpoint via the dashboard UI for ad-hoc poking, Export → drop the file into the repo → it's now part of the CI matrix.
- New endpoints get a perf budget at the same moment they get a route handler.
- Reviewers can see in the diff that a new write path is being benchmarked at concurrency 2, not 100, and push back if that's wrong.
- CI gets a free regression signal across every endpoint, not just the one someone remembered to add to a script.
- The dashboard gives you the historical view without anyone manually re-entering endpoints.