# Start the server with V8's built-in sampling profiler enabled.
# This writes an isolate-*.log file into the working directory.
node --prof server.js

# Post-process the V8 log into a human-readable tick report.
node --prof-process isolate-*.log > profile.txt
[Summary]:
   ticks  total  nonlib   name
    4321  43.2%   44.1%   JavaScript
    3201  32.0%   32.7%   C++
   ...

 [JavaScript]:
   ticks  total  nonlib   name
     892   8.9%    9.1%   LazyCompile: *parseJson /app/src/parser.js:42
     741   7.4%    7.6%   LazyCompile: *buildIndex /app/src/indexer.js:118
const profiler = require('v8-profiler-next');
const fs = require('fs');

// Start profiling. The second argument (`true`) records sample
// positions so the profile can be mapped back to source lines.
profiler.startProfiling('my-request', true);

// ... run the code you want to profile

// Stop and save. The returned profile holds memory inside V8 until
// delete() is called, so always free it — even if export fails.
const profile = profiler.stopProfiling('my-request');
profile.export((error, result) => {
  if (error) {
    console.error('Failed to export CPU profile:', error);
  } else {
    fs.writeFileSync('profile.cpuprofile', result);
  }
  profile.delete(); // release V8's copy regardless of export outcome
});
const { performance, PerformanceObserver } = require('perf_hooks');

// Register the observer BEFORE measuring. Without `buffered: true`,
// entries recorded before observe() is called are never delivered.
const obs = new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    console.log(`${entry.name}: ${entry.duration.toFixed(2)}ms`);
  }
});
obs.observe({ entryTypes: ['measure'], buffered: true });

// Mark start
performance.mark('db-query-start');
// NOTE(review): top-level await requires an ES module; in CommonJS,
// wrap this in an async function.
const results = await db.query(sql);
// Mark end and measure
performance.mark('db-query-end');
performance.measure('db-query', 'db-query-start', 'db-query-end');
# Install the Clinic.js toolkit once, globally.
npm install -g clinic

# Overall health check: classifies the problem as I/O, event loop,
# memory/GC, or CPU.
clinic doctor -- node server.js

# CPU flame graph: find hot functions.
clinic flame -- node server.js

# Async-operation (bubble) profile: find slow async paths.
clinic bubbleprof -- node server.js
# Install 0x globally, then run the app under it; on exit it
# generates an interactive flame graph in the browser.
npm install -g 0x
0x server.js
const { monitorEventLoopDelay } = require('perf_hooks');

// Sample event loop delay into a histogram at 20ms resolution.
const h = monitorEventLoopDelay({ resolution: 20 });
h.enable();

// Report every 5 seconds, then reset so each window is independent.
setInterval(() => {
  console.log({
    min: h.min / 1e6, // nanoseconds → milliseconds
    max: h.max / 1e6,
    mean: h.mean / 1e6,
    p99: h.percentile(99) / 1e6,
  });
  h.reset();
}, 5000);
const looplag = require('looplag');
const lag = looplag(1000); // sample every 1000ms
// lag.value() returns current lag in ms
# Terminal 1: Start the server with profiling
clinic doctor -- node server.js

# Terminal 2: Generate load (100 connections for 30 seconds)
npx autocannon -c 100 -d 30 http://localhost:3000/api/endpoint
// Only activate via environment variable or feature flag
if (process.env.ENABLE_PROFILING === 'true') {
  const profiler = require('v8-profiler-next');
  profiler.startProfiling('prod-sample', true);

  // Capture a 30-second window, then stop and export.
  setTimeout(() => {
    const profile = profiler.stopProfiling('prod-sample');
    profile.export((err, result) => {
      if (err) {
        console.error('Profile export failed:', err);
      } else {
        // Upload to S3/GCS, not local disk
        uploadToStorage(`profile-${Date.now()}.cpuprofile`, result);
      }
      profile.delete(); // free V8's copy regardless of export outcome
    });
  }, 30_000);
}
const { PerformanceObserver } = require('perf_hooks');

// Warn on any GC pause longer than 50ms. `entry.detail.kind`
// identifies the collection type (minor/major/incremental/weakcb).
const obs = new PerformanceObserver((list) => {
  list.getEntries().forEach((entry) => {
    if (entry.duration > 50) {
      console.warn(`GC pause: ${entry.duration.toFixed(1)}ms (kind: ${entry.detail.kind})`);
    }
  });
});
obs.observe({ entryTypes: ['gc'] });
const { PerformanceObserver } = require('perf_hooks'); const obs = new PerformanceObserver((list) => { list.getEntries().forEach(entry => { if (entry.duration > 50) { console.warn(`GC pause: ${entry.duration.toFixed(1)}ms (kind: ${entry.detail.kind})`); } });
}); obs.observe({ entryTypes: ['gc'] });

- A slow synchronous function blocks everything. Unlike Java or Go, there's no other thread to pick up the slack.
- Async code can still starve the event loop. Thousands of microtasks queuing per tick will make your service feel blocked even if nothing is technically "slow."
- Memory pressure causes GC pauses. V8's garbage collector runs on the same thread. Large heaps mean frequent stop-the-world pauses — milliseconds that show up as P99 latency spikes.

- I/O issues — Your app is waiting on slow I/O (database, disk, network)
- Event loop issues — Synchronous code is blocking the loop
- Memory issues — GC pressure, potential leaks
- CPU issues — Computation-heavy paths

- Blocks taking >5% of total width — these are your hot paths
- Blocks that are unexpectedly wide given what they should be doing (JSON parsing, string manipulation)
- V8 internal functions (*_NATIVE, BytecodeHandler) — usually fine, but can indicate optimization failures

- Expose event loop delay as a Prometheus metric. Alert if P99 > 100ms for >2 minutes.
- Record GC pause duration. Alert if mean GC pause >30ms.
- Add custom perf_hooks marks around your 5 slowest endpoints. These become your early-warning system.
- Keep clinic/0x in your runbook. When your Grafana alert fires, the next step is already documented.

- [ ] Check event loop delay metric — is it >50ms?
- [ ] Check GC pause frequency and duration
- [ ] Run clinic doctor under representative load
- [ ] If CPU-bound: use clinic flame or 0x to find the hot function
- [ ] If async-bound: use clinic bubbleprof to find the slow async operation
- [ ] Check for recently deployed code (commits in the last 48h)
- [ ] Validate that no synchronous operations snuck into hot paths (file reads, JSON.parse on large payloads)
- [ ] Confirm database query plans haven't regressed (EXPLAIN ANALYZE)

- Clinic.js — Comprehensive Node.js performance toolkit
- 0x — Flame graph generator
- v8-profiler-next — Programmatic V8 CPU profiles
- perf_hooks — Built-in performance measurement API
- autocannon — HTTP load generator for profiling sessions