npm install \
  @opentelemetry/api \
  @opentelemetry/sdk-node \
  @opentelemetry/auto-instrumentations-node \
  @opentelemetry/exporter-trace-otlp-http \
  @opentelemetry/exporter-metrics-otlp-http \
  @opentelemetry/sdk-metrics \
  @opentelemetry/sdk-trace-base \
  @opentelemetry/resources \
  @opentelemetry/semantic-conventions
// tracing.js
'use strict';

const { NodeSDK } = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { OTLPMetricExporter } = require('@opentelemetry/exporter-metrics-otlp-http');
const { PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');

const resource = new Resource({
  [SemanticResourceAttributes.SERVICE_NAME]: 'order-service',
  [SemanticResourceAttributes.SERVICE_VERSION]: '1.4.2',
  [SemanticResourceAttributes.DEPLOYMENT_ENVIRONMENT]: process.env.NODE_ENV || 'development',
});

const traceExporter = new OTLPTraceExporter({
  url: process.env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT || 'http://localhost:4318/v1/traces',
});

const metricExporter = new OTLPMetricExporter({
  url: process.env.OTEL_EXPORTER_OTLP_METRICS_ENDPOINT || 'http://localhost:4318/v1/metrics',
});

const sdk = new NodeSDK({
  resource,
  traceExporter,
  metricReader: new PeriodicExportingMetricReader({
    exporter: metricExporter,
    exportIntervalMillis: 15_000, // export every 15 seconds
  }),
  instrumentations: [
    getNodeAutoInstrumentations({
      '@opentelemetry/instrumentation-fs': { enabled: false }, // too noisy
      '@opentelemetry/instrumentation-http': { enabled: true },
      '@opentelemetry/instrumentation-express': { enabled: true },
      '@opentelemetry/instrumentation-pg': { enabled: true },
      '@opentelemetry/instrumentation-redis': { enabled: true },
    }),
  ],
});

sdk.start();

process.on('SIGTERM', () => {
  sdk.shutdown()
    .then(() => console.log('OTel SDK shut down'))
    .catch(err => console.error('Error shutting down OTel SDK', err))
    .finally(() => process.exit(0));
});
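If spans or metrics never show up in your backend, the SDK can report its own diagnostics. A minimal sketch using the diag API from @opentelemetry/api — place it at the very top of tracing.js while debugging, and remove it or lower the level once telemetry is flowing:

// Optional: SDK self-diagnostics (noisy — for setup and debugging only)
const { diag, DiagConsoleLogger, DiagLogLevel } = require('@opentelemetry/api');
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);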
// package.json
{
  "scripts": {
    "start": "node -r ./tracing.js server.js"
  }
}
node --require ./tracing.js server.js
// server.js
const express = require('express');
const { Pool } = require('pg');
const redis = require('redis');

const app = express();
const pool = new Pool({ connectionString: process.env.DATABASE_URL });
const redisClient = redis.createClient({ url: process.env.REDIS_URL });
redisClient.connect().catch(err => console.error('Redis connection failed', err));

app.get('/orders/:id', async (req, res) => {
  const { id } = req.params;

  // Redis cache lookup
  const cached = await redisClient.get(`order:${id}`);
  if (cached) {
    return res.json(JSON.parse(cached));
  }

  // Postgres query
  const { rows } = await pool.query('SELECT * FROM orders WHERE id = $1', [id]);
  if (!rows.length) return res.status(404).json({ error: 'not found' });

  await redisClient.setEx(`order:${id}`, 300, JSON.stringify(rows[0]));
  res.json(rows[0]);
});

app.listen(3000, () => console.log('Listening on :3000'));
const { trace, SpanStatusCode } = require('@opentelemetry/api');

const tracer = trace.getTracer('order-service', '1.4.2');

// `stripeClient` and `getPaymentToken` are assumed to be defined elsewhere in the service.
async function processPayment(orderId, amount, currency) {
  // Create a custom span
  return tracer.startActiveSpan('payment.process', async (span) => {
    try {
      // Add semantic attributes
      span.setAttributes({
        'order.id': orderId,
        'payment.amount': amount,
        'payment.currency': currency,
        'payment.provider': 'stripe',
      });

      const result = await stripeClient.charges.create({
        amount: amount * 100,
        currency,
        source: await getPaymentToken(orderId),
      });

      span.setAttributes({
        'payment.charge_id': result.id,
        'payment.status': result.status,
      });
      span.setStatus({ code: SpanStatusCode.OK });
      return result;
    } catch (err) {
      // Record the exception — this adds a span event with the stack trace
      span.recordException(err);
      span.setStatus({
        code: SpanStatusCode.ERROR,
        message: err.message,
      });
      throw err;
    } finally {
      span.end();
    }
  });
}
async function fulfillOrder(orderId) {
  return tracer.startActiveSpan('order.fulfill', async (parentSpan) => {
    try {
      parentSpan.setAttribute('order.id', orderId);

      // This span is automatically a child of order.fulfill
      const payment = await processPayment(orderId, 99.99, 'usd');

      // Another child span — ended in finally so it closes even if the email fails
      await tracer.startActiveSpan('order.notify', async (notifySpan) => {
        try {
          await sendConfirmationEmail(orderId);
        } finally {
          notifySpan.end();
        }
      });

      return { orderId, payment };
    } finally {
      parentSpan.end();
    }
  });
}
span.addEvent('cache.miss', { 'cache.key': `order:${id}` });
span.addEvent('db.query.start');
// ... query executes ...
span.addEvent('db.query.complete', { 'db.rows_returned': rows.length });
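Inside an auto-instrumented Express handler you never created a span yourself, so grab the current one with trace.getActiveSpan(). A sketch of wiring these events into the /orders/:id route from earlier (same app, pool, and redisClient; the optional chaining guards against the SDK being disabled):

const { trace } = require('@opentelemetry/api');

app.get('/orders/:id', async (req, res) => {
  const { id } = req.params;
  // The active span here is the HTTP/Express span created by auto-instrumentation
  const span = trace.getActiveSpan();

  const cached = await redisClient.get(`order:${id}`);
  if (!cached) {
    span?.addEvent('cache.miss', { 'cache.key': `order:${id}` });
    const { rows } = await pool.query('SELECT * FROM orders WHERE id = $1', [id]);
    span?.addEvent('db.query.complete', { 'db.rows_returned': rows.length });
  }
  // ... rest of the handler as before ...
});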
// metrics.js
const { metrics } = require('@opentelemetry/api');

const meter = metrics.getMeter('order-service', '1.4.2');

// Counter: monotonically increasing (requests, errors, events)
const requestCounter = meter.createCounter('http.requests.total', {
  description: 'Total number of HTTP requests',
});

// Histogram: distribution of values (latency, payload size)
const latencyHistogram = meter.createHistogram('http.request.duration_ms', {
  description: 'HTTP request latency in milliseconds',
  unit: 'ms',
  advice: {
    explicitBucketBoundaries: [5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000],
  },
});

// UpDownCounter: can go up or down (queue depth, active connections)
const activeConnections = meter.createUpDownCounter('db.connections.active', {
  description: 'Active database connections',
});

// Observable Gauge: sampled on demand (CPU, memory — use callbacks)
const memoryGauge = meter.createObservableGauge('process.memory_mb', {
  description: 'Process memory usage in MB',
});

memoryGauge.addCallback((observableResult) => {
  const usage = process.memoryUsage();
  observableResult.observe(usage.heapUsed / 1024 / 1024, { type: 'heap' });
  observableResult.observe(usage.rss / 1024 / 1024, { type: 'rss' });
});

module.exports = { requestCounter, latencyHistogram, activeConnections };
// middleware/metrics.js
const { requestCounter, latencyHistogram } = require('../metrics');

function metricsMiddleware(req, res, next) {
  const start = Date.now();

  res.on('finish', () => {
    const duration = Date.now() - start;
    const labels = {
      method: req.method,
      route: req.route?.path || 'unknown',
      status_code: String(res.statusCode),
    };
    requestCounter.add(1, labels);
    latencyHistogram.record(duration, labels);
  });

  next();
}

module.exports = metricsMiddleware;
// server.js
app.use(require('./middleware/metrics'));
// logger.js — structured logger with automatic trace correlation
const { trace } = require('@opentelemetry/api');

function getTraceContext() {
  const span = trace.getActiveSpan();
  if (!span) return {};
  const { traceId, spanId, traceFlags } = span.spanContext();
  return {
    traceId,
    spanId,
    traceSampled: (traceFlags & 0x01) === 1,
  };
}

const logger = {
  info(message, extra = {}) {
    console.log(JSON.stringify({
      level: 'info',
      message,
      timestamp: new Date().toISOString(),
      service: 'order-service',
      ...getTraceContext(),
      ...extra,
    }));
  },
  error(message, err, extra = {}) {
    console.error(JSON.stringify({
      level: 'error',
      message,
      timestamp: new Date().toISOString(),
      service: 'order-service',
      error: { name: err?.name, message: err?.message, stack: err?.stack },
      ...getTraceContext(),
      ...extra,
    }));
  },
};

module.exports = logger;
// In your route handler
const logger = require('./logger');

app.get('/orders/:id', async (req, res) => {
  logger.info('Fetching order', { orderId: req.params.id });
  try {
    const order = await getOrder(req.params.id);
    logger.info('Order retrieved', { orderId: req.params.id, status: order.status });
    res.json(order);
  } catch (err) {
    logger.error('Failed to fetch order', err, { orderId: req.params.id });
    res.status(500).json({ error: 'internal error' });
  }
});
{ "level": "info", "message": "Order retrieved", "timestamp": "2026-03-22T14:23:01.882Z", "-weight: 500;">service": "order--weight: 500;">service", "traceId": "3e8a1b2c4d5e6f7a8b9c0d1e2f3a4b5c", "spanId": "a1b2c3d4e5f6a7b8", "traceSampled": true, "orderId": "ord_9182", "-weight: 500;">status": "shipped"
}
{ "level": "info", "message": "Order retrieved", "timestamp": "2026-03-22T14:23:01.882Z", "-weight: 500;">service": "order--weight: 500;">service", "traceId": "3e8a1b2c4d5e6f7a8b9c0d1e2f3a4b5c", "spanId": "a1b2c3d4e5f6a7b8", "traceSampled": true, "orderId": "ord_9182", "-weight: 500;">status": "shipped"
}
{ "level": "info", "message": "Order retrieved", "timestamp": "2026-03-22T14:23:01.882Z", "-weight: 500;">service": "order--weight: 500;">service", "traceId": "3e8a1b2c4d5e6f7a8b9c0d1e2f3a4b5c", "spanId": "a1b2c3d4e5f6a7b8", "traceSampled": true, "orderId": "ord_9182", "-weight: 500;">status": "shipped"
}
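If these JSON lines land in Loki (the Grafana provisioning later in this guide assumes a datasource with uid loki), fetching every log for one trace is a single query. A rough sketch — the service stream label is an assumption about how your log shipper tags streams:

{service="order-service"} | json | traceId="3e8a1b2c4d5e6f7a8b9c0d1e2f3a4b5c"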
# docker-compose.yml
version: '3.8'
services:
  jaeger:
    image: jaegertracing/all-in-one:1.54
    ports:
      - "16686:16686"  # Jaeger UI
      - "4317:4317"    # OTLP gRPC
      - "4318:4318"    # OTLP HTTP
    environment:
      - COLLECTOR_OTLP_ENABLED=true
  app:
    build: .
    environment:
      - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://jaeger:4318/v1/traces
      - NODE_ENV=development
    depends_on:
      - jaeger
-weight: 500;">docker-compose up -d
# App runs on :3000, Jaeger UI at http://localhost:16686
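To generate a first trace, hit the endpoint a few times (the order id is just an example value), then pick order-service from the service dropdown in the Jaeger UI:

curl http://localhost:3000/orders/123
# Jaeger UI: http://localhost:16686 → search for traces from order-service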
# otel-collector-config.yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318

processors:
  batch:
    timeout: 1s
    send_batch_size: 1024
  memory_limiter:
    check_interval: 1s
    limit_mib: 512

exporters:
  otlp/tempo:
    endpoint: tempo:4317
    tls:
      insecure: true
  prometheusremotewrite:
    endpoint: http://prometheus:9090/api/v1/write
  logging:
    loglevel: warn

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [memory_limiter, batch]
      exporters: [otlp/tempo]
    metrics:
      receivers: [otlp]
      processors: [memory_limiter, batch]
      exporters: [prometheusremotewrite]
OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
const { TraceIdRatioBasedSampler, ParentBasedSampler } = require('@opentelemetry/sdk-trace-base');

// Sample 10% of traces, but always respect the parent's sampling decision
const sampler = new ParentBasedSampler({
  root: new TraceIdRatioBasedSampler(0.1), // 10% sampling rate
});

const sdk = new NodeSDK({
  sampler,
  // ...rest of config
});
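The same policy can be set without touching code via the standard OTel SDK environment variables, which is handy for tuning per environment. If you go this route, omit the explicit sampler from the SDK config so the variables take effect:

OTEL_TRACES_SAMPLER=parentbased_traceidratio
OTEL_TRACES_SAMPLER_ARG=0.1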
# otel-collector-config.yaml (tail sampling)
processors:
  tail_sampling:
    decision_wait: 10s
    num_traces: 50000
    expected_new_traces_per_sec: 1000
    policies:
      - name: errors-policy
        type: status_code
        status_code: { status_codes: [ERROR] }
      - name: slow-traces-policy
        type: latency
        latency: { threshold_ms: 2000 }
      - name: random-policy
        type: probabilistic
        probabilistic: { sampling_percentage: 5 }
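The processor only takes effect once it is listed in the traces pipeline. A sketch of the service section, assuming the receivers and exporters defined earlier (tail_sampling should run before batch):

# otel-collector-config.yaml (continued)
service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [tail_sampling, batch]
      exporters: [otlp/tempo]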
# -weight: 500;">docker-compose.prod-local.yml
version: '3.8'
services: tempo: image: grafana/tempo:2.3.1 command: ["-config.file=/etc/tempo.yaml"] volumes: - ./tempo.yaml:/etc/tempo.yaml - tempo-data:/var/tempo ports: - "4317:4317" # OTLP gRPC - "3200:3200" # Tempo query API prometheus: image: prom/prometheus:v2.48.0 volumes: - ./prometheus.yml:/etc/prometheus/prometheus.yml ports: - "9090:9090" grafana: image: grafana/grafana:10.2.2 ports: - "3001:3000" environment: - GF_AUTH_ANONYMOUS_ENABLED=true - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin volumes: - ./grafana/provisioning:/etc/grafana/provisioning volumes: tempo-data:
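The compose file mounts a ./prometheus.yml that isn't shown here; a minimal sketch follows. Note that for the collector's prometheusremotewrite exporter to be accepted, Prometheus must be started with its remote-write receiver enabled, which means overriding the container's command:

# prometheus.yml — minimal, since data arrives via remote write
global:
  scrape_interval: 15s

# and in docker-compose.prod-local.yml, under the prometheus service:
#   command:
#     - --config.file=/etc/prometheus/prometheus.yml
#     - --web.enable-remote-write-receiver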
# tempo.yaml
server:
  http_listen_port: 3200

distributor:
  receivers:
    otlp:
      protocols:
        grpc:
        http:

storage:
  trace:
    backend: local
    local:
      path: /var/tempo/traces
    wal:
      path: /var/tempo/wal
# grafana/provisioning/datasources/datasources.yaml
apiVersion: 1
datasources:
  - name: Tempo
    type: tempo
    url: http://tempo:3200
    jsonData:
      tracesToLogsV2:
        datasourceUid: loki
      serviceMap:
        datasourceUid: prometheus
      nodeGraph:
        enabled: true
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    uid: prometheus
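tracesToLogsV2 points at a datasource with uid loki that isn't provisioned above. If you add a Loki container to the stack, a matching entry would look roughly like this (the URL assumes a service named loki on its default port):

  - name: Loki
    type: loki
    url: http://loki:3100
    uid: loki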
{ resource.service.name = "order-service" && span.http.status_code >= 500 } | rate()
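TraceQL also filters on intrinsics like span duration — for example, a rough query for the slow traces the tail-sampling policy keeps:

{ resource.service.name = "order-service" && duration > 2s }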
// tracing.js — production-ready
'use strict';

const { NodeSDK } = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { OTLPMetricExporter } = require('@opentelemetry/exporter-metrics-otlp-http');
const { PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
const { ParentBasedSampler, TraceIdRatioBasedSampler } = require('@opentelemetry/sdk-trace-base');

const isProd = process.env.NODE_ENV === 'production';

const resource = new Resource({
  [SemanticResourceAttributes.SERVICE_NAME]: process.env.OTEL_SERVICE_NAME || 'my-service',
  [SemanticResourceAttributes.SERVICE_VERSION]: process.env.npm_package_version || '0.0.0',
  [SemanticResourceAttributes.DEPLOYMENT_ENVIRONMENT]: process.env.NODE_ENV || 'development',
});

const sdk = new NodeSDK({
  resource,
  sampler: new ParentBasedSampler({
    root: new TraceIdRatioBasedSampler(isProd ? 0.1 : 1.0), // 100% in dev, 10% in prod
  }),
  traceExporter: new OTLPTraceExporter(), // uses OTEL_EXPORTER_OTLP_ENDPOINT env var
  metricReader: new PeriodicExportingMetricReader({
    exporter: new OTLPMetricExporter(),
    exportIntervalMillis: isProd ? 15_000 : 5_000,
  }),
  instrumentations: [
    getNodeAutoInstrumentations({
      '@opentelemetry/instrumentation-fs': { enabled: false },
    }),
  ],
});

sdk.start();
console.log(`OTel SDK started [${process.env.NODE_ENV}]`);

process.on('SIGTERM', async () => {
  await sdk.shutdown();
  process.exit(0);
});
OTEL_SERVICE_NAME=order-service
OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
NODE_ENV=production
The collector's tail-sampling policy from earlier:
- Always keeps traces with errors
- Always keeps traces slower than 2 seconds
- Randomly samples 5% of everything else

What the Grafana stack gives you:
- Service Map — an auto-generated DAG of all your services and their dependencies, with error rate and latency for each edge
- Trace Waterfall — click any span to see attributes, events, and linked logs
- RED Dashboard — Rate, Errors, Duration for each service endpoint, derived automatically from trace data
- Metrics Correlation — jump from a Prometheus alert to the traces that fired during the anomaly window
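As a starting point for that RED dashboard, a few PromQL sketches against the metrics defined in metrics.js. These assume prometheusremotewrite's usual name normalization (dots become underscores), so http.requests.total arrives roughly as http_requests_total — check the actual names in Prometheus first:

# Rate: requests per second by route
sum(rate(http_requests_total[5m])) by (route)

# Errors: share of 5xx responses
sum(rate(http_requests_total{status_code=~"5.."}[5m])) / sum(rate(http_requests_total[5m]))

# Duration: p95 latency in milliseconds
histogram_quantile(0.95, sum(rate(http_request_duration_ms_bucket[5m])) by (le, route))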