┌────────────────────── Linux Kernel (perf events + eBPF) ──────────────────────┐
│ ┌──────────────────────────────────────────────────────────────────────┐ │
│ │ opentelemetry-ebpf-profiler (CO-RE eBPF, perf-format stack trace) │ │
│ │ C/C++ · Go · Rust · Python · Java · NodeJS · .NET · PHP · Ruby │ │
│ │ Automatic Go symbolization · new runtimes · low-overhead sampling │ │
│ └────────────────────────────────────┬─────────────────────────────────┘ │
└────────────────────────────────────────┼──────────────────────────────────────┘
                                         │ profiling samples
┌────────────────────────────────────────▼──────────────────────────────────────┐
│ OpenTelemetry Collector v0.151.0 │
│ ┌──────────────────────────┐ ┌────────────────────────────────────────┐ │
│ │ profiler receiver │──▶│ k8sattributesprocessor │ │
│ │ (Elastic donation) │ │ container.id → namespace/pod/deployment│ │
│ └──────────────────────────┘ └─────────────────┬─────────────────────┘ │
│ │ │
│ ┌──────────────────────────────────▼───────────────────┐ │
│ │ batchprocessor · resourceprocessor · tail_sampling │ │
│ │ (same pipeline reused with traces and metrics) │ │
│ └──────────────────────────────────┬───────────────────┘ │
│ │ OTLP │
│ ▼ │
│ ┌──────────────────────────────────────────┐ │
│ │ otlp/profiles exporter → backend │ │
│ │ trace_id · span_id attributes preserved │ │
│ └──────────────────────────────────────────┘ │
└───────────────────────────────────────────────────────────────────────────────┘
                                │
                                ▼
┌────────────────────────────────────────────────────────────┐
│      OpAMP Supervisor fleet (Instana GA · Bindplane)       │
│   Remote config · health reporting · package management    │
└────────────────────────────────────────────────────────────┘
┌────────────────────── Linux Kernel (perf events + eBPF) ──────────────────────┐
│ ┌──────────────────────────────────────────────────────────────────────┐ │
│ │ opentelemetry-ebpf-profiler (CO-RE eBPF, perf-format stack trace) │ │
│ │ C/C++ · Go · Rust · Python · Java · NodeJS · .NET · PHP · Ruby │ │
│ │ Automatic Go symbolization · new runtimes · low-overhead sampling │ │
│ └────────────────────────────────────┬─────────────────────────────────┘ │
└────────────────────────────────────────┼──────────────────────────────────────┘
                                         │ profiling samples
┌────────────────────────────────────────▼──────────────────────────────────────┐
│ OpenTelemetry Collector v0.151.0 │
│ ┌──────────────────────────┐ ┌────────────────────────────────────────┐ │
│ │ profiler receiver │──▶│ k8sattributesprocessor │ │
│ │ (Elastic donation) │ │ container.id → namespace/pod/deployment│ │
│ └──────────────────────────┘ └─────────────────┬─────────────────────┘ │
│ │ │
│ ┌──────────────────────────────────▼───────────────────┐ │
│ │ batchprocessor · resourceprocessor · tail_sampling │ │
│ │ (same pipeline reused with traces and metrics) │ │
│ └──────────────────────────────────┬───────────────────┘ │
│ │ OTLP │
│ ▼ │
│ ┌──────────────────────────────────────────┐ │
│ │ otlp/profiles exporter → backend │ │
│ │ trace_id · span_id attributes preserved │ │
│ └──────────────────────────────────────────┘ │
└───────────────────────────────────────────────────────────────────────────────┘
                                │
                                ▼
┌────────────────────────────────────────────────────────────┐
│      OpAMP Supervisor fleet (Instana GA · Bindplane)       │
│   Remote config · health reporting · package management    │
└────────────────────────────────────────────────────────────┘
┌────────────────────── Linux Kernel (perf events + eBPF) ──────────────────────┐
│ ┌──────────────────────────────────────────────────────────────────────┐ │
│ │ opentelemetry-ebpf-profiler (CO-RE eBPF, perf-format stack trace) │ │
│ │ C/C++ · Go · Rust · Python · Java · NodeJS · .NET · PHP · Ruby │ │
│ │ Automatic Go symbolization · new runtimes · low-overhead sampling │ │
│ └────────────────────────────────────┬─────────────────────────────────┘ │
└────────────────────────────────────────┼──────────────────────────────────────┘
                                         │ profiling samples
┌────────────────────────────────────────▼──────────────────────────────────────┐
│ OpenTelemetry Collector v0.151.0 │
│ ┌──────────────────────────┐ ┌────────────────────────────────────────┐ │
│ │ profiler receiver │──▶│ k8sattributesprocessor │ │
│ │ (Elastic donation) │ │ container.id → namespace/pod/deployment│ │
│ └──────────────────────────┘ └─────────────────┬─────────────────────┘ │
│ │ │
│ ┌──────────────────────────────────▼───────────────────┐ │
│ │ batchprocessor · resourceprocessor · tail_sampling │ │
│ │ (same pipeline reused with traces and metrics) │ │
│ └──────────────────────────────────┬───────────────────┘ │
│ │ OTLP │
│ ▼ │
│ ┌──────────────────────────────────────────┐ │
│ │ otlp/profiles exporter → backend │ │
│ │ trace_id · span_id attributes preserved │ │
│ └──────────────────────────────────────────┘ │
└───────────────────────────────────────────────────────────────────────────────┘
                                │
                                ▼
┌────────────────────────────────────────────────────────────┐
│      OpAMP Supervisor fleet (Instana GA · Bindplane)       │
│   Remote config · health reporting · package management    │
└────────────────────────────────────────────────────────────┘
# otel-collector-config.yaml — v0.151.0 recommended baseline
---
extensions:
  opamp:  # ❶ Register supervisor (Instana/Bindplane/etc.)
    server:
      ws:
        endpoint: wss://opamp.example.com/v1/opamp
    capabilities:
      reports_effective_config: true
      accepts_remote_config: true
      reports_health: true

receivers:
  otlp:  # metrics, traces, logs
    protocols:
      grpc:
        # host:port values are quoted so YAML never misreads them
        endpoint: "0.0.0.0:4317"
      http:
        endpoint: "0.0.0.0:4318"
  profiler:  # ❷ fourth signal (Alpha)
    sampling_period: 19ms  # ~50Hz, Elastic recommended default
    include_kernel: false
    metadata:
      include_pod_labels: true

processors:
  k8sattributes:  # ❸ container.id → namespace/pod/deployment
    auth_type: serviceAccount
    extract:
      metadata:
        - k8s.namespace.name
        - k8s.pod.name
        - k8s.deployment.name
        - k8s.node.name
  batch:
    send_batch_size: 8192
    timeout: 5s

exporters:
  otlp:
    endpoint: "backend.example.com:4317"
    sending_queue:
      enabled: true  # ❹ v0.151.0 — send_failed metric now carries error.type / error.permanent

service:
  extensions: [opamp]
  telemetry:
    metrics:
      level: detailed  # required to inspect send_failed attributes
  pipelines:
    profiles:  # ❺ new signal pipeline
      receivers: [profiler]
      processors: [k8sattributes, batch]
      exporters: [otlp]
# otel-collector-config.yaml — v0.151.0 recommended baseline
---
extensions:
  opamp:  # ❶ Register supervisor (Instana/Bindplane/etc.)
    server:
      ws:
        endpoint: wss://opamp.example.com/v1/opamp
    capabilities:
      reports_effective_config: true
      accepts_remote_config: true
      reports_health: true

receivers:
  otlp:  # metrics, traces, logs
    protocols:
      grpc:
        # host:port values are quoted so YAML never misreads them
        endpoint: "0.0.0.0:4317"
      http:
        endpoint: "0.0.0.0:4318"
  profiler:  # ❷ fourth signal (Alpha)
    sampling_period: 19ms  # ~50Hz, Elastic recommended default
    include_kernel: false
    metadata:
      include_pod_labels: true

processors:
  k8sattributes:  # ❸ container.id → namespace/pod/deployment
    auth_type: serviceAccount
    extract:
      metadata:
        - k8s.namespace.name
        - k8s.pod.name
        - k8s.deployment.name
        - k8s.node.name
  batch:
    send_batch_size: 8192
    timeout: 5s

exporters:
  otlp:
    endpoint: "backend.example.com:4317"
    sending_queue:
      enabled: true  # ❹ v0.151.0 — send_failed metric now carries error.type / error.permanent

service:
  extensions: [opamp]
  telemetry:
    metrics:
      level: detailed  # required to inspect send_failed attributes
  pipelines:
    profiles:  # ❺ new signal pipeline
      receivers: [profiler]
      processors: [k8sattributes, batch]
      exporters: [otlp]
# otel-collector-config.yaml — v0.151.0 recommended baseline
---
extensions:
  opamp:  # ❶ Register supervisor (Instana/Bindplane/etc.)
    server:
      ws:
        endpoint: wss://opamp.example.com/v1/opamp
    capabilities:
      reports_effective_config: true
      accepts_remote_config: true
      reports_health: true

receivers:
  otlp:  # metrics, traces, logs
    protocols:
      grpc:
        # host:port values are quoted so YAML never misreads them
        endpoint: "0.0.0.0:4317"
      http:
        endpoint: "0.0.0.0:4318"
  profiler:  # ❷ fourth signal (Alpha)
    sampling_period: 19ms  # ~50Hz, Elastic recommended default
    include_kernel: false
    metadata:
      include_pod_labels: true

processors:
  k8sattributes:  # ❸ container.id → namespace/pod/deployment
    auth_type: serviceAccount
    extract:
      metadata:
        - k8s.namespace.name
        - k8s.pod.name
        - k8s.deployment.name
        - k8s.node.name
  batch:
    send_batch_size: 8192
    timeout: 5s

exporters:
  otlp:
    endpoint: "backend.example.com:4317"
    sending_queue:
      enabled: true  # ❹ v0.151.0 — send_failed metric now carries error.type / error.permanent

service:
  extensions: [opamp]
  telemetry:
    metrics:
      level: detailed  # required to inspect send_failed attributes
  pipelines:
    profiles:  # ❺ new signal pipeline
      receivers: [profiler]
      processors: [k8sattributes, batch]
      exporters: [otlp]
[Old workflow]
APM shows a slow span → log into Pyroscope separately → align times manually →
match function names manually → form a hypothesis → ~30–60 minutes on average

[After Profiles Alpha]
APM shows a slow span → click → call stack appears in the same view →
container.id auto-attaches K8s context → ~2–5 minutes on average
[Old workflow]
APM shows a slow span → log into Pyroscope separately → align times manually →
match function names manually → form a hypothesis → ~30–60 minutes on average

[After Profiles Alpha]
APM shows a slow span → click → call stack appears in the same view →
container.id auto-attaches K8s context → ~2–5 minutes on average
[Old workflow]
APM shows a slow span → log into Pyroscope separately → align times manually →
match function names manually → form a hypothesis → ~30–60 minutes on average

[After Profiles Alpha]
APM shows a slow span → click → call stack appears in the same view →
container.id auto-attaches K8s context → ~2–5 minutes on average