6 format: '%(asctime)s - %(name)s - %(message)s'
13 class: logging.StreamHandler
15 formatter: console_stdout
16 stream: ext://sys.stdout
18 class: logging.StreamHandler
20 formatter: console_stderr
21 stream: ext://sys.stderr
23 class: logging.handlers.RotatingFileHandler
26 filename: /tmp/metric.prom
35 handlers: [console_stdout, console_stderr]
44 documentation: Cycles processed by CPUs
52 name: 0x3C # INTEL_CORE_E_CPU_CLK_UNHALTED_THREAD_P
56 #include <linux/ptrace.h>
57 #include <uapi/linux/bpf_perf_event.h>
59 const int max_cpus = 256;
64 char name[TASK_COMM_LEN];
67 BPF_HASH(cpu_cycle, struct key_t);
69 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
70 key->cpu = bpf_get_smp_processor_id();
71 key->pid = bpf_get_current_pid_tgid();
72 bpf_get_current_comm(&(key->name), sizeof(key->name));
75 int on_cpu_cycle(struct bpf_perf_event_data *ctx) {
76 struct key_t key = {};
79 cpu_cycle.increment(key, ctx->sample_period);
85 - name: cpu_instruction
86 documentation: Instructions retired by CPUs
94 name: 0xC0 # INTEL_CORE_E_INST_RETIRED_ANY_P
95 target: on_cpu_instruction
96 table: cpu_instruction
98 #include <linux/ptrace.h>
99 #include <uapi/linux/bpf_perf_event.h>
101 const int max_cpus = 256;
106 char name[TASK_COMM_LEN];
109 BPF_HASH(cpu_instruction, struct key_t);
111 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
112 key->cpu = bpf_get_smp_processor_id();
113 key->pid = bpf_get_current_pid_tgid();
114 bpf_get_current_comm(&(key->name), sizeof(key->name));
117 int on_cpu_instruction(struct bpf_perf_event_data *ctx) {
118 struct key_t key = {};
121 cpu_instruction.increment(key, ctx->sample_period);
127 - name: cache_references
128 documentation: Cache references
135 - type: 0x0 # HARDWARE
136 name: 0x2 # PERF_COUNT_HW_CACHE_REFERENCES
137 target: on_cache_reference
138 table: cache_references
140 #include <linux/ptrace.h>
141 #include <uapi/linux/bpf_perf_event.h>
143 const int max_cpus = 256;
148 char name[TASK_COMM_LEN];
151 BPF_HASH(cache_references, struct key_t);
153 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
154 key->cpu = bpf_get_smp_processor_id();
155 key->pid = bpf_get_current_pid_tgid();
156 bpf_get_current_comm(&(key->name), sizeof(key->name));
159 int on_cache_reference(struct bpf_perf_event_data *ctx) {
160 struct key_t key = {};
163 cache_references.increment(key, ctx->sample_period);
170 documentation: Cache misses
177 - type: 0x0 # HARDWARE
178 name: 0x3 # PERF_COUNT_HW_CACHE_MISSES
179 target: on_cache_miss
182 #include <linux/ptrace.h>
183 #include <uapi/linux/bpf_perf_event.h>
185 const int max_cpus = 256;
190 char name[TASK_COMM_LEN];
193 BPF_HASH(cache_miss, struct key_t);
195 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
196 key->cpu = bpf_get_smp_processor_id();
197 key->pid = bpf_get_current_pid_tgid();
198 bpf_get_current_comm(&(key->name), sizeof(key->name));
201 int on_cache_miss(struct bpf_perf_event_data *ctx) {
202 struct key_t key = {};
205 cache_miss.increment(key, ctx->sample_period);
211 # - name: branch_instruction
212 # documentation: Instructions retired by branch
219 # - type: 0x0 # HARDWARE
220 # name: 0x4 # PERF_COUNT_HW_BRANCH_INSTRUCTIONS
221 # target: on_branch_instruction
222 # table: branch_instruction
224 # #include <linux/ptrace.h>
225 # #include <uapi/linux/bpf_perf_event.h>
227 # const int max_cpus = 256;
232 # char name[TASK_COMM_LEN];
235 # BPF_HASH(branch_instruction, struct key_t);
237 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
238 # key->cpu = bpf_get_smp_processor_id();
239 # key->pid = bpf_get_current_pid_tgid();
240 # bpf_get_current_comm(&(key->name), sizeof(key->name));
243 # int on_branch_instruction(struct bpf_perf_event_data *ctx) {
244 # struct key_t key = {};
247 # branch_instruction.increment(key, ctx->sample_period);
253 # - name: branch_misses # not supported by CPU
254 # documentation: Branch instructions that missed prediction
261 # - type: 0x0 # HARDWARE
262 # name: 0x5 # PERF_COUNT_HW_BRANCH_MISSES
263 # target: on_branch_misses
264 # table: branch_misses
266 # #include <linux/ptrace.h>
267 # #include <uapi/linux/bpf_perf_event.h>
269 # const int max_cpus = 256;
274 # char name[TASK_COMM_LEN];
277 # BPF_HASH(branch_misses, struct key_t);
279 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
280 # key->cpu = bpf_get_smp_processor_id();
281 # key->pid = bpf_get_current_pid_tgid();
282 # bpf_get_current_comm(&(key->name), sizeof(key->name));
285 # int on_branch_misses(struct bpf_perf_event_data *ctx) {
286 # struct key_t key = {};
289 # branch_misses.increment(key, ctx->sample_period);
296 # documentation: Count of bus cycles
303 # - type: 0x0 # HARDWARE
304 # name: 0x6 # PERF_COUNT_HW_BUS_CYCLES
305 # target: on_bus_cycles
308 # #include <linux/ptrace.h>
309 # #include <uapi/linux/bpf_perf_event.h>
311 # const int max_cpus = 256;
316 # char name[TASK_COMM_LEN];
319 # BPF_HASH(bus_cycles, struct key_t);
321 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
322 # key->cpu = bpf_get_smp_processor_id();
323 # key->pid = bpf_get_current_pid_tgid();
324 # bpf_get_current_comm(&(key->name), sizeof(key->name));
326 # int on_bus_cycles(struct bpf_perf_event_data *ctx) {
327 # struct key_t key = {};
330 # bus_cycles.increment(key, ctx->sample_period);
336 # - name: stalled_cycles_frontend # not supported by CPU
337 # documentation: Frontend stalled cycles
344 # - type: 0x0 # HARDWARE
345 # name: 0x7 # PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
346 # target: on_stalled_cycles_frontend
347 # table: stalled_cycles_frontend
349 # #include <linux/ptrace.h>
350 # #include <uapi/linux/bpf_perf_event.h>
352 # const int max_cpus = 256;
357 # char name[TASK_COMM_LEN];
360 # BPF_HASH(stalled_cycles_frontend, struct key_t);
362 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
363 # key->cpu = bpf_get_smp_processor_id();
364 # key->pid = bpf_get_current_pid_tgid();
365 # bpf_get_current_comm(&(key->name), sizeof(key->name));
368 # int on_stalled_cycles_frontend(struct bpf_perf_event_data *ctx) {
369 # struct key_t key = {};
372 # stalled_cycles_frontend.increment(key, ctx->sample_period);
378 # - name: stalled_cycles_backend
379 # documentation: Backend stalled cycles
386 # - type: 0x0 # HARDWARE
387 # name: 0x8 # PERF_COUNT_HW_STALLED_CYCLES_BACKEND
388 # target: on_stalled_cycles_backend
389 # table: stalled_cycles_backend
391 # #include <linux/ptrace.h>
392 # #include <uapi/linux/bpf_perf_event.h>
394 # const int max_cpus = 256;
399 # char name[TASK_COMM_LEN];
402 # BPF_HASH(stalled_cycles_backend, struct key_t);
404 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
405 # key->cpu = bpf_get_smp_processor_id();
406 # key->pid = bpf_get_current_pid_tgid();
407 # bpf_get_current_comm(&(key->name), sizeof(key->name));
410 # int on_stalled_cycles_backend(struct bpf_perf_event_data *ctx) {
411 # struct key_t key = {};
414 # stalled_cycles_backend.increment(key, ctx->sample_period);
420 # - name: referenced_cpu_cycles
421 # documentation: Referenced CPU cycles
428 # - type: 0x0 # HARDWARE
429 # name: 0x9 # PERF_COUNT_HW_REF_CPU_CYCLES
430 # target: on_referenced_cpu_cycles
431 # table: referenced_cpu_cycles
433 # #include <linux/ptrace.h>
434 # #include <uapi/linux/bpf_perf_event.h>
436 # const int max_cpus = 256;
441 # char name[TASK_COMM_LEN];
444 # BPF_HASH(referenced_cpu_cycles, struct key_t);
446 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
447 # key->cpu = bpf_get_smp_processor_id();
448 # key->pid = bpf_get_current_pid_tgid();
449 # bpf_get_current_comm(&(key->name), sizeof(key->name));
452 # int on_referenced_cpu_cycles(struct bpf_perf_event_data *ctx) {
453 # struct key_t key = {};
456 # referenced_cpu_cycles.increment(key, ctx->sample_period);
462 # - name: sw_cpu_clock
463 # documentation: SW CPU clock
470 # - type: 0x1 # SOFTWARE
471 # name: 0x0 # PERF_COUNT_SW_CPU_CLOCK
472 # target: on_sw_cpu_clock
473 # table: sw_cpu_clock
475 # #include <linux/ptrace.h>
476 # #include <uapi/linux/bpf_perf_event.h>
478 # const int max_cpus = 256;
483 # char name[TASK_COMM_LEN];
486 # BPF_HASH(sw_cpu_clock, struct key_t);
488 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
489 # key->cpu = bpf_get_smp_processor_id();
490 # key->pid = bpf_get_current_pid_tgid();
491 # bpf_get_current_comm(&(key->name), sizeof(key->name));
494 # int on_sw_cpu_clock(struct bpf_perf_event_data *ctx) {
495 # struct key_t key = {};
498 # sw_cpu_clock.increment(key, ctx->sample_period);
504 # - name: sw_task_clock
505 # documentation: SW task clock
512 # - type: 0x1 # SOFTWARE
513 # name: 0x1 # PERF_COUNT_SW_TASK_CLOCK
514 # target: on_sw_task_clock
515 # table: sw_task_clock
517 # #include <linux/ptrace.h>
518 # #include <uapi/linux/bpf_perf_event.h>
520 # const int max_cpus = 256;
525 # char name[TASK_COMM_LEN];
528 # BPF_HASH(sw_task_clock, struct key_t);
530 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
531 # key->cpu = bpf_get_smp_processor_id();
532 # key->pid = bpf_get_current_pid_tgid();
533 # bpf_get_current_comm(&(key->name), sizeof(key->name));
536 # int on_sw_task_clock(struct bpf_perf_event_data *ctx) {
537 # struct key_t key = {};
540 # sw_task_clock.increment(key, ctx->sample_period);
546 # - name: sw_page_faults
547 # documentation: SW page faults
554 # - type: 0x1 # SOFTWARE
555 # name: 0x2 # PERF_COUNT_SW_PAGE_FAULTS
556 # target: on_sw_page_faults
557 # table: sw_page_faults
559 # #include <linux/ptrace.h>
560 # #include <uapi/linux/bpf_perf_event.h>
562 # const int max_cpus = 256;
567 # char name[TASK_COMM_LEN];
570 # BPF_HASH(sw_page_faults, struct key_t);
572 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
573 # key->cpu = bpf_get_smp_processor_id();
574 # key->pid = bpf_get_current_pid_tgid();
575 # bpf_get_current_comm(&(key->name), sizeof(key->name));
578 # int on_sw_page_faults(struct bpf_perf_event_data *ctx) {
579 # struct key_t key = {};
582 # sw_page_faults.increment(key, ctx->sample_period);
588 - name: sw_context_switches
589 documentation: SW context switches
596 - type: 0x1 # SOFTWARE
597 name: 0x3 # PERF_COUNT_SW_CONTEXT_SWITCHES
598 target: on_sw_context_switches
599 table: sw_context_switches
601 #include <linux/ptrace.h>
602 #include <uapi/linux/bpf_perf_event.h>
604 const int max_cpus = 256;
609 char name[TASK_COMM_LEN];
612 BPF_HASH(sw_context_switches, struct key_t);
614 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
615 key->cpu = bpf_get_smp_processor_id();
616 key->pid = bpf_get_current_pid_tgid();
617 bpf_get_current_comm(&(key->name), sizeof(key->name));
620 int on_sw_context_switches(struct bpf_perf_event_data *ctx) {
621 struct key_t key = {};
624 sw_context_switches.increment(key, ctx->sample_period);
630 # - name: sw_cpu_migrations
631 # documentation: SW cpu migrations
638 # - type: 0x1 # SOFTWARE
639 # name: 0x4 # PERF_COUNT_SW_CPU_MIGRATIONS
640 # target: on_sw_cpu_migrations
641 # table: sw_cpu_migrations
643 # #include <linux/ptrace.h>
644 # #include <uapi/linux/bpf_perf_event.h>
646 # const int max_cpus = 256;
651 # char name[TASK_COMM_LEN];
654 # BPF_HASH(sw_cpu_migrations, struct key_t);
656 # static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
657 # key->cpu = bpf_get_smp_processor_id();
658 # key->pid = bpf_get_current_pid_tgid();
659 # bpf_get_current_comm(&(key->name), sizeof(key->name));
662 # int on_sw_cpu_migrations(struct bpf_perf_event_data *ctx) {
663 # struct key_t key = {};
666 # sw_cpu_migrations.increment(key, ctx->sample_period);
672 - name: sw_page_faults_min
673 documentation: SW page faults minor
680 - type: 0x1 # SOFTWARE
681 name: 0x5 # PERF_COUNT_SW_PAGE_FAULTS_MIN
682 target: on_sw_page_faults_min
683 table: sw_page_faults_min
685 #include <linux/ptrace.h>
686 #include <uapi/linux/bpf_perf_event.h>
688 const int max_cpus = 256;
693 char name[TASK_COMM_LEN];
696 BPF_HASH(sw_page_faults_min, struct key_t);
698 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
699 key->cpu = bpf_get_smp_processor_id();
700 key->pid = bpf_get_current_pid_tgid();
701 bpf_get_current_comm(&(key->name), sizeof(key->name));
704 int on_sw_page_faults_min(struct bpf_perf_event_data *ctx) {
705 struct key_t key = {};
708 sw_page_faults_min.increment(key, ctx->sample_period);
714 - name: sw_page_faults_maj
715 documentation: SW page faults major
722 - type: 0x1 # SOFTWARE
723 name: 0x6 # PERF_COUNT_SW_PAGE_FAULTS_MAJ
724 target: on_sw_page_faults_maj
725 table: sw_page_faults_maj
727 #include <linux/ptrace.h>
728 #include <uapi/linux/bpf_perf_event.h>
730 const int max_cpus = 256;
735 char name[TASK_COMM_LEN];
738 BPF_HASH(sw_page_faults_maj, struct key_t);
740 static inline __attribute__((always_inline)) void get_key(struct key_t* key) {
741 key->cpu = bpf_get_smp_processor_id();
742 key->pid = bpf_get_current_pid_tgid();
743 bpf_get_current_comm(&(key->name), sizeof(key->name));
746 int on_sw_page_faults_maj(struct bpf_perf_event_data *ctx) {
747 struct key_t key = {};
750 sw_page_faults_maj.increment(key, ctx->sample_period);