From beaf4072f5f8f9ce2086a19aa530438631a132a1 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 22 Nov 2023 11:23:19 +0000
Subject: [PATCH] tetragon: Remove generic_kprobe_process_event* programs

There's no need to keep a separate kprobe program for each
generic_kprobe_process_eventX call; we can just tail call into a single
program with an increasing index.

The number of loaded programs gets reduced, so the TestLoad* tests need
to be fixed as well.

I had to add several &= masks to make it verifiable under 5.4; I can't
find a better solution yet.

Signed-off-by: Jiri Olsa
---
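Note (kept below the --- marker, so not part of the commit message): after this
change a single process_event program per hook type walks the arguments by
re-tail-calling itself, keeping the current argument index in the per-CPU heap
entry (filter_tailcall_index) and masking it so the 5.4 verifier can prove it
stays in bounds. The plain C sketch below only models that control flow for
illustration; the names fake_event, fake_tail_call, ARG_INDEX_MASK and the mask
value are invented for the example and are not the real Tetragon types, maps,
or BPF helpers.

/* build: cc -o sketch sketch.c && ./sketch */
#include <stdio.h>

#define ARG_INDEX_MASK 7 /* illustrative power-of-two-minus-one mask, not the real value */

struct fake_event {
        int filter_tailcall_index; /* replaces the old per-program constant 0..4 */
        long args[5];
};

static void fake_process_event(struct fake_event *e);

/* Stands in for tail_call(ctx, tailcalls, slot): slot 1 re-enters
 * process_event, slot 3 hands off to the filter/output programs. */
static void fake_tail_call(struct fake_event *e, int slot)
{
        if (slot == 1)
                fake_process_event(e);
        else
                printf("all arguments read, continue to filters and output\n");
}

static void fake_process_event(struct fake_event *e)
{
        /* Mask keeps the index provably bounded, like the asm "&=" in generic_calls.h. */
        int index = e->filter_tailcall_index & ARG_INDEX_MASK;

        printf("reading argument %d = %ld\n", index, e->args[index]);

        if (index < 4) {
                e->filter_tailcall_index = index + 1;
                fake_tail_call(e, 1); /* same program again, next argument */
                return;
        }
        e->filter_tailcall_index = 0;
        fake_tail_call(e, 3); /* last argument processed */
}

int main(void)
{
        struct fake_event e = { 0, { 10, 20, 30, 40, 50 } };

        fake_process_event(&e);
        return 0;
}

Slots 1 and 3 mirror the tail-call indexes this patch uses for the kprobe
programs (process_event and filter_arg), and the masking mirrors the asm "&="
workaround added in generic_calls.h.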
 bpf/process/bpf_generic_kprobe.c       | 46 +++++-------------------
 bpf/process/bpf_generic_tracepoint.c   | 48 +++++---------------------
 bpf/process/bpf_generic_uprobe.c       | 43 +++++------------------
 bpf/process/generic_calls.h            | 21 +++++++----
 bpf/process/types/basic.h              | 13 +++----
 pkg/sensors/tracing/kprobe_test.go     | 45 +++++++++++-------------
 pkg/sensors/tracing/tracepoint_test.go | 28 +++++++--------
 pkg/sensors/tracing/uprobe_test.go     | 27 +++++++--------
 8 files changed, 91 insertions(+), 180 deletions(-)

diff --git a/bpf/process/bpf_generic_kprobe.c b/bpf/process/bpf_generic_kprobe.c
index 0793f1b0a46..3bd1df2d66d 100644
--- a/bpf/process/bpf_generic_kprobe.c
+++ b/bpf/process/bpf_generic_kprobe.c
@@ -113,7 +113,7 @@ generic_kprobe_start_process_filter(void *ctx)
 #endif

         /* Tail call into filters. */
-        tail_call(ctx, &kprobe_calls, 5);
+        tail_call(ctx, &kprobe_calls, 2);
         return 0;
 }
@@ -155,7 +155,7 @@ generic_kprobe_event(struct pt_regs *ctx)
 }

 __attribute__((section("kprobe/0"), used)) int
-generic_kprobe_process_event0(void *ctx)
+generic_kprobe_setup_event(void *ctx)
 {
         return generic_process_event_and_setup(
                 ctx, (struct bpf_map_def *)&process_call_heap,
@@ -165,9 +165,9 @@
 }

 __attribute__((section("kprobe/1"), used)) int
-generic_kprobe_process_event1(void *ctx)
+generic_kprobe_process_event(void *ctx)
 {
-        return generic_process_event(ctx, 1,
+        return generic_process_event(ctx,
                         (struct bpf_map_def *)&process_call_heap,
                         (struct bpf_map_def *)&kprobe_calls,
                         (struct bpf_map_def *)&config_map,
@@ -175,36 +175,6 @@
 }

 __attribute__((section("kprobe/2"), used)) int
-generic_kprobe_process_event2(void *ctx)
-{
-        return generic_process_event(ctx, 2,
-                        (struct bpf_map_def *)&process_call_heap,
-                        (struct bpf_map_def *)&kprobe_calls,
-                        (struct bpf_map_def *)&config_map,
-                        (struct bpf_map_def *)data_heap_ptr);
-}
-
-__attribute__((section("kprobe/3"), used)) int
-generic_kprobe_process_event3(void *ctx)
-{
-        return generic_process_event(ctx, 3,
-                        (struct bpf_map_def *)&process_call_heap,
-                        (struct bpf_map_def *)&kprobe_calls,
-                        (struct bpf_map_def *)&config_map,
-                        (struct bpf_map_def *)data_heap_ptr);
-}
-
-__attribute__((section("kprobe/4"), used)) int
-generic_kprobe_process_event4(void *ctx)
-{
-        return generic_process_event(ctx, 4,
-                        (struct bpf_map_def *)&process_call_heap,
-                        (struct bpf_map_def *)&kprobe_calls,
-                        (struct bpf_map_def *)&config_map,
-                        (struct bpf_map_def *)data_heap_ptr);
-}
-
-__attribute__((section("kprobe/5"), used)) int
 generic_kprobe_process_filter(void *ctx)
 {
         struct msg_generic_kprobe *msg;
@@ -217,7 +187,7 @@ generic_kprobe_process_filter(void *ctx)
         ret = generic_process_filter(&msg->sel, &msg->current, &msg->ns,
                         &msg->caps, &filter_map, msg->idx);
         if (ret == PFILTER_CONTINUE)
-                tail_call(ctx, &kprobe_calls, 5);
+                tail_call(ctx, &kprobe_calls, 2);
         else if (ret == PFILTER_ACCEPT)
                 tail_call(ctx, &kprobe_calls, 0);
         /* If filter does not accept drop it. Ideally we would
@@ -229,7 +199,7 @@ generic_kprobe_process_filter(void *ctx)

 // Filter tailcalls: kprobe/6...kprobe/10
 // see also: MIN_FILTER_TAILCALL, MAX_FILTER_TAILCALL
-__attribute__((section("kprobe/6"), used)) int
+__attribute__((section("kprobe/3"), used)) int
 generic_kprobe_filter_arg(void *ctx)
 {
         return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -238,7 +208,7 @@
                         (struct bpf_map_def *)&config_map);
 }

-__attribute__((section("kprobe/7"), used)) int
+__attribute__((section("kprobe/4"), used)) int
 generic_kprobe_actions(void *ctx)
 {
         return generic_actions(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -247,7 +217,7 @@
                         (struct bpf_map_def *)&override_tasks);
 }

-__attribute__((section("kprobe/8"), used)) int
+__attribute__((section("kprobe/5"), used)) int
 generic_kprobe_output(void *ctx)
 {
         return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_KPROBE);
diff --git a/bpf/process/bpf_generic_tracepoint.c b/bpf/process/bpf_generic_tracepoint.c
index 64a9dedea66..36a84959f06 100644
--- a/bpf/process/bpf_generic_tracepoint.c
+++ b/bpf/process/bpf_generic_tracepoint.c
@@ -196,51 +196,19 @@ generic_tracepoint_event(struct generic_tracepoint_event_arg *ctx)
 #ifdef __CAP_CHANGES_FILTER
         msg->sel.match_cap = 0;
 #endif
-        tail_call(ctx, &tp_calls, 5);
+        tail_call(ctx, &tp_calls, 2);
         return 0;
 }

-__attribute__((section("tracepoint/0"), used)) int
-generic_tracepoint_event0(void *ctx)
-{
-        return generic_process_event(ctx, 0, (struct bpf_map_def *)&tp_heap,
-                        (struct bpf_map_def *)&tp_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
 __attribute__((section("tracepoint/1"), used)) int
-generic_tracepoint_event1(void *ctx)
+generic_tracepoint_process_event(void *ctx)
 {
-        return generic_process_event(ctx, 1, (struct bpf_map_def *)&tp_heap,
+        return generic_process_event(ctx, (struct bpf_map_def *)&tp_heap,
                         (struct bpf_map_def *)&tp_calls,
                         (struct bpf_map_def *)&config_map, 0);
 }

 __attribute__((section("tracepoint/2"), used)) int
-generic_tracepoint_event2(void *ctx)
-{
-        return generic_process_event(ctx, 2, (struct bpf_map_def *)&tp_heap,
-                        (struct bpf_map_def *)&tp_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
-__attribute__((section("tracepoint/3"), used)) int
-generic_tracepoint_event3(void *ctx)
-{
-        return generic_process_event(ctx, 3, (struct bpf_map_def *)&tp_heap,
-                        (struct bpf_map_def *)&tp_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
-__attribute__((section("tracepoint/4"), used)) int
-generic_tracepoint_event4(void *ctx)
-{
-        return generic_process_event(ctx, 4, (struct bpf_map_def *)&tp_heap,
-                        (struct bpf_map_def *)&tp_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
-__attribute__((section("tracepoint/5"), used)) int
 generic_tracepoint_filter(void *ctx)
 {
         struct msg_generic_kprobe *msg;
@@ -253,9 +221,9 @@ generic_tracepoint_filter(void *ctx)
         ret = generic_process_filter(&msg->sel, &msg->current, &msg->ns,
                         &msg->caps, &filter_map, msg->idx);
         if (ret == PFILTER_CONTINUE)
-                tail_call(ctx, &tp_calls, 5);
+                tail_call(ctx, &tp_calls, 2);
         else if (ret == PFILTER_ACCEPT)
-                tail_call(ctx, &tp_calls, 0);
+                tail_call(ctx, &tp_calls, 1);
         /* If filter does not accept drop it. Ideally we would
          * log error codes for later review, TBD.
          */
@@ -265,7 +233,7 @@

 // Filter tailcalls: tracepoint/6...tracepoint/10
 // see also: MIN_FILTER_TAILCALL, MAX_FILTER_TAILCALL
-__attribute__((section("tracepoint/6"), used)) int
+__attribute__((section("tracepoint/3"), used)) int
 generic_tracepoint_arg(void *ctx)
 {
         return filter_read_arg(ctx, (struct bpf_map_def *)&tp_heap,
@@ -274,7 +242,7 @@
                         (struct bpf_map_def *)&config_map);
 }

-__attribute__((section("tracepoint/7"), used)) int
+__attribute__((section("tracepoint/4"), used)) int
 generic_tracepoint_actions(void *ctx)
 {
         return generic_actions(ctx, (struct bpf_map_def *)&tp_heap,
@@ -283,7 +251,7 @@
                         (void *)0);
 }

-__attribute__((section("tracepoint/8"), used)) int
+__attribute__((section("tracepoint/5"), used)) int
 generic_tracepoint_output(void *ctx)
 {
         return generic_output(ctx, (struct bpf_map_def *)&tp_heap, MSG_OP_GENERIC_TRACEPOINT);
diff --git a/bpf/process/bpf_generic_uprobe.c b/bpf/process/bpf_generic_uprobe.c
index 80ad2110424..5f414c9e760 100644
--- a/bpf/process/bpf_generic_uprobe.c
+++ b/bpf/process/bpf_generic_uprobe.c
@@ -89,7 +89,7 @@ generic_uprobe_start_process_filter(void *ctx)
         if (!generic_process_filter_binary(config))
                 return 0;
         /* Tail call into filters. */
-        tail_call(ctx, &uprobe_calls, 5);
+        tail_call(ctx, &uprobe_calls, 2);
         return 0;
 }
@@ -100,7 +100,7 @@ generic_uprobe_event(struct pt_regs *ctx)
 }

 __attribute__((section("uprobe/0"), used)) int
-generic_uprobe_process_event0(void *ctx)
+generic_uprobe_setup_event(void *ctx)
 {
         return generic_process_event_and_setup(
                 ctx, (struct bpf_map_def *)&process_call_heap,
@@ -109,42 +109,15 @@
 }

 __attribute__((section("uprobe/1"), used)) int
-generic_uprobe_process_event1(void *ctx)
+generic_uprobe_process_event(void *ctx)
 {
-        return generic_process_event(ctx, 1,
+        return generic_process_event(ctx,
                         (struct bpf_map_def *)&process_call_heap,
                         (struct bpf_map_def *)&uprobe_calls,
                         (struct bpf_map_def *)&config_map, 0);
 }

 __attribute__((section("uprobe/2"), used)) int
-generic_uprobe_process_event2(void *ctx)
-{
-        return generic_process_event(ctx, 2,
-                        (struct bpf_map_def *)&process_call_heap,
-                        (struct bpf_map_def *)&uprobe_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
-__attribute__((section("uprobe/3"), used)) int
-generic_uprobe_process_event3(void *ctx)
-{
-        return generic_process_event(ctx, 3,
-                        (struct bpf_map_def *)&process_call_heap,
-                        (struct bpf_map_def *)&uprobe_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
-__attribute__((section("uprobe/4"), used)) int
-generic_uprobe_process_event4(void *ctx)
-{
-        return generic_process_event(ctx, 4,
-                        (struct bpf_map_def *)&process_call_heap,
-                        (struct bpf_map_def *)&uprobe_calls,
-                        (struct bpf_map_def *)&config_map, 0);
-}
-
-__attribute__((section("uprobe/5"), used)) int
 generic_uprobe_process_filter(void *ctx)
 {
         struct msg_generic_kprobe *msg;
@@ -157,7 +130,7 @@ generic_uprobe_process_filter(void *ctx)
         ret = generic_process_filter(&msg->sel, &msg->current, &msg->ns,
                         &msg->caps, &filter_map, msg->idx);
         if (ret == PFILTER_CONTINUE)
-                tail_call(ctx, &uprobe_calls, 5);
+                tail_call(ctx, &uprobe_calls, 2);
         else if (ret == PFILTER_ACCEPT)
                 tail_call(ctx, &uprobe_calls, 0);
         /* If filter does not accept drop it. Ideally we would
@@ -166,7 +139,7 @@ generic_uprobe_process_filter(void *ctx)
         return PFILTER_REJECT;
 }

-__attribute__((section("uprobe/6"), used)) int
+__attribute__((section("uprobe/3"), used)) int
 generic_uprobe_filter_arg(void *ctx)
 {
         return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -175,7 +148,7 @@
                         (struct bpf_map_def *)&config_map);
 }

-__attribute__((section("uprobe/7"), used)) int
+__attribute__((section("uprobe/4"), used)) int
 generic_uprobe_actions(void *ctx)
 {
         return generic_actions(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -184,7 +157,7 @@
                         (void *)0);
 }

-__attribute__((section("uprobe/8"), used)) int
+__attribute__((section("uprobe/5"), used)) int
 generic_uprobe_output(void *ctx)
 {
         return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_UPROBE);
diff --git a/bpf/process/generic_calls.h b/bpf/process/generic_calls.h
index aad8c6ae7b5..20c80507546 100644
--- a/bpf/process/generic_calls.h
+++ b/bpf/process/generic_calls.h
@@ -5,18 +5,19 @@
 #define __GENERIC_CALLS_H__

 #include "bpf_tracing.h"
+#include "types/basic.h"

 #define MAX_TOTAL 9000

 static inline __attribute__((always_inline)) int
-generic_process_event(void *ctx, int index, struct bpf_map_def *heap_map,
+generic_process_event(void *ctx, struct bpf_map_def *heap_map,
                       struct bpf_map_def *tailcals,
                       struct bpf_map_def *config_map,
                       struct bpf_map_def *data_heap)
 {
         struct msg_generic_kprobe *e;
         struct event_config *config;
+        int index, zero = 0;
         unsigned long a;
-        int zero = 0;
         long ty, total;

         e = map_lookup_elem(heap_map, &zero);
@@ -27,6 +28,11 @@ generic_process_event(void *ctx, int index, struct bpf_map_def *heap_map,
         if (!config)
                 return 0;

+        index = e->filter_tailcall_index;
+        asm volatile("%[index] &= %0 ;\n"
+                     : [index] "+r"(index)
+                     : "i"(MAX_SELECTORS_MASK));
+
         a = (&e->a0)[index];
         total = e->common.size;
@@ -53,11 +59,14 @@ generic_process_event(void *ctx, int index, struct bpf_map_def *heap_map,
         }
         e->common.size = total;
         /* Continue to process other arguments. */
-        if (index < 4)
-                tail_call(ctx, tailcals, index + 1);
+        if (index < 4) {
+                e->filter_tailcall_index = index + 1;
+                tail_call(ctx, tailcals, 1);
+        }

         /* Last argument, go send.. */
-        tail_call(ctx, tailcals, 6);
+        e->filter_tailcall_index = 0;
+        tail_call(ctx, tailcals, 3);
         return 0;
 }
@@ -146,7 +155,7 @@ generic_process_event_and_setup(struct pt_regs *ctx,
         generic_process_init(e, MSG_OP_GENERIC_UPROBE, config);
 #endif

-        return generic_process_event(ctx, 0, heap_map, tailcals, config_map, data_heap);
+        return generic_process_event(ctx, heap_map, tailcals, config_map, data_heap);
 }

 #endif /* __GENERIC_CALLS_H__ */
diff --git a/bpf/process/types/basic.h b/bpf/process/types/basic.h
index f878e4f7edf..66b5566f333 100644
--- a/bpf/process/types/basic.h
+++ b/bpf/process/types/basic.h
@@ -215,7 +215,7 @@ static inline __attribute__((always_inline)) int return_error(int *s, int err)
 }

 static inline __attribute__((always_inline)) char *
-args_off(struct msg_generic_kprobe *e, long off)
+args_off(struct msg_generic_kprobe *e, unsigned long off)
 {
         asm volatile("%[off] &= 0x3fff;\n"
                      ::[off] "+r"(off) :);
@@ -2182,7 +2182,7 @@ filter_read_arg(void *ctx, struct bpf_map_def *heap,
                 index++;
                 if (index <= MAX_SELECTORS && e->sel.active[index & MAX_SELECTORS_MASK]) {
                         e->filter_tailcall_index = index;
-                        tail_call(ctx, tailcalls, MIN_FILTER_TAILCALL);
+                        tail_call(ctx, tailcalls, 3);
                 }
                 // reject if we did not attempt to tailcall, or if tailcall failed.
                 return filter_args_reject(e->func_id);
@@ -2192,10 +2192,10 @@ filter_read_arg(void *ctx, struct bpf_map_def *heap,
         // otherwise pass==1 indicates using default action.
         if (pass > 1) {
                 e->pass = pass;
-                tail_call(ctx, tailcalls, 7);
+                tail_call(ctx, tailcalls, 4);
         }

-        tail_call(ctx, tailcalls, 8);
+        tail_call(ctx, tailcalls, 5);
         return 1;
 }
@@ -2237,7 +2237,7 @@ generic_actions(void *ctx, struct bpf_map_def *heap,
         postit = do_actions(ctx, e, actions, override_tasks);
         if (postit)
-                tail_call(ctx, tailcalls, 8);
+                tail_call(ctx, tailcalls, 5);

         return 1;
 }
@@ -2318,10 +2318,11 @@ read_call_arg(void *ctx, struct msg_generic_kprobe *e, int index, int type,
         if (orig_off >= 16383 - min_size) {
                 return 0;
         }
+        orig_off &= 16383;
         args = args_off(e, orig_off);

         /* Cache args offset for filter use later */
-        e->argsoff[index] = orig_off;
+        e->argsoff[index & MAX_SELECTORS_MASK] = orig_off;

         switch (type) {
         case iov_iter_type:
diff --git a/pkg/sensors/tracing/kprobe_test.go b/pkg/sensors/tracing/kprobe_test.go
index bca5fec54fb..ecb6a2cb981 100644
--- a/pkg/sensors/tracing/kprobe_test.go
+++ b/pkg/sensors/tracing/kprobe_test.go
@@ -3902,42 +3902,39 @@ func TestLoadKprobeSensor(t *testing.T) {
         var sensorProgs = []tus.SensorProg{
                 // kprobe
                 0: tus.SensorProg{Name: "generic_kprobe_event", Type: ebpf.Kprobe},
-                1: tus.SensorProg{Name: "generic_kprobe_process_event0", Type: ebpf.Kprobe},
-                2: tus.SensorProg{Name: "generic_kprobe_process_event1", Type: ebpf.Kprobe},
-                3: tus.SensorProg{Name: "generic_kprobe_process_event2", Type: ebpf.Kprobe},
-                4: tus.SensorProg{Name: "generic_kprobe_process_event3", Type: ebpf.Kprobe},
-                5: tus.SensorProg{Name: "generic_kprobe_process_event4", Type: ebpf.Kprobe},
-                6: tus.SensorProg{Name: "generic_kprobe_filter_arg", Type: ebpf.Kprobe},
-                7: tus.SensorProg{Name: "generic_kprobe_process_filter", Type: ebpf.Kprobe},
-                8: tus.SensorProg{Name: "generic_kprobe_actions", Type: ebpf.Kprobe},
-                9: tus.SensorProg{Name: "generic_kprobe_output", Type: ebpf.Kprobe},
+                1: tus.SensorProg{Name: "generic_kprobe_setup_event", Type: ebpf.Kprobe},
+                2: tus.SensorProg{Name: "generic_kprobe_process_event", Type: ebpf.Kprobe},
+                3: tus.SensorProg{Name: "generic_kprobe_filter_arg", Type: ebpf.Kprobe},
+                4: tus.SensorProg{Name: "generic_kprobe_process_filter", Type: ebpf.Kprobe},
+                5: tus.SensorProg{Name: "generic_kprobe_actions", Type: ebpf.Kprobe},
+                6: tus.SensorProg{Name: "generic_kprobe_output", Type: ebpf.Kprobe},

                 // retkprobe
-                10: tus.SensorProg{Name: "generic_retkprobe_event", Type: ebpf.Kprobe},
-                11: tus.SensorProg{Name: "generic_retkprobe_output", Type: ebpf.Kprobe},
+                7: tus.SensorProg{Name: "generic_retkprobe_event", Type: ebpf.Kprobe},
+                8: tus.SensorProg{Name: "generic_retkprobe_output", Type: ebpf.Kprobe},
         }

         var sensorMaps = []tus.SensorMap{
                 // all kprobe programs
-                tus.SensorMap{Name: "process_call_heap", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}},
+                tus.SensorMap{Name: "process_call_heap", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8}},

                 // all but generic_kprobe_output
-                tus.SensorMap{Name: "kprobe_calls", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8}},
+                tus.SensorMap{Name: "kprobe_calls", Progs: []uint{0, 1, 2, 3, 4, 5}},

                 // generic_retkprobe_event
-                tus.SensorMap{Name: "retkprobe_calls", Progs: []uint{10}},
+                tus.SensorMap{Name: "retkprobe_calls", Progs: []uint{7}},

-                // generic_kprobe_process_filter,generic_kprobe_filter_arg*,
+                // generic_kprobe_process_filter,generic_kprobe_filter_arg,
                 // generic_kprobe_actions,generic_kprobe_output
-                tus.SensorMap{Name: "filter_map", Progs: []uint{6, 7, 8}},
+                tus.SensorMap{Name: "filter_map", Progs: []uint{3, 4, 5}},

                 // generic_kprobe_actions
-                tus.SensorMap{Name: "override_tasks", Progs: []uint{8}},
+                tus.SensorMap{Name: "override_tasks", Progs: []uint{5}},

                 // all kprobe but generic_kprobe_process_filter,generic_retkprobe_event
-                tus.SensorMap{Name: "config_map", Progs: []uint{0, 1, 2, 3, 4, 5, 6}},
+                tus.SensorMap{Name: "config_map", Progs: []uint{0, 1, 2, 3}},

                 // generic_kprobe_process_event*,generic_kprobe_actions,retkprobe
-                tus.SensorMap{Name: "fdinstall_map", Progs: []uint{1, 2, 3, 4, 5, 8, 10}},
+                tus.SensorMap{Name: "fdinstall_map", Progs: []uint{1, 2, 5, 7}},

                 // generic_kprobe_event
                 tus.SensorMap{Name: "tg_conf_map", Progs: []uint{0}},
@@ -3945,19 +3942,19 @@ func TestLoadKprobeSensor(t *testing.T) {

         if kernels.EnableLargeProgs() {
                 // shared with base sensor
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 6, 7, 8, 9, 10}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 3, 4, 5, 6, 7}})

                 // generic_kprobe_process_event*,generic_kprobe_output,generic_retkprobe_output
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "tcpmon_map", Progs: []uint{1, 2, 3, 4, 5, 9, 11}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "tcpmon_map", Progs: []uint{1, 2, 6, 8}})

                 // generic_kprobe_process_event*,generic_kprobe_actions,retkprobe
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "socktrack_map", Progs: []uint{1, 2, 3, 4, 5, 8, 10}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "socktrack_map", Progs: []uint{1, 2, 5, 7}})
         } else {
                 // shared with base sensor
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 6, 7, 10}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 3, 4, 7}})

                 // generic_kprobe_output,generic_retkprobe_output
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "tcpmon_map", Progs: []uint{9, 11}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "tcpmon_map", Progs: []uint{6, 8}})
         }

         readHook := `
diff --git a/pkg/sensors/tracing/tracepoint_test.go b/pkg/sensors/tracing/tracepoint_test.go
index 8080f274a02..d7631a78a3d 100644
--- a/pkg/sensors/tracing/tracepoint_test.go
+++ b/pkg/sensors/tracing/tracepoint_test.go
@@ -420,34 +420,30 @@ func TestLoadTracepointSensor(t *testing.T) {
         var sensorProgs = []tus.SensorProg{
                 0: tus.SensorProg{Name: "generic_tracepoint_event", Type: ebpf.TracePoint},
                 1: tus.SensorProg{Name: "generic_tracepoint_arg", Type: ebpf.TracePoint},
-                2: tus.SensorProg{Name: "generic_tracepoint_event0", Type: ebpf.TracePoint},
-                3: tus.SensorProg{Name: "generic_tracepoint_event1", Type: ebpf.TracePoint},
-                4: tus.SensorProg{Name: "generic_tracepoint_event2", Type: ebpf.TracePoint},
-                5: tus.SensorProg{Name: "generic_tracepoint_event3", Type: ebpf.TracePoint},
-                6: tus.SensorProg{Name: "generic_tracepoint_event4", Type: ebpf.TracePoint},
-                7: tus.SensorProg{Name: "generic_tracepoint_filter", Type: ebpf.TracePoint},
-                8: tus.SensorProg{Name: "generic_tracepoint_actions", Type: ebpf.TracePoint},
-                9: tus.SensorProg{Name: "generic_tracepoint_output", Type: ebpf.TracePoint},
+                2: tus.SensorProg{Name: "generic_tracepoint_process_event", Type: ebpf.TracePoint},
+                3: tus.SensorProg{Name: "generic_tracepoint_filter", Type: ebpf.TracePoint},
+                4: tus.SensorProg{Name: "generic_tracepoint_actions", Type: ebpf.TracePoint},
+                5: tus.SensorProg{Name: "generic_tracepoint_output", Type: ebpf.TracePoint},
         }

         var sensorMaps = []tus.SensorMap{
                 // all programs
-                tus.SensorMap{Name: "tp_heap", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+                tus.SensorMap{Name: "tp_heap", Progs: []uint{0, 1, 2, 3, 4, 5}},

                 // all but generic_tracepoint_output
-                tus.SensorMap{Name: "tp_calls", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8}},
+                tus.SensorMap{Name: "tp_calls", Progs: []uint{0, 1, 2, 3, 4}},

                 // only generic_tracepoint_event*
-                tus.SensorMap{Name: "buffer_heap_map", Progs: []uint{2, 3, 4, 5, 6}},
+                tus.SensorMap{Name: "buffer_heap_map", Progs: []uint{2}},

                 // all but generic_tracepoint_event,generic_tracepoint_filter
-                tus.SensorMap{Name: "retprobe_map", Progs: []uint{1, 2, 3, 4, 5, 6}},
+                tus.SensorMap{Name: "retprobe_map", Progs: []uint{1, 2}},

                 // generic_tracepoint_output
-                tus.SensorMap{Name: "tcpmon_map", Progs: []uint{9}},
+                tus.SensorMap{Name: "tcpmon_map", Progs: []uint{5}},

                 // all kprobe but generic_tracepoint_filter
-                tus.SensorMap{Name: "config_map", Progs: []uint{0, 1, 2, 3, 4, 5, 6}},
+                tus.SensorMap{Name: "config_map", Progs: []uint{0, 1, 2}},

                 // generic_tracepoint_event
                 tus.SensorMap{Name: "tg_conf_map", Progs: []uint{0}},
@@ -455,10 +451,10 @@ func TestLoadTracepointSensor(t *testing.T) {

         if kernels.EnableLargeProgs() {
                 // shared with base sensor
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 1, 7, 8, 9}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 1, 3, 4, 5}})
         } else {
                 // shared with base sensor
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 1, 7}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 1, 3}})
         }

         readHook := `
diff --git a/pkg/sensors/tracing/uprobe_test.go b/pkg/sensors/tracing/uprobe_test.go
index 4877689d152..dccda614e82 100644
--- a/pkg/sensors/tracing/uprobe_test.go
+++ b/pkg/sensors/tracing/uprobe_test.go
@@ -29,37 +29,34 @@ func TestLoadUprobeSensor(t *testing.T) {
         var sensorProgs = []tus.SensorProg{
                 // uprobe
                 0: tus.SensorProg{Name: "generic_uprobe_event", Type: ebpf.Kprobe},
-                1: tus.SensorProg{Name: "generic_uprobe_process_event0", Type: ebpf.Kprobe},
-                2: tus.SensorProg{Name: "generic_uprobe_process_event1", Type: ebpf.Kprobe},
-                3: tus.SensorProg{Name: "generic_uprobe_process_event2", Type: ebpf.Kprobe},
-                4: tus.SensorProg{Name: "generic_uprobe_process_event3", Type: ebpf.Kprobe},
-                5: tus.SensorProg{Name: "generic_uprobe_process_event4", Type: ebpf.Kprobe},
-                6: tus.SensorProg{Name: "generic_uprobe_filter_arg", Type: ebpf.Kprobe},
-                7: tus.SensorProg{Name: "generic_uprobe_process_filter", Type: ebpf.Kprobe},
-                8: tus.SensorProg{Name: "generic_uprobe_actions", Type: ebpf.Kprobe},
-                9: tus.SensorProg{Name: "generic_uprobe_output", Type: ebpf.Kprobe},
+                1: tus.SensorProg{Name: "generic_uprobe_setup_event", Type: ebpf.Kprobe},
+                2: tus.SensorProg{Name: "generic_uprobe_process_event", Type: ebpf.Kprobe},
+                3: tus.SensorProg{Name: "generic_uprobe_filter_arg", Type: ebpf.Kprobe},
+                4: tus.SensorProg{Name: "generic_uprobe_process_filter", Type: ebpf.Kprobe},
+                5: tus.SensorProg{Name: "generic_uprobe_actions", Type: ebpf.Kprobe},
+                6: tus.SensorProg{Name: "generic_uprobe_output", Type: ebpf.Kprobe},
         }

         var sensorMaps = []tus.SensorMap{
                 // all uprobe programs
-                tus.SensorMap{Name: "process_call_heap", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+                tus.SensorMap{Name: "process_call_heap", Progs: []uint{0, 1, 2, 3, 4, 5, 6}},

                 // all but generic_uprobe_output
-                tus.SensorMap{Name: "uprobe_calls", Progs: []uint{0, 1, 2, 3, 4, 5, 6, 7, 8}},
+                tus.SensorMap{Name: "uprobe_calls", Progs: []uint{0, 1, 2, 3, 4, 5}},

                 // generic_uprobe_process_filter,generic_uprobe_filter_arg*,generic_uprobe_actions
-                tus.SensorMap{Name: "filter_map", Progs: []uint{6, 7, 8}},
+                tus.SensorMap{Name: "filter_map", Progs: []uint{3, 4, 5}},

                 // generic_uprobe_output
-                tus.SensorMap{Name: "tcpmon_map", Progs: []uint{9}},
+                tus.SensorMap{Name: "tcpmon_map", Progs: []uint{6}},
         }

         if kernels.EnableLargeProgs() {
                 // shared with base sensor
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 6, 7, 8, 9}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 3, 4, 5, 6}})
         } else {
                 // shared with base sensor
-                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 6, 7}})
+                sensorMaps = append(sensorMaps, tus.SensorMap{Name: "execve_map", Progs: []uint{0, 3, 4}})
         }

         nopHook := `