tetragon: Remove generic_kprobe_process_event* programs
There's no need to keep separate kprobe programs for each
generic_kprobe_process_eventX call; we can just tail call
into a single program with an increased index.

The number of loaded programs is reduced, so the TestLoad*
tests need fixing as well.

I had to add several &= masks to make the code verifiable
under kernel 5.4; I can't find a better solution yet.

Signed-off-by: Jiri Olsa <[email protected]>
olsajiri committed Nov 27, 2023
1 parent 87ef33c · commit beaf407
Showing 8 changed files with 91 additions and 180 deletions.
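The change is easiest to see as a control-flow refactor: instead of five unrolled programs (process_event0 through process_event4), a single program re-enters itself through the tail-call map, carrying the next argument index in the shared event state. Below is a minimal user-space model of the new dispatch; the names are simplified and plain recursion stands in for the BPF tail_call(), so treat it as a sketch of the idea rather than the actual code.

#include <stdio.h>

#define MAX_ARGS 5

struct event {
	int filter_tailcall_index;
};

/* Stands in for generic_process_event(): handle one argument,
 * then re-enter until all five argument slots are processed.
 */
static void process_event(struct event *e)
{
	int index = e->filter_tailcall_index & 7; /* mirrors the &= mask */

	printf("processing argument %d\n", index);

	if (index < MAX_ARGS - 1) {
		e->filter_tailcall_index = index + 1;
		process_event(e); /* tail_call(ctx, calls, 1) in the BPF code */
		return;
	}
	e->filter_tailcall_index = 0; /* reset, then hand off to output */
}

int main(void)
{
	struct event e = { 0 };

	process_event(&e); /* prints "processing argument 0" .. "4" */
	return 0;
}
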
46 changes: 8 additions & 38 deletions bpf/process/bpf_generic_kprobe.c
@@ -113,7 +113,7 @@ generic_kprobe_start_process_filter(void *ctx)
#endif

/* Tail call into filters. */
tail_call(ctx, &kprobe_calls, 5);
tail_call(ctx, &kprobe_calls, 2);
return 0;
}

@@ -155,7 +155,7 @@ generic_kprobe_event(struct pt_regs *ctx)
}

__attribute__((section("kprobe/0"), used)) int
generic_kprobe_process_event0(void *ctx)
generic_kprobe_setup_event(void *ctx)
{
return generic_process_event_and_setup(
ctx, (struct bpf_map_def *)&process_call_heap,
@@ -165,46 +165,16 @@ generic_kprobe_process_event0(void *ctx)
}

__attribute__((section("kprobe/1"), used)) int
generic_kprobe_process_event1(void *ctx)
generic_kprobe_process_event(void *ctx)
{
return generic_process_event(ctx, 1,
return generic_process_event(ctx,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&kprobe_calls,
(struct bpf_map_def *)&config_map,
(struct bpf_map_def *)data_heap_ptr);
}

__attribute__((section("kprobe/2"), used)) int
generic_kprobe_process_event2(void *ctx)
{
return generic_process_event(ctx, 2,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&kprobe_calls,
(struct bpf_map_def *)&config_map,
(struct bpf_map_def *)data_heap_ptr);
}

__attribute__((section("kprobe/3"), used)) int
generic_kprobe_process_event3(void *ctx)
{
return generic_process_event(ctx, 3,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&kprobe_calls,
(struct bpf_map_def *)&config_map,
(struct bpf_map_def *)data_heap_ptr);
}

__attribute__((section("kprobe/4"), used)) int
generic_kprobe_process_event4(void *ctx)
{
return generic_process_event(ctx, 4,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&kprobe_calls,
(struct bpf_map_def *)&config_map,
(struct bpf_map_def *)data_heap_ptr);
}

__attribute__((section("kprobe/5"), used)) int
generic_kprobe_process_filter(void *ctx)
{
struct msg_generic_kprobe *msg;
@@ -217,7 +187,7 @@ generic_kprobe_process_filter(void *ctx)
ret = generic_process_filter(&msg->sel, &msg->current, &msg->ns,
&msg->caps, &filter_map, msg->idx);
if (ret == PFILTER_CONTINUE)
tail_call(ctx, &kprobe_calls, 5);
tail_call(ctx, &kprobe_calls, 2);
else if (ret == PFILTER_ACCEPT)
tail_call(ctx, &kprobe_calls, 0);
/* If filter does not accept drop it. Ideally we would
@@ -229,7 +199,7 @@
// Filter tailcalls: kprobe/6...kprobe/10
// see also: MIN_FILTER_TAILCALL, MAX_FILTER_TAILCALL

__attribute__((section("kprobe/6"), used)) int
__attribute__((section("kprobe/3"), used)) int
generic_kprobe_filter_arg(void *ctx)
{
return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -238,7 +208,7 @@ generic_kprobe_filter_arg(void *ctx)
(struct bpf_map_def *)&config_map);
}

__attribute__((section("kprobe/7"), used)) int
__attribute__((section("kprobe/4"), used)) int
generic_kprobe_actions(void *ctx)
{
return generic_actions(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -247,7 +217,7 @@ generic_kprobe_actions(void *ctx)
(struct bpf_map_def *)&override_tasks);
}

__attribute__((section("kprobe/8"), used)) int
__attribute__((section("kprobe/5"), used)) int
generic_kprobe_output(void *ctx)
{
return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_KPROBE);
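For orientation, here is the kprobe_calls slot layout after the renumbering, read off the section("kprobe/N") names and tail_call() indexes above. The enum is purely illustrative (the commit itself uses bare integers):

/* Illustrative only: tail-call slots of kprobe_calls after this commit. */
enum kprobe_tail_call_slot {
	KPROBE_SLOT_SETUP   = 0, /* generic_kprobe_setup_event */
	KPROBE_SLOT_PROCESS = 1, /* generic_kprobe_process_event */
	KPROBE_SLOT_FILTER  = 2, /* generic_kprobe_process_filter */
	KPROBE_SLOT_ARG     = 3, /* generic_kprobe_filter_arg */
	KPROBE_SLOT_ACTIONS = 4, /* generic_kprobe_actions */
	KPROBE_SLOT_OUTPUT  = 5, /* generic_kprobe_output */
};
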
48 changes: 8 additions & 40 deletions bpf/process/bpf_generic_tracepoint.c
@@ -196,51 +196,19 @@ generic_tracepoint_event(struct generic_tracepoint_event_arg *ctx)
#ifdef __CAP_CHANGES_FILTER
msg->sel.match_cap = 0;
#endif
tail_call(ctx, &tp_calls, 5);
tail_call(ctx, &tp_calls, 2);
return 0;
}

__attribute__((section("tracepoint/0"), used)) int
generic_tracepoint_event0(void *ctx)
{
return generic_process_event(ctx, 0, (struct bpf_map_def *)&tp_heap,
(struct bpf_map_def *)&tp_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("tracepoint/1"), used)) int
generic_tracepoint_event1(void *ctx)
generic_tracepoint_process_event(void *ctx)
{
return generic_process_event(ctx, 1, (struct bpf_map_def *)&tp_heap,
return generic_process_event(ctx, (struct bpf_map_def *)&tp_heap,
(struct bpf_map_def *)&tp_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("tracepoint/2"), used)) int
generic_tracepoint_event2(void *ctx)
{
return generic_process_event(ctx, 2, (struct bpf_map_def *)&tp_heap,
(struct bpf_map_def *)&tp_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("tracepoint/3"), used)) int
generic_tracepoint_event3(void *ctx)
{
return generic_process_event(ctx, 3, (struct bpf_map_def *)&tp_heap,
(struct bpf_map_def *)&tp_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("tracepoint/4"), used)) int
generic_tracepoint_event4(void *ctx)
{
return generic_process_event(ctx, 4, (struct bpf_map_def *)&tp_heap,
(struct bpf_map_def *)&tp_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("tracepoint/5"), used)) int
generic_tracepoint_filter(void *ctx)
{
struct msg_generic_kprobe *msg;
@@ -253,9 +221,9 @@ generic_tracepoint_filter(void *ctx)
ret = generic_process_filter(&msg->sel, &msg->current, &msg->ns,
&msg->caps, &filter_map, msg->idx);
if (ret == PFILTER_CONTINUE)
tail_call(ctx, &tp_calls, 5);
tail_call(ctx, &tp_calls, 2);
else if (ret == PFILTER_ACCEPT)
tail_call(ctx, &tp_calls, 0);
tail_call(ctx, &tp_calls, 1);
/* If filter does not accept drop it. Ideally we would
* log error codes for later review, TBD.
*/
@@ -265,7 +233,7 @@
// Filter tailcalls: tracepoint/6...tracepoint/10
// see also: MIN_FILTER_TAILCALL, MAX_FILTER_TAILCALL

__attribute__((section("tracepoint/6"), used)) int
__attribute__((section("tracepoint/3"), used)) int
generic_tracepoint_arg(void *ctx)
{
return filter_read_arg(ctx, (struct bpf_map_def *)&tp_heap,
@@ -274,7 +242,7 @@ generic_tracepoint_arg(void *ctx)
(struct bpf_map_def *)&config_map);
}

__attribute__((section("tracepoint/7"), used)) int
__attribute__((section("tracepoint/4"), used)) int
generic_tracepoint_actions(void *ctx)
{
return generic_actions(ctx, (struct bpf_map_def *)&tp_heap,
@@ -283,7 +251,7 @@ generic_tracepoint_actions(void *ctx)
(void *)0);
}

__attribute__((section("tracepoint/8"), used)) int
__attribute__((section("tracepoint/5"), used)) int
generic_tracepoint_output(void *ctx)
{
return generic_output(ctx, (struct bpf_map_def *)&tp_heap, MSG_OP_GENERIC_TRACEPOINT);
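One asymmetry worth noting: the tracepoint entry program does its setup inline, so an accepted filter tail-calls into slot 1 (argument processing) instead of slot 0 as on the kprobe side. A condensed sketch of the two accept paths, with my own comments:

/* kprobe: setup runs as a tail-call target, so accept restarts at slot 0. */
if (ret == PFILTER_ACCEPT)
	tail_call(ctx, &kprobe_calls, 0); /* generic_kprobe_setup_event */

/* tracepoint: generic_tracepoint_event() already initialized the event,
 * so accept jumps straight to argument processing in slot 1.
 */
if (ret == PFILTER_ACCEPT)
	tail_call(ctx, &tp_calls, 1); /* generic_tracepoint_process_event */
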
43 changes: 8 additions & 35 deletions bpf/process/bpf_generic_uprobe.c
@@ -89,7 +89,7 @@ generic_uprobe_start_process_filter(void *ctx)
if (!generic_process_filter_binary(config))
return 0;
/* Tail call into filters. */
tail_call(ctx, &uprobe_calls, 5);
tail_call(ctx, &uprobe_calls, 2);
return 0;
}

@@ -100,7 +100,7 @@
}

__attribute__((section("uprobe/0"), used)) int
generic_uprobe_process_event0(void *ctx)
generic_uprobe_setup_event(void *ctx)
{
return generic_process_event_and_setup(
ctx, (struct bpf_map_def *)&process_call_heap,
@@ -109,42 +109,15 @@ generic_uprobe_process_event0(void *ctx)
}

__attribute__((section("uprobe/1"), used)) int
generic_uprobe_process_event1(void *ctx)
generic_uprobe_process_event(void *ctx)
{
return generic_process_event(ctx, 1,
return generic_process_event(ctx,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&uprobe_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("uprobe/2"), used)) int
generic_uprobe_process_event2(void *ctx)
{
return generic_process_event(ctx, 2,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&uprobe_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("uprobe/3"), used)) int
generic_uprobe_process_event3(void *ctx)
{
return generic_process_event(ctx, 3,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&uprobe_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("uprobe/4"), used)) int
generic_uprobe_process_event4(void *ctx)
{
return generic_process_event(ctx, 4,
(struct bpf_map_def *)&process_call_heap,
(struct bpf_map_def *)&uprobe_calls,
(struct bpf_map_def *)&config_map, 0);
}

__attribute__((section("uprobe/5"), used)) int
generic_uprobe_process_filter(void *ctx)
{
struct msg_generic_kprobe *msg;
@@ -157,7 +130,7 @@ generic_uprobe_process_filter(void *ctx)
ret = generic_process_filter(&msg->sel, &msg->current, &msg->ns,
&msg->caps, &filter_map, msg->idx);
if (ret == PFILTER_CONTINUE)
tail_call(ctx, &uprobe_calls, 5);
tail_call(ctx, &uprobe_calls, 2);
else if (ret == PFILTER_ACCEPT)
tail_call(ctx, &uprobe_calls, 0);
/* If filter does not accept drop it. Ideally we would
@@ -166,7 +139,7 @@
return PFILTER_REJECT;
}

__attribute__((section("uprobe/6"), used)) int
__attribute__((section("uprobe/3"), used)) int
generic_uprobe_filter_arg(void *ctx)
{
return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -175,7 +148,7 @@ generic_uprobe_filter_arg(void *ctx)
(struct bpf_map_def *)&config_map);
}

__attribute__((section("uprobe/7"), used)) int
__attribute__((section("uprobe/4"), used)) int
generic_uprobe_actions(void *ctx)
{
return generic_actions(ctx, (struct bpf_map_def *)&process_call_heap,
@@ -184,7 +157,7 @@ generic_uprobe_actions(void *ctx)
(void *)0);
}

__attribute__((section("uprobe/8"), used)) int
__attribute__((section("uprobe/5"), used)) int
generic_uprobe_output(void *ctx)
{
return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_UPROBE);
21 changes: 15 additions & 6 deletions bpf/process/generic_calls.h
@@ -5,18 +5,19 @@
#define __GENERIC_CALLS_H__

#include "bpf_tracing.h"
#include "types/basic.h"

#define MAX_TOTAL 9000

static inline __attribute__((always_inline)) int
generic_process_event(void *ctx, int index, struct bpf_map_def *heap_map,
generic_process_event(void *ctx, struct bpf_map_def *heap_map,
struct bpf_map_def *tailcals, struct bpf_map_def *config_map,
struct bpf_map_def *data_heap)
{
struct msg_generic_kprobe *e;
struct event_config *config;
int index, zero = 0;
unsigned long a;
int zero = 0;
long ty, total;

e = map_lookup_elem(heap_map, &zero);
Expand All @@ -27,6 +28,11 @@ generic_process_event(void *ctx, int index, struct bpf_map_def *heap_map,
if (!config)
return 0;

index = e->filter_tailcall_index;
asm volatile("%[index] &= %0 ;\n"
: [index] "+r"(index)
: "i"(MAX_SELECTORS_MASK));

a = (&e->a0)[index];
total = e->common.size;

Expand All @@ -53,11 +59,14 @@ generic_process_event(void *ctx, int index, struct bpf_map_def *heap_map,
}
e->common.size = total;
/* Continue to process other arguments. */
if (index < 4)
tail_call(ctx, tailcals, index + 1);
if (index < 4) {
e->filter_tailcall_index = index + 1;
tail_call(ctx, tailcals, 1);
}

/* Last argument, go send.. */
tail_call(ctx, tailcals, 6);
e->filter_tailcall_index = 0;
tail_call(ctx, tailcals, 3);
return 0;
}

@@ -146,7 +155,7 @@ generic_process_event_and_setup(struct pt_regs *ctx,
generic_process_init(e, MSG_OP_GENERIC_UPROBE, config);
#endif

return generic_process_event(ctx, 0, heap_map, tailcals, config_map, data_heap);
return generic_process_event(ctx, heap_map, tailcals, config_map, data_heap);
}

#endif /* __GENERIC_CALLS_H__ */
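
The "&= masks" from the commit message are the inline-asm clamp visible in the hunk above. filter_tailcall_index is read back from map memory, so the 5.4 verifier sees it as an unbounded scalar, and a plain C "index &= MASK" can be transformed by clang in ways that older verifiers fail to track as a bound. Emitting the AND through inline asm pins it to the register. Condensed from the diff, with my annotations (assuming MAX_SELECTORS_MASK covers the valid 0-4 index range):

int index = e->filter_tailcall_index; /* scalar loaded from map memory */

/* Force the AND to survive as-is, so the verifier sees index
 * bounded by MAX_SELECTORS_MASK before the array access below.
 */
asm volatile("%[index] &= %0 ;\n"
	     : [index] "+r"(index)
	     : "i"(MAX_SELECTORS_MASK));

a = (&e->a0)[index]; /* provably in-range access under 5.4 */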