-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
 #ifndef __BPF_TRACE_HELPERS_H
 #define __BPF_TRACE_HELPERS_H
 
-#include "bpf_helpers.h"
-
-#define __BPF_MAP_0(i, m, v, ...) v
-#define __BPF_MAP_1(i, m, v, t, a, ...) m(t, a, ctx[i])
-#define __BPF_MAP_2(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_1(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_3(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_2(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_4(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_3(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_5(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_4(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_6(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_5(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_7(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_6(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_8(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_7(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_9(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_8(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_10(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_9(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_11(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_10(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP_12(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_11(i+1, m, v, __VA_ARGS__)
-#define __BPF_MAP(n, ...) __BPF_MAP_##n(0, __VA_ARGS__)
-
-/* BPF sizeof(void *) is always 8, so no need to cast to long first
- * for ptr to avoid compiler warning.
+#include <bpf_helpers.h>
+
+#define ___bpf_concat(a, b) a ## b
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#define ___bpf_narg(...) \
+	___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define ___bpf_empty(...) \
+	___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
+#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
+#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
+#define ___bpf_ctx_cast(args...) \
+	___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
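For illustration only (not part of the patch): tracing the helpers above by hand, a three-argument list expands step by step as follows; this is the casting that BPF_PROG below performs behind the scenes:

	___bpf_ctx_cast(a, b, c)
	-> ___bpf_ctx_cast3(a, b, c)			/* ___bpf_narg(a, b, c) == 3 */
	-> ___bpf_ctx_cast2(b, c), (void *)ctx[2]
	-> ___bpf_ctx_cast1(c), (void *)ctx[1], (void *)ctx[2]
	-> ctx, (void *)ctx[0], (void *)ctx[1], (void *)ctx[2]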
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs, that accept input arguments as a single
+ * pointer to untyped u64 array, where each u64 can actually be a typed
+ * pointer or integer of different size. Instead of requiring user to write
+ * manual casts and work with array elements by index, BPF_PROG macro
+ * allows user to declare a list of named and typed input arguments in the
+ * same syntax as for normal C function. All the casting is hidden and
+ * performed transparently, while user code can just assume working with
+ * function arguments of specified type and name.
+ *
+ * Original raw context argument is preserved as well as 'ctx' argument.
+ * This is useful when using BPF helpers that expect original context
+ * as one of the parameters (e.g., for bpf_perf_event_output()).
 */
-#define __BPF_CAST(t, a, ctx) (t) ctx
-#define __BPF_V void
-#define __BPF_N
-
-#define __BPF_DECL_ARGS(t, a, ctx) t a
-
-#define BPF_TRACE_x(x, sec_name, fname, ret_type, ...)	\
-static __always_inline ret_type	\
-____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
-	\
-SEC(sec_name)	\
-ret_type fname(__u64 *ctx)	\
-{	\
-	return ____##fname(__BPF_MAP(x, __BPF_CAST, __BPF_N, __VA_ARGS__));\
-}	\
-	\
-static __always_inline	\
-ret_type ____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-
-#define BPF_TRACE_0(sec, fname, ...) BPF_TRACE_x(0, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_1(sec, fname, ...) BPF_TRACE_x(1, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_2(sec, fname, ...) BPF_TRACE_x(2, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_3(sec, fname, ...) BPF_TRACE_x(3, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_4(sec, fname, ...) BPF_TRACE_x(4, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_5(sec, fname, ...) BPF_TRACE_x(5, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_6(sec, fname, ...) BPF_TRACE_x(6, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_7(sec, fname, ...) BPF_TRACE_x(7, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_8(sec, fname, ...) BPF_TRACE_x(8, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_9(sec, fname, ...) BPF_TRACE_x(9, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_10(sec, fname, ...) BPF_TRACE_x(10, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_11(sec, fname, ...) BPF_TRACE_x(11, sec, fname, int, __VA_ARGS__)
-#define BPF_TRACE_12(sec, fname, ...) BPF_TRACE_x(12, sec, fname, int, __VA_ARGS__)
+#define BPF_PROG(name, args...)					    \
+name(unsigned long long *ctx);					    \
+static __always_inline typeof(name(0))				    \
+____##name(unsigned long long *ctx, ##args);			    \
+typeof(name(0)) name(unsigned long long *ctx)			    \
+{								    \
+	_Pragma("GCC diagnostic push")				    \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	    \
+	return ____##name(___bpf_ctx_cast(args));		    \
+	_Pragma("GCC diagnostic pop")				    \
+}								    \
+static __always_inline typeof(name(0))				    \
+____##name(unsigned long long *ctx, ##args)
+
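As a usage sketch (not part of this patch; the program name, section name, and traced kernel function are chosen for illustration, and struct filename would normally come from vmlinux.h or a local declaration), a fentry program declared with BPF_PROG could look like this:

	SEC("fentry/do_unlinkat")
	int BPF_PROG(trace_unlink_entry, int dfd, struct filename *name)
	{
		/* 'dfd' and 'name' are already cast from ctx[0] and ctx[1];
		 * the raw u64 array is still available as 'ctx', e.g. for
		 * bpf_perf_event_output(ctx, ...).
		 */
		return 0;
	}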
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) \
+	___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) \
+	___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) \
+	___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) \
+	___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) \
+	___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args(args...) \
+	___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
 
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
+ * low-level way of getting kprobe input arguments from struct pt_regs, and
+ * provides a familiar typed and named function arguments syntax and
+ * semantics of accessing kprobe input parameters.
+ *
+ * Original struct pt_regs* context is preserved as 'ctx' argument. This might
+ * be necessary when using BPF helpers like bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...)					    \
+name(struct pt_regs *ctx);						    \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
+typeof(name(0)) name(struct pt_regs *ctx)				    \
+{									    \
+	_Pragma("GCC diagnostic push")					    \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
+	return ____##name(___bpf_kprobe_args(args));			    \
+	_Pragma("GCC diagnostic pop")					    \
+}									    \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+
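A corresponding kprobe sketch (again illustrative and not part of the patch; PT_REGS_PARMn must be provided by the included helpers for the target architecture):

	SEC("kprobe/do_unlinkat")
	int BPF_KPROBE(kprobe_unlink_entry, int dfd, struct filename *name)
	{
		/* arguments are fetched via PT_REGS_PARM1/PT_REGS_PARM2;
		 * the original struct pt_regs pointer stays available as 'ctx'.
		 */
		return 0;
	}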
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_argsN(x, args...) \
+	___bpf_kprobe_args(args), (void *)PT_REGS_RET(ctx)
+#define ___bpf_kretprobe_args(args...) \
+	___bpf_apply(___bpf_kretprobe_args, ___bpf_empty(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except, in addition to listing all
+ * input kprobe arguments, one last extra argument has to be specified, which
+ * captures kprobe return value.
+ */
+#define BPF_KRETPROBE(name, args...)					    \
+name(struct pt_regs *ctx);						    \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
+typeof(name(0)) name(struct pt_regs *ctx)				    \
+{									    \
+	_Pragma("GCC diagnostic push")					    \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
+	return ____##name(___bpf_kretprobe_args(args));			    \
+	_Pragma("GCC diagnostic pop")					    \
+}									    \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
 #endif
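Finally, a kretprobe sketch (illustrative, not part of the patch), where the last listed argument captures the traced function's return value via PT_REGS_RET:

	SEC("kretprobe/do_unlinkat")
	int BPF_KRETPROBE(kretprobe_unlink_exit, long ret)
	{
		/* with only the return-value argument listed,
		 * ___bpf_kretprobe_args passes through just 'ctx' plus
		 * (void *)PT_REGS_RET(ctx), which lands in 'ret'.
		 */
		return 0;
	}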