Skip to content

Commit f92c1e1

Browse files
Jiri Olsa authored and Alexei Starovoitov committed
bpf: Add get_func_[arg|ret|arg_cnt] helpers
Adding following helpers for tracing programs: Get n-th argument of the traced function: long bpf_get_func_arg(void *ctx, u32 n, u64 *value) Get return value of the traced function: long bpf_get_func_ret(void *ctx, u64 *value) Get arguments count of the traced function: long bpf_get_func_arg_cnt(void *ctx) The trampoline now stores number of arguments on ctx-8 address, so it's easy to verify argument index and find return value argument's position. Moving function ip address on the trampoline stack behind the number of functions arguments, so it's now stored on ctx-16 address if it's needed. All helpers above are inlined by verifier. Also bit unrelated small change - using newly added function bpf_prog_has_trampoline in check_get_func_ip. Signed-off-by: Jiri Olsa <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Link: https://lore.kernel.org/bpf/[email protected]
1 parent 5edf6a1 commit f92c1e1

File tree

7 files changed

+209
-7
lines changed

7 files changed

+209
-7
lines changed

arch/x86/net/bpf_jit_comp.c

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1941,7 +1941,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
19411941
void *orig_call)
19421942
{
19431943
int ret, i, nr_args = m->nr_args;
1944-
int regs_off, ip_off, stack_size = nr_args * 8;
1944+
int regs_off, ip_off, args_off, stack_size = nr_args * 8;
19451945
struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
19461946
struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
19471947
struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
@@ -1968,6 +1968,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
19681968
* [ ... ]
19691969
* RBP - regs_off [ reg_arg1 ] program's ctx pointer
19701970
*
1971+
* RBP - args_off [ args count ] always
1972+
*
19711973
* RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
19721974
*/
19731975

@@ -1978,6 +1980,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
19781980

19791981
regs_off = stack_size;
19801982

1983+
/* args count */
1984+
stack_size += 8;
1985+
args_off = stack_size;
1986+
19811987
if (flags & BPF_TRAMP_F_IP_ARG)
19821988
stack_size += 8; /* room for IP address argument */
19831989

@@ -1996,6 +2002,13 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
19962002
EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
19972003
EMIT1(0x53); /* push rbx */
19982004

2005+
/* Store number of arguments of the traced function:
2006+
* mov rax, nr_args
2007+
* mov QWORD PTR [rbp - args_off], rax
2008+
*/
2009+
emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
2010+
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
2011+
19992012
if (flags & BPF_TRAMP_F_IP_ARG) {
20002013
/* Store IP address of the traced function:
20012014
* mov rax, QWORD PTR [rbp + 8]

include/linux/bpf.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -777,6 +777,7 @@ void bpf_ksym_add(struct bpf_ksym *ksym);
777777
void bpf_ksym_del(struct bpf_ksym *ksym);
778778
int bpf_jit_charge_modmem(u32 pages);
779779
void bpf_jit_uncharge_modmem(u32 pages);
780+
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
780781
#else
781782
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
782783
struct bpf_trampoline *tr)
@@ -805,6 +806,10 @@ static inline bool is_bpf_image_address(unsigned long address)
805806
{
806807
return false;
807808
}
809+
/* Stub used when trampoline support is not built in: without trampolines,
 * no program can possibly be attached through one, so always report false.
 */
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
808813
#endif
809814

810815
struct bpf_func_info_aux {

include/uapi/linux/bpf.h

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4993,6 +4993,31 @@ union bpf_attr {
49934993
* An integer less than, equal to, or greater than zero
49944994
* if the first **s1_sz** bytes of **s1** is found to be
49954995
* less than, to match, or be greater than **s2**.
4996+
*
4997+
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
4998+
* Description
4999+
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
5000+
* returned in **value**.
5001+
*
5002+
* Return
5003+
* 0 on success.
5004+
* **-EINVAL** if n >= arguments count of traced function.
5005+
*
5006+
* long bpf_get_func_ret(void *ctx, u64 *value)
5007+
* Description
5008+
* Get return value of the traced function (for tracing programs)
5009+
* in **value**.
5010+
*
5011+
* Return
5012+
* 0 on success.
5013+
* **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
5014+
*
5015+
* long bpf_get_func_arg_cnt(void *ctx)
5016+
* Description
5017+
* Get number of arguments of the traced function (for tracing programs).
5018+
*
5019+
* Return
5020+
* The number of arguments of the traced function.
49965021
*/
49975022
#define __BPF_FUNC_MAPPER(FN) \
49985023
FN(unspec), \
@@ -5178,6 +5203,9 @@ union bpf_attr {
51785203
FN(find_vma), \
51795204
FN(loop), \
51805205
FN(strncmp), \
5206+
FN(get_func_arg), \
5207+
FN(get_func_ret), \
5208+
FN(get_func_arg_cnt), \
51815209
/* */
51825210

51835211
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

kernel/bpf/trampoline.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,14 @@ static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
2727
/* serializes access to trampoline_table */
2828
static DEFINE_MUTEX(trampoline_mutex);
2929

30+
bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
31+
{
32+
enum bpf_attach_type eatype = prog->expected_attach_type;
33+
34+
return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
35+
eatype == BPF_MODIFY_RETURN;
36+
}
37+
3038
void *bpf_jit_alloc_exec_page(void)
3139
{
3240
void *image;

kernel/bpf/verifier.c

Lines changed: 72 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6395,13 +6395,11 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
63956395

63966396
static int check_get_func_ip(struct bpf_verifier_env *env)
63976397
{
6398-
enum bpf_attach_type eatype = env->prog->expected_attach_type;
63996398
enum bpf_prog_type type = resolve_prog_type(env->prog);
64006399
int func_id = BPF_FUNC_get_func_ip;
64016400

64026401
if (type == BPF_PROG_TYPE_TRACING) {
6403-
if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
6404-
eatype != BPF_MODIFY_RETURN) {
6402+
if (!bpf_prog_has_trampoline(env->prog)) {
64056403
verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
64066404
func_id_name(func_id), func_id);
64076405
return -ENOTSUPP;
@@ -12997,6 +12995,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env,
1299712995
static int do_misc_fixups(struct bpf_verifier_env *env)
1299812996
{
1299912997
struct bpf_prog *prog = env->prog;
12998+
enum bpf_attach_type eatype = prog->expected_attach_type;
1300012999
bool expect_blinding = bpf_jit_blinding_enabled(prog);
1300113000
enum bpf_prog_type prog_type = resolve_prog_type(prog);
1300213001
struct bpf_insn *insn = prog->insnsi;
@@ -13367,11 +13366,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1336713366
continue;
1336813367
}
1336913368

13369+
/* Implement bpf_get_func_arg inline. */
13370+
if (prog_type == BPF_PROG_TYPE_TRACING &&
13371+
insn->imm == BPF_FUNC_get_func_arg) {
13372+
/* Load nr_args from ctx - 8 */
13373+
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13374+
insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
13375+
insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
13376+
insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
13377+
insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
13378+
insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
13379+
insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
13380+
insn_buf[7] = BPF_JMP_A(1);
13381+
insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
13382+
cnt = 9;
13383+
13384+
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13385+
if (!new_prog)
13386+
return -ENOMEM;
13387+
13388+
delta += cnt - 1;
13389+
env->prog = prog = new_prog;
13390+
insn = new_prog->insnsi + i + delta;
13391+
continue;
13392+
}
13393+
13394+
/* Implement bpf_get_func_ret inline. */
13395+
if (prog_type == BPF_PROG_TYPE_TRACING &&
13396+
insn->imm == BPF_FUNC_get_func_ret) {
13397+
if (eatype == BPF_TRACE_FEXIT ||
13398+
eatype == BPF_MODIFY_RETURN) {
13399+
/* Load nr_args from ctx - 8 */
13400+
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13401+
insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
13402+
insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
13403+
insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
13404+
insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
13405+
insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
13406+
cnt = 6;
13407+
} else {
13408+
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
13409+
cnt = 1;
13410+
}
13411+
13412+
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13413+
if (!new_prog)
13414+
return -ENOMEM;
13415+
13416+
delta += cnt - 1;
13417+
env->prog = prog = new_prog;
13418+
insn = new_prog->insnsi + i + delta;
13419+
continue;
13420+
}
13421+
13422+
/* Implement get_func_arg_cnt inline. */
13423+
if (prog_type == BPF_PROG_TYPE_TRACING &&
13424+
insn->imm == BPF_FUNC_get_func_arg_cnt) {
13425+
/* Load nr_args from ctx - 8 */
13426+
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13427+
13428+
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
13429+
if (!new_prog)
13430+
return -ENOMEM;
13431+
13432+
env->prog = prog = new_prog;
13433+
insn = new_prog->insnsi + i + delta;
13434+
continue;
13435+
}
13436+
1337013437
/* Implement bpf_get_func_ip inline. */
1337113438
if (prog_type == BPF_PROG_TYPE_TRACING &&
1337213439
insn->imm == BPF_FUNC_get_func_ip) {
13373-
/* Load IP address from ctx - 8 */
13374-
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13440+
/* Load IP address from ctx - 16 */
13441+
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
1337513442

1337613443
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
1337713444
if (!new_prog)

kernel/trace/bpf_trace.c

Lines changed: 54 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1012,7 +1012,7 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
10121012
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
10131013
{
10141014
/* This helper call is inlined by verifier. */
1015-
return ((u64 *)ctx)[-1];
1015+
return ((u64 *)ctx)[-2];
10161016
}
10171017

10181018
static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
@@ -1091,6 +1091,53 @@ static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
10911091
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
10921092
};
10931093

1094+
BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1095+
{
1096+
/* This helper call is inlined by verifier. */
1097+
u64 nr_args = ((u64 *)ctx)[-1];
1098+
1099+
if ((u64) n >= nr_args)
1100+
return -EINVAL;
1101+
*value = ((u64 *)ctx)[n];
1102+
return 0;
1103+
}
1104+
1105+
static const struct bpf_func_proto bpf_get_func_arg_proto = {
1106+
.func = get_func_arg,
1107+
.ret_type = RET_INTEGER,
1108+
.arg1_type = ARG_PTR_TO_CTX,
1109+
.arg2_type = ARG_ANYTHING,
1110+
.arg3_type = ARG_PTR_TO_LONG,
1111+
};
1112+
1113+
BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1114+
{
1115+
/* This helper call is inlined by verifier. */
1116+
u64 nr_args = ((u64 *)ctx)[-1];
1117+
1118+
*value = ((u64 *)ctx)[nr_args];
1119+
return 0;
1120+
}
1121+
1122+
static const struct bpf_func_proto bpf_get_func_ret_proto = {
1123+
.func = get_func_ret,
1124+
.ret_type = RET_INTEGER,
1125+
.arg1_type = ARG_PTR_TO_CTX,
1126+
.arg2_type = ARG_PTR_TO_LONG,
1127+
};
1128+
1129+
BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1130+
{
1131+
/* This helper call is inlined by verifier. */
1132+
return ((u64 *)ctx)[-1];
1133+
}
1134+
1135+
static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1136+
.func = get_func_arg_cnt,
1137+
.ret_type = RET_INTEGER,
1138+
.arg1_type = ARG_PTR_TO_CTX,
1139+
};
1140+
10941141
static const struct bpf_func_proto *
10951142
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
10961143
{
@@ -1629,6 +1676,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
16291676
NULL;
16301677
case BPF_FUNC_d_path:
16311678
return &bpf_d_path_proto;
1679+
case BPF_FUNC_get_func_arg:
1680+
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1681+
case BPF_FUNC_get_func_ret:
1682+
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1683+
case BPF_FUNC_get_func_arg_cnt:
1684+
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
16321685
default:
16331686
fn = raw_tp_prog_func_proto(func_id, prog);
16341687
if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)

tools/include/uapi/linux/bpf.h

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4993,6 +4993,31 @@ union bpf_attr {
49934993
* An integer less than, equal to, or greater than zero
49944994
* if the first **s1_sz** bytes of **s1** is found to be
49954995
* less than, to match, or be greater than **s2**.
4996+
*
4997+
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
4998+
* Description
4999+
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
5000+
* returned in **value**.
5001+
*
5002+
* Return
5003+
* 0 on success.
5004+
* **-EINVAL** if n >= arguments count of traced function.
5005+
*
5006+
* long bpf_get_func_ret(void *ctx, u64 *value)
5007+
* Description
5008+
* Get return value of the traced function (for tracing programs)
5009+
* in **value**.
5010+
*
5011+
* Return
5012+
* 0 on success.
5013+
* **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
5014+
*
5015+
* long bpf_get_func_arg_cnt(void *ctx)
5016+
* Description
5017+
* Get number of arguments of the traced function (for tracing programs).
5018+
*
5019+
* Return
5020+
* The number of arguments of the traced function.
49965021
*/
49975022
#define __BPF_FUNC_MAPPER(FN) \
49985023
FN(unspec), \
@@ -5178,6 +5203,9 @@ union bpf_attr {
51785203
FN(find_vma), \
51795204
FN(loop), \
51805205
FN(strncmp), \
5206+
FN(get_func_arg), \
5207+
FN(get_func_ret), \
5208+
FN(get_func_arg_cnt), \
51815209
/* */
51825210

51835211
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

0 commit comments

Comments
 (0)