Skip to content

Commit b20062b

Browse files
image-dragon (Kernel Patches Daemon)
authored and committed
bpf: implement "jmp" mode for trampoline
Implement the "jmp" mode for the bpf trampoline. For the ftrace_managed case, we need only to set the FTRACE_OPS_FL_JMP on the tr->fops if "jmp" is needed. For the bpf poke case, the new flag BPF_TRAMP_F_JMPED is introduced to store and check if the trampoline is in the "jmp" mode. Signed-off-by: Menglong Dong <[email protected]>
1 parent a488442 commit b20062b

File tree

2 files changed

+50
-9
lines changed

2 files changed

+50
-9
lines changed

include/linux/bpf.h

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1202,6 +1202,12 @@ struct btf_func_model {
12021202
*/
12031203
#define BPF_TRAMP_F_INDIRECT BIT(8)
12041204

1205+
/*
1206+
* Indicate that the trampoline is using "jmp" instead of "call". This flag
1207+
* is only used in the !ftrace_managed case.
1208+
*/
1209+
#define BPF_TRAMP_F_JMPED BIT(9)
1210+
12051211
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
12061212
* bytes on x86.
12071213
*/

kernel/bpf/trampoline.c

Lines changed: 44 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -175,23 +175,44 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
175175
return tr;
176176
}
177177

178-
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
178+
static int bpf_text_poke(struct bpf_trampoline *tr, void *old_addr,
179+
void *new_addr)
179180
{
181+
enum bpf_text_poke_type new_t = BPF_MOD_CALL, old_t = BPF_MOD_CALL;
180182
void *ip = tr->func.addr;
181183
int ret;
182184

185+
if (bpf_trampoline_need_jmp(tr->flags))
186+
new_t = BPF_MOD_JUMP;
187+
if (tr->flags & BPF_TRAMP_F_JMPED)
188+
old_t = BPF_MOD_JUMP;
189+
190+
ret = bpf_arch_text_poke_type(ip, old_t, new_t, old_addr, new_addr);
191+
if (!ret) {
192+
if (new_t == BPF_MOD_JUMP)
193+
tr->flags |= BPF_TRAMP_F_JMPED;
194+
else
195+
tr->flags &= ~BPF_TRAMP_F_JMPED;
196+
}
197+
198+
return ret;
199+
}
200+
201+
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
202+
{
203+
int ret;
204+
183205
if (tr->func.ftrace_managed)
184206
ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
185207
else
186-
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
208+
ret = bpf_text_poke(tr, old_addr, NULL);
187209

188210
return ret;
189211
}
190212

191213
static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
192214
bool lock_direct_mutex)
193215
{
194-
void *ip = tr->func.addr;
195216
int ret;
196217

197218
if (tr->func.ftrace_managed) {
@@ -200,7 +221,7 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
200221
else
201222
ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
202223
} else {
203-
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
224+
ret = bpf_text_poke(tr, old_addr, new_addr);
204225
}
205226
return ret;
206227
}
@@ -223,7 +244,7 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
223244
ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
224245
ret = register_ftrace_direct(tr->fops, (long)new_addr);
225246
} else {
226-
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
247+
ret = bpf_text_poke(tr, NULL, new_addr);
227248
}
228249

229250
return ret;
@@ -415,7 +436,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
415436
}
416437

417438
/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
418-
tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
439+
tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX |
440+
BPF_TRAMP_F_JMPED);
419441

420442
if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
421443
tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
@@ -432,9 +454,17 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
432454

433455
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
434456
again:
435-
if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
436-
(tr->flags & BPF_TRAMP_F_CALL_ORIG))
437-
tr->flags |= BPF_TRAMP_F_ORIG_STACK;
457+
if (tr->flags & BPF_TRAMP_F_CALL_ORIG) {
458+
if (tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) {
459+
tr->flags |= BPF_TRAMP_F_ORIG_STACK;
460+
} else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_JMP)) {
461+
/* Use "jmp" instead of "call" for the trampoline
462+
* in the origin call case, and we don't need to
463+
* skip the frame.
464+
*/
465+
tr->flags &= ~BPF_TRAMP_F_SKIP_FRAME;
466+
}
467+
}
438468
#endif
439469

440470
size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
@@ -465,6 +495,11 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
465495
if (err)
466496
goto out_free;
467497

498+
if (bpf_trampoline_need_jmp(tr->flags))
499+
tr->fops->flags |= FTRACE_OPS_FL_JMP;
500+
else
501+
tr->fops->flags &= ~FTRACE_OPS_FL_JMP;
502+
468503
WARN_ON(tr->cur_image && total == 0);
469504
if (tr->cur_image)
470505
/* progs already running at this address */

0 commit comments

Comments
 (0)