Skip to content

Commit 017822b

Browse files
author
Alexei Starovoitov
committed
Merge branch 'bpf-next/master' into for-next
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents 34240f5 + 688b745 commit 017822b

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

55 files changed

+1040
-557
lines changed

Documentation/bpf/map_array.rst

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,9 @@ of constant size. The size of the array is defined in ``max_entries`` at
1515
creation time. All array elements are pre-allocated and zero initialized when
1616
created. ``BPF_MAP_TYPE_PERCPU_ARRAY`` uses a different memory region for each
1717
CPU whereas ``BPF_MAP_TYPE_ARRAY`` uses the same memory region. The value
18-
stored can be of any size, however, all array elements are aligned to 8
19-
bytes.
18+
stored can be of any size for ``BPF_MAP_TYPE_ARRAY`` and not more than
19+
``PCPU_MIN_UNIT_SIZE`` (32 kB) for ``BPF_MAP_TYPE_PERCPU_ARRAY``. All
20+
array elements are aligned to 8 bytes.
2021

2122
Since kernel 5.5, memory mapping may be enabled for ``BPF_MAP_TYPE_ARRAY`` by
2223
setting the flag ``BPF_F_MMAPABLE``. The map definition is page-aligned and

arch/arm64/net/bpf_jit_comp.c

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1452,6 +1452,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
14521452
emit(A64_ASR(is64, dst, dst, imm), ctx);
14531453
break;
14541454

1455+
/* JUMP reg */
1456+
case BPF_JMP | BPF_JA | BPF_X:
1457+
emit(A64_BR(dst), ctx);
1458+
break;
14551459
/* JUMP off */
14561460
case BPF_JMP | BPF_JA:
14571461
case BPF_JMP32 | BPF_JA:
@@ -2231,6 +2235,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
22312235
for (i = 0; i <= prog->len; i++)
22322236
ctx.offset[i] *= AARCH64_INSN_SIZE;
22332237
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
2238+
/*
2239+
* The bpf_prog_update_insn_ptrs function expects offsets to
2240+
* point to the first byte of the jitted instruction (unlike
2241+
* the bpf_prog_fill_jited_linfo above, which, for historical
2242+
reasons, expects offsets to point to the next instruction).
2243+
*/
2244+
bpf_prog_update_insn_ptrs(prog, ctx.offset, ctx.ro_image);
22342245
out_off:
22352246
if (!ro_header && priv_stack_ptr) {
22362247
free_percpu(priv_stack_ptr);
@@ -2923,8 +2934,9 @@ static int gen_branch_or_nop(enum aarch64_insn_branch_type type, void *ip,
29232934
* The dummy_tramp is used to prevent another CPU from jumping to unknown
29242935
* locations during the patching process, making the patching process easier.
29252936
*/
2926-
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
2927-
void *old_addr, void *new_addr)
2937+
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
2938+
enum bpf_text_poke_type new_t, void *old_addr,
2939+
void *new_addr)
29282940
{
29292941
int ret;
29302942
u32 old_insn;
@@ -2968,14 +2980,13 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
29682980
!poking_bpf_entry))
29692981
return -EINVAL;
29702982

2971-
if (poke_type == BPF_MOD_CALL)
2972-
branch_type = AARCH64_INSN_BRANCH_LINK;
2973-
else
2974-
branch_type = AARCH64_INSN_BRANCH_NOLINK;
2975-
2983+
branch_type = old_t == BPF_MOD_CALL ? AARCH64_INSN_BRANCH_LINK :
2984+
AARCH64_INSN_BRANCH_NOLINK;
29762985
if (gen_branch_or_nop(branch_type, ip, old_addr, plt, &old_insn) < 0)
29772986
return -EFAULT;
29782987

2988+
branch_type = new_t == BPF_MOD_CALL ? AARCH64_INSN_BRANCH_LINK :
2989+
AARCH64_INSN_BRANCH_NOLINK;
29792990
if (gen_branch_or_nop(branch_type, ip, new_addr, plt, &new_insn) < 0)
29802991
return -EFAULT;
29812992

arch/loongarch/net/bpf_jit.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1284,11 +1284,12 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
12841284
return ret ? ERR_PTR(-EINVAL) : dst;
12851285
}
12861286

1287-
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
1288-
void *old_addr, void *new_addr)
1287+
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
1288+
enum bpf_text_poke_type new_t, void *old_addr,
1289+
void *new_addr)
12891290
{
12901291
int ret;
1291-
bool is_call = (poke_type == BPF_MOD_CALL);
1292+
bool is_call;
12921293
u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
12931294
u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
12941295

@@ -1298,13 +1299,15 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
12981299
if (!is_bpf_text_address((unsigned long)ip))
12991300
return -ENOTSUPP;
13001301

1302+
is_call = old_t == BPF_MOD_CALL;
13011303
ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
13021304
if (ret)
13031305
return ret;
13041306

13051307
if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
13061308
return -EFAULT;
13071309

1310+
is_call = new_t == BPF_MOD_CALL;
13081311
ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
13091312
if (ret)
13101313
return ret;

arch/powerpc/net/bpf_jit_comp.c

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1107,8 +1107,9 @@ static void do_isync(void *info __maybe_unused)
11071107
* execute isync (or some CSI) so that they don't go back into the
11081108
* trampoline again.
11091109
*/
1110-
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
1111-
void *old_addr, void *new_addr)
1110+
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
1111+
enum bpf_text_poke_type new_t, void *old_addr,
1112+
void *new_addr)
11121113
{
11131114
unsigned long bpf_func, bpf_func_end, size, offset;
11141115
ppc_inst_t old_inst, new_inst;
@@ -1119,7 +1120,6 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
11191120
return -EOPNOTSUPP;
11201121

11211122
bpf_func = (unsigned long)ip;
1122-
branch_flags = poke_type == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
11231123

11241124
/* We currently only support poking bpf programs */
11251125
if (!__bpf_address_lookup(bpf_func, &size, &offset, name)) {
@@ -1132,7 +1132,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
11321132
* an unconditional branch instruction at im->ip_after_call
11331133
*/
11341134
if (offset) {
1135-
if (poke_type != BPF_MOD_JUMP) {
1135+
if (old_t == BPF_MOD_CALL || new_t == BPF_MOD_CALL) {
11361136
pr_err("%s (0x%lx): calls are not supported in bpf prog body\n", __func__,
11371137
bpf_func);
11381138
return -EOPNOTSUPP;
@@ -1166,6 +1166,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
11661166
}
11671167

11681168
old_inst = ppc_inst(PPC_RAW_NOP());
1169+
branch_flags = old_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
11691170
if (old_addr) {
11701171
if (is_offset_in_branch_range(ip - old_addr))
11711172
create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags);
@@ -1174,6 +1175,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
11741175
branch_flags);
11751176
}
11761177
new_inst = ppc_inst(PPC_RAW_NOP());
1178+
branch_flags = new_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
11771179
if (new_addr) {
11781180
if (is_offset_in_branch_range(ip - new_addr))
11791181
create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags);

arch/riscv/net/bpf_jit_comp64.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -852,24 +852,27 @@ static int gen_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
852852
return emit_jump_and_link(is_call ? RV_REG_T0 : RV_REG_ZERO, rvoff, false, &ctx);
853853
}
854854

855-
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
856-
void *old_addr, void *new_addr)
855+
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
856+
enum bpf_text_poke_type new_t, void *old_addr,
857+
void *new_addr)
857858
{
858859
u32 old_insns[RV_FENTRY_NINSNS], new_insns[RV_FENTRY_NINSNS];
859-
bool is_call = poke_type == BPF_MOD_CALL;
860+
bool is_call;
860861
int ret;
861862

862863
if (!is_kernel_text((unsigned long)ip) &&
863864
!is_bpf_text_address((unsigned long)ip))
864865
return -ENOTSUPP;
865866

867+
is_call = old_t == BPF_MOD_CALL;
866868
ret = gen_jump_or_nops(old_addr, ip, old_insns, is_call);
867869
if (ret)
868870
return ret;
869871

870872
if (memcmp(ip, old_insns, RV_FENTRY_NBYTES))
871873
return -EFAULT;
872874

875+
is_call = new_t == BPF_MOD_CALL;
873876
ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
874877
if (ret)
875878
return ret;
@@ -1131,7 +1134,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
11311134
store_args(nr_arg_slots, args_off, ctx);
11321135

11331136
/* skip to actual body of traced function */
1134-
if (flags & BPF_TRAMP_F_SKIP_FRAME)
1137+
if (flags & BPF_TRAMP_F_ORIG_STACK)
11351138
orig_call += RV_FENTRY_NINSNS * 4;
11361139

11371140
if (flags & BPF_TRAMP_F_CALL_ORIG) {

arch/s390/net/bpf_jit_comp.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2413,8 +2413,9 @@ bool bpf_jit_supports_far_kfunc_call(void)
24132413
return true;
24142414
}
24152415

2416-
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2417-
void *old_addr, void *new_addr)
2416+
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
2417+
enum bpf_text_poke_type new_t, void *old_addr,
2418+
void *new_addr)
24182419
{
24192420
struct bpf_plt expected_plt, current_plt, new_plt, *plt;
24202421
struct {
@@ -2431,7 +2432,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
24312432
if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
24322433
return -EINVAL;
24332434

2434-
if (t == BPF_MOD_JUMP &&
2435+
if ((new_t == BPF_MOD_JUMP || old_t == BPF_MOD_JUMP) &&
24352436
insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
24362437
/*
24372438
* The branch already points to the destination,

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -230,6 +230,7 @@ config X86
230230
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64
231231
select HAVE_FTRACE_REGS_HAVING_PT_REGS if X86_64
232232
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
233+
select HAVE_DYNAMIC_FTRACE_WITH_JMP if X86_64
233234
select HAVE_SAMPLE_FTRACE_DIRECT if X86_64
234235
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
235236
select HAVE_EBPF_JIT

arch/x86/kernel/ftrace.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
7474
* No need to translate into a callthunk. The trampoline does
7575
* the depth accounting itself.
7676
*/
77-
return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
77+
if (ftrace_is_jmp(addr)) {
78+
addr = ftrace_jmp_get(addr);
79+
return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
80+
} else {
81+
return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
82+
}
7883
}
7984

8085
static int ftrace_verify_code(unsigned long ip, const char *old_code)

arch/x86/kernel/ftrace_64.S

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
285285
ANNOTATE_NOENDBR
286286
RET
287287

288+
1:
289+
testb $1, %al
290+
jz 2f
291+
andq $0xfffffffffffffffe, %rax
292+
movq %rax, MCOUNT_REG_SIZE+8(%rsp)
293+
restore_mcount_regs
294+
/* Restore flags */
295+
popfq
296+
RET
297+
288298
/* Swap the flags with orig_rax */
289-
1: movq MCOUNT_REG_SIZE(%rsp), %rdi
299+
2: movq MCOUNT_REG_SIZE(%rsp), %rdi
290300
movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
291301
movq %rax, MCOUNT_REG_SIZE(%rsp)
292302

0 commit comments

Comments
 (0)