Skip to content

Commit 62a9ed6

Browse files
committed
[tmp] support the "simplified" gotox
Make libbpf parse and pass proper offsets for the "new" gotox instructions (generated by llvm/llvm-project#133856). This was hacked together quickly, so there are leftovers from the old patch. (And the blindness which was presumably fixed breaks again in the bpf_goto_x tests.) Signed-off-by: Anton Protopopov <[email protected]>
1 parent 0c86ef7 commit 62a9ed6

File tree

3 files changed

+104
-108
lines changed

3 files changed

+104
-108
lines changed

kernel/bpf/verifier.c

Lines changed: 52 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19925,22 +19925,24 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
1992519925
return err;
1992619926

1992719927
dst_reg = reg_state(env, insn->dst_reg);
19928-
if (dst_reg->type != PTR_TO_INSN) {
19929-
verbose(env, "BPF_JA|BPF_X R%d has type %d, expected PTR_TO_INSN\n",
19928+
if (dst_reg->type != SCALAR_VALUE) {
19929+
verbose(env, "BPF_JA|BPF_X R%d has type %d, expected SCALAR_VALUE\n",
1993019930
insn->dst_reg, dst_reg->type);
1993119931
return -EINVAL;
1993219932
}
1993319933

19934+
#if 0
1993419935
if (dst_reg->map_ptr != map) {
1993519936
verbose(env, "BPF_JA|BPF_X R%d was loaded from map id=%u, expected id=%u\n",
1993619937
insn->dst_reg, dst_reg->map_ptr->id, map->id);
1993719938
return -EINVAL;
1993819939
}
19940+
#endif
1993919941

1994019942
if (dst_reg->max_index >= map->max_entries)
1994119943
return -EINVAL;
1994219944

19943-
for (i = dst_reg->min_index + 1; i <= dst_reg->max_index; i++) {
19945+
for (i = dst_reg->umin_value + 1; i <= dst_reg->umax_value; i++) {
1994419946
xoff = bpf_insn_array_iter_xlated_offset(map, i);
1994519947
if (xoff == -ENOENT)
1994619948
break;
@@ -19952,7 +19954,7 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
1995219954
return -EFAULT;
1995319955
}
1995419956

19955-
env->insn_idx = bpf_insn_array_iter_xlated_offset(map, dst_reg->min_index);
19957+
env->insn_idx = bpf_insn_array_iter_xlated_offset(map, dst_reg->umin_value);
1995619958
if (env->insn_idx < 0)
1995719959
return env->insn_idx;
1995819960

@@ -22364,6 +22366,52 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2236422366
goto next_insn;
2236522367
}
2236622368

22369+
if ((insn->code == (BPF_JMP | BPF_JA | BPF_X) || insn->code == (BPF_JMP32 | BPF_JA | BPF_X))) {
22370+
struct bpf_insn *patch = &insn_buf[0];
22371+
struct bpf_map *map;
22372+
22373+
ret = add_used_map(env, insn->imm, &map);
22374+
if (ret < 0)
22375+
return ret;
22376+
22377+
/*
22378+
* Replace BPF_JMP|BPF_JA|BPF,SRC=Rx,DST=0,IMM=fd with
22379+
*
22380+
* Rt = ldimm64(map_address)
22381+
* Rt += "offset to elements"
22382+
* Rx *= element size
22383+
* Rx += Rt
22384+
* BPF_JMP|BPF_JA|BPF,SRC=Rx,DST=1,IMM=fd
22385+
*/
22386+
struct bpf_insn_array {
22387+
struct bpf_map map;
22388+
struct mutex state_mutex;
22389+
int state;
22390+
u32 **unique_offsets;
22391+
u32 unique_offsets_cnt;
22392+
long *ips;
22393+
DECLARE_FLEX_ARRAY(struct bpf_insn_ptr, ptrs);
22394+
};
22395+
22396+
*patch++ = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_AX, 0, 0, (u32)(u64)map);
22397+
*patch++ = BPF_RAW_INSN(0, 0, 0, 0, (u32)((u64)map >> 32));
22398+
*patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, sizeof(struct bpf_insn_array));
22399+
*patch++ = BPF_ALU64_IMM(BPF_MUL, insn->dst_reg, sizeof(struct bpf_insn_ptr));
22400+
*patch++ = BPF_ALU64_REG(BPF_ADD, insn->dst_reg, BPF_REG_AX);
22401+
*patch++ = BPF_RAW_INSN(BPF_LDX | BPF_DW | BPF_MEM, insn->dst_reg, insn->dst_reg, 0, 0);
22402+
*patch++ = BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, insn->dst_reg, 0, 0, insn->imm);
22403+
22404+
cnt = patch - insn_buf;
22405+
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
22406+
if (!new_prog)
22407+
return -ENOMEM;
22408+
22409+
delta += cnt - 1; // XXX can be patched with other code
22410+
env->prog = prog = new_prog;
22411+
insn = new_prog->insnsi + i + delta;
22412+
goto next_insn;
22413+
}
22414+
2236722415
/* Make it impossible to de-reference a userspace address */
2236822416
if (BPF_CLASS(insn->code) == BPF_LDX &&
2236922417
(BPF_MODE(insn->code) == BPF_PROBE_MEM ||

tools/lib/bpf/libbpf.c

Lines changed: 49 additions & 104 deletions
Original file line numberDiff line numberDiff line change
@@ -372,6 +372,7 @@ enum reloc_type {
372372
RELO_EXTERN_CALL,
373373
RELO_SUBPROG_ADDR,
374374
RELO_CORE,
375+
RELO_INSN_ARRAY,
375376
};
376377

377378
struct reloc_desc {
@@ -382,6 +383,7 @@ struct reloc_desc {
382383
struct {
383384
int map_idx;
384385
int sym_off;
386+
int sym_size;
385387
int ext_idx;
386388
};
387389
};
@@ -664,6 +666,7 @@ struct elf_state {
664666
Elf_Data *symbols;
665667
Elf_Data *arena_data;
666668
Elf_Data *jt_sizes_data;
669+
Elf_Data *jumptables_data;
667670
size_t shstrndx; /* section index for section name strings */
668671
size_t strtabidx;
669672
struct elf_sec_desc *secs;
@@ -675,6 +678,7 @@ struct elf_state {
675678
bool has_st_ops;
676679
int arena_data_shndx;
677680
int jt_sizes_data_shndx;
681+
int jumptables_data_shndx;
678682
};
679683

680684
struct usdt_manager;
@@ -825,6 +829,13 @@ static bool insn_is_pseudo_func(struct bpf_insn *insn)
825829
return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
826830
}
827831

832+
static bool is_goto_x(const struct bpf_insn *insn)
833+
{
834+
return BPF_CLASS(insn->code) == BPF_JMP &&
835+
BPF_OP(insn->code) == BPF_JA &&
836+
BPF_SRC(insn->code) == BPF_X;
837+
}
838+
828839
static int
829840
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
830841
const char *name, size_t sec_idx, const char *sec_name,
@@ -4064,6 +4075,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
40644075
} else if (strcmp(name, ARENA_SEC) == 0) {
40654076
obj->efile.arena_data = data;
40664077
obj->efile.arena_data_shndx = idx;
4078+
} else if (strcmp(name, ".jumptables") == 0) {
4079+
obj->efile.jumptables_data = calloc(1, sizeof(*data)); // XXX, do it properly, otherwise ->d_buf is corrupted
4080+
memcpy(obj->efile.jumptables_data, data, sizeof(*data));
4081+
obj->efile.jumptables_data_shndx = idx;
40674082
} else {
40684083
pr_info("elf: skipping unrecognized data section(%d) %s\n",
40694084
idx, name);
@@ -4621,7 +4636,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
46214636
const char *sym_sec_name;
46224637
struct bpf_map *map;
46234638

4624-
if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4639+
if (!is_call_insn(insn) && !is_ldimm64_insn(insn) && !is_goto_x(insn)) {
46254640
pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
46264641
prog->name, sym_name, insn_idx, insn->code);
46274642
return -LIBBPF_ERRNO__RELOC;
@@ -4712,6 +4727,16 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
47124727
return 0;
47134728
}
47144729

4730+
/* jump table data relocation */
4731+
if (shdr_idx == obj->efile.jumptables_data_shndx) {
4732+
reloc_desc->type = RELO_INSN_ARRAY;
4733+
reloc_desc->insn_idx = insn_idx;
4734+
reloc_desc->map_idx = -1;
4735+
reloc_desc->sym_off = sym->st_value; // XXX ?
4736+
reloc_desc->sym_size = sym->st_size;
4737+
return 0;
4738+
}
4739+
47154740
/* generic map reference relocation */
47164741
if (type == LIBBPF_MAP_UNSPEC) {
47174742
if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
@@ -6204,12 +6229,7 @@ static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
62046229
insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
62056230
}
62066231

6207-
static bool map_fd_is_rodata(struct bpf_object *obj, int map_fd)
6208-
{
6209-
return map_fd == obj->rodata_map_fd;
6210-
}
6211-
6212-
static int create_jt_map(const struct jt *jt, int adjust_off)
6232+
static int create_jt_map(struct bpf_object *obj, int off, int size, int adjust_off)
62136233
{
62146234
static union bpf_attr attr = {
62156235
.map_type = BPF_MAP_TYPE_INSN_ARRAY,
@@ -6221,15 +6241,20 @@ static int create_jt_map(const struct jt *jt, int adjust_off)
62216241
int map_fd;
62226242
int err;
62236243
__u32 i;
6244+
__u32 *jt;
62246245

6225-
attr.max_entries = jt->jump_target_cnt;
6246+
attr.max_entries = size / 4;
62266247

62276248
map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
62286249
if (map_fd < 0)
62296250
return map_fd;
62306251

6231-
for (i = 0; i < jt->jump_target_cnt; i++) {
6232-
val.xlated_off = jt->jump_target[i] + adjust_off;
6252+
jt = (__u32 *)(obj->efile.jumptables_data->d_buf + off);
6253+
if (!jt)
6254+
return -EINVAL;
6255+
6256+
for (i = 0; i < attr.max_entries; i++) {
6257+
val.xlated_off = jt[i] + adjust_off;
62336258
err = bpf_map_update_elem(map_fd, &i, &val, 0);
62346259
if (err) {
62356260
close(map_fd);
@@ -6246,17 +6271,6 @@ static int create_jt_map(const struct jt *jt, int adjust_off)
62466271
return map_fd;
62476272
}
62486273

6249-
static int subprog_insn_off(struct bpf_program *prog, int insn_idx)
6250-
{
6251-
int i;
6252-
6253-
for (i = prog->subprog_cnt - 1; i >= 0; i--)
6254-
if (insn_idx >= prog->subprog_offset[i])
6255-
return prog->subprog_offset[i] - prog->subprog_sec_offst[i];
6256-
6257-
return -prog->sec_insn_off;
6258-
}
6259-
62606274
/* Relocate data references within program code:
62616275
* - map references;
62626276
* - global variable references;
@@ -6294,31 +6308,8 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
62946308
insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
62956309
insn[0].imm = relo->map_idx;
62966310
} else if (map->autocreate) {
6297-
const struct jt *jt;
6298-
int ajdust_insn_off;
6299-
int map_fd = map->fd;
6300-
6301-
/*
6302-
* Set imm to proper map file descriptor. In normal case,
6303-
* it is just map->fd. However, in case of a jump table,
6304-
* a new map file descriptor should be created
6305-
*/
6306-
jt = bpf_object__find_jt(obj, insn[1].imm / 8);
6307-
if (map_fd_is_rodata(obj, map_fd) && !IS_ERR(jt)) {
6308-
ajdust_insn_off = subprog_insn_off(prog, relo->insn_idx);
6309-
map_fd = create_jt_map(jt, ajdust_insn_off);
6310-
if (map_fd < 0) {
6311-
pr_warn("prog '%s': relo #%d: failed to create a jt map for .rodata offset %u\n",
6312-
prog->name, i, insn[1].imm / 8);
6313-
return map_fd;
6314-
}
6315-
6316-
/* a new map is created, so offset should be 0 */
6317-
insn[1].imm = 0;
6318-
}
6319-
63206311
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6321-
insn[0].imm = map_fd;
6312+
insn[0].imm = map->fd;
63226313
} else {
63236314
poison_map_ldimm64(prog, i, relo->insn_idx, insn,
63246315
relo->map_idx, map);
@@ -6371,6 +6362,19 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
63716362
case RELO_CORE:
63726363
/* will be handled by bpf_program_record_relos() */
63736364
break;
6365+
case RELO_INSN_ARRAY: {
6366+
int map_fd;
6367+
6368+
map_fd = create_jt_map(obj, relo->sym_off, relo->sym_size, relo->insn_idx + 1);
6369+
if (map_fd < 0) {
6370+
pr_warn("prog '%s': relo #%d: failed to create a jt map for .rodata offset %u\n",
6371+
prog->name, i, relo->sym_off);
6372+
return map_fd;
6373+
}
6374+
insn->imm = map_fd;
6375+
insn->off = 0;
6376+
}
6377+
break;
63746378
default:
63756379
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
63766380
prog->name, i, relo->type);
@@ -7612,58 +7616,6 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
76127616
return 0;
76137617
}
76147618

7615-
static bool insn_is_gotox(struct bpf_insn *insn)
7616-
{
7617-
return BPF_CLASS(insn->code) == BPF_JMP &&
7618-
BPF_OP(insn->code) == BPF_JA &&
7619-
BPF_SRC(insn->code) == BPF_X;
7620-
}
7621-
7622-
/*
7623-
* This one is too dumb, of course. TBD to make it smarter.
7624-
*/
7625-
static int find_jt_map_fd(struct bpf_program *prog, int insn_idx)
7626-
{
7627-
struct bpf_insn *insn = &prog->insns[insn_idx];
7628-
__u8 dst_reg = insn->dst_reg;
7629-
7630-
/* TBD: this function is such smart for now that it even ignores this
7631-
* register. Instead, it should backtrack the load more carefully.
7632-
* (So far even this dumb version works with all selftests.)
7633-
*/
7634-
pr_debug("searching for a load instruction which populated dst_reg=r%u\n", dst_reg);
7635-
7636-
while (--insn >= prog->insns) {
7637-
if (insn->code == (BPF_LD|BPF_DW|BPF_IMM))
7638-
return insn[0].imm;
7639-
}
7640-
7641-
return -ENOENT;
7642-
}
7643-
7644-
static int bpf_object__patch_gotox(struct bpf_object *obj, struct bpf_program *prog)
7645-
{
7646-
struct bpf_insn *insn = prog->insns;
7647-
int map_fd;
7648-
int i;
7649-
7650-
for (i = 0; i < prog->insns_cnt; i++, insn++) {
7651-
if (!insn_is_gotox(insn))
7652-
continue;
7653-
7654-
if (obj->gen_loader)
7655-
return -EFAULT;
7656-
7657-
map_fd = find_jt_map_fd(prog, i);
7658-
if (map_fd < 0)
7659-
return map_fd;
7660-
7661-
insn->imm = map_fd;
7662-
}
7663-
7664-
return 0;
7665-
}
7666-
76677619
static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
76687620
int *btf_obj_fd, int *btf_type_id);
76697621

@@ -8208,13 +8160,6 @@ static int bpf_object_prepare_progs(struct bpf_object *obj)
82088160
return err;
82098161
}
82108162

8211-
for (i = 0; i < obj->nr_programs; i++) {
8212-
prog = &obj->programs[i];
8213-
err = bpf_object__patch_gotox(obj, prog);
8214-
if (err)
8215-
return err;
8216-
}
8217-
82188163
return 0;
82198164
}
82208165

tools/lib/bpf/linker.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2084,6 +2084,9 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
20842084
obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx;
20852085
return 0;
20862086
}
2087+
2088+
if (!strcmp(src_sec->sec_name, ".jumptables"))
2089+
goto add_sym;
20872090
}
20882091

20892092
if (sym_bind == STB_LOCAL)

0 commit comments

Comments
 (0)