Skip to content

Commit cd1c318

Browse files
committed
bpf, x86: add support for indirect jumps
Add support for a new instruction BPF_JMP|BPF_X|BPF_JA, SRC=0, DST=Rx, off=0, imm=fd(M) which does an indirect jump to a location stored in Rx. The map M is an instruction array map containing all possible targets for this particular jump. On the jump the register Rx should have type PTR_TO_INSN. This new type ensures that the Rx register contains a value (or a range of values) loaded from the map M. Typically, this will be done as in the following code, which could have been generated for a switch statement (e.g., a switch statement compiled with LLVM): 0: r3 = r1 # "switch (r3)" 1: if r3 > 0x13 goto +0x666 # check r3 boundaries 2: r3 <<= 0x3 # r3 is void*, point to an address 3: r1 = 0xbeef ll # r1 is PTR_TO_MAP_VALUE, r1->map_ptr=M 5: r1 += r3 # r1 inherits boundaries from r3 6: r1 = *(u64 *)(r1 + 0x0) # r1 now has type PTR_TO_INSN 7: gotox r1[,imm=fd(M)] # verifier checks that M == r1->map_ptr On building the jump graph, and during the static analysis, a new function of the INSN_ARRAY map is used: bpf_insn_array_iter_xlated_offset(map, n). It allows iterating over the unique slots in an instruction array (equal items can be generated, e.g., for a sparse jump table for a switch, where not all possible branches are taken). Instruction (3) above loads the address of the first element of the map. From the BPF point of view, the map is a jump table in the native architecture, i.e., an array of jump targets. This patch makes it possible to take such an address and then later adjust it by an offset, as in instruction (5). A value of such type can be dereferenced once to create a PTR_TO_INSN, see instruction (6). When building the config, the high 16 bits of the insn_state are used, so this patch (theoretically) supports jump tables of up to 2^16 slots. Signed-off-by: Anton Protopopov <[email protected]>
1 parent 93b2ef4 commit cd1c318

File tree

6 files changed

+280
-9
lines changed

6 files changed

+280
-9
lines changed

arch/x86/net/bpf_jit_comp.c

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -671,9 +671,11 @@ static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
671671
*pprog = prog;
672672
}
673673

674-
static void emit_indirect_jump(u8 **pprog, int reg, bool ereg, u8 *ip)
674+
static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
675675
{
676676
u8 *prog = *pprog;
677+
int reg = reg2hex[bpf_reg];
678+
bool ereg = is_ereg(bpf_reg);
677679

678680
if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
679681
OPTIMIZER_HIDE_VAR(reg);
@@ -808,7 +810,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
808810
* rdi == ctx (1st arg)
809811
* rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
810812
*/
811-
emit_indirect_jump(&prog, 1 /* rcx */, false, ip + (prog - start));
813+
emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
812814

813815
/* out: */
814816
ctx->tail_call_indirect_label = prog - start;
@@ -2518,6 +2520,10 @@ st: if (is_imm8(insn->off))
25182520

25192521
break;
25202522

2523+
case BPF_JMP | BPF_JA | BPF_X:
2524+
case BPF_JMP32 | BPF_JA | BPF_X:
2525+
emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
2526+
break;
25212527
case BPF_JMP | BPF_JA:
25222528
case BPF_JMP32 | BPF_JA:
25232529
if (BPF_CLASS(insn->code) == BPF_JMP) {
@@ -3454,7 +3460,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image,
34543460
if (err)
34553461
return err;
34563462

3457-
emit_indirect_jump(&prog, 2 /* rdx */, false, image + (prog - buf));
3463+
emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
34583464

34593465
*pprog = prog;
34603466
return 0;

include/linux/bpf.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -952,6 +952,7 @@ enum bpf_reg_type {
952952
PTR_TO_ARENA,
953953
PTR_TO_BUF, /* reg points to a read/write buffer */
954954
PTR_TO_FUNC, /* reg points to a bpf program function */
955+
PTR_TO_INSN, /* reg points to a bpf program instruction */
955956
CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
956957
__BPF_REG_TYPE_MAX,
957958

@@ -3601,6 +3602,7 @@ int bpf_insn_array_ready(struct bpf_map *map);
36013602
void bpf_insn_array_release(struct bpf_map *map);
36023603
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
36033604
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
3605+
int bpf_insn_array_iter_xlated_offset(struct bpf_map *map, u32 iter_no);
36043606

36053607
/*
36063608
* The struct bpf_insn_ptr structure describes a pointer to a

include/linux/bpf_verifier.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,10 @@ struct bpf_reg_state {
229229
enum bpf_reg_liveness live;
230230
/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
231231
bool precise;
232+
233+
/* Used to track boundaries of a PTR_TO_INSN */
234+
u32 min_index;
235+
u32 max_index;
232236
};
233237

234238
enum bpf_stack_slot_type {

kernel/bpf/bpf_insn_array.c

Lines changed: 70 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@ struct bpf_insn_array {
99
struct bpf_map map;
1010
struct mutex state_mutex;
1111
int state;
12+
u32 **unique_offsets;
13+
u32 unique_offsets_cnt;
1214
long *ips;
1315
DECLARE_FLEX_ARRAY(struct bpf_insn_ptr, ptrs);
1416
};
@@ -50,6 +52,7 @@ static void insn_array_free(struct bpf_map *map)
5052
{
5153
struct bpf_insn_array *insn_array = cast_insn_array(map);
5254

55+
kfree(insn_array->unique_offsets);
5356
kfree(insn_array->ips);
5457
bpf_map_area_free(insn_array);
5558
}
@@ -69,6 +72,12 @@ static struct bpf_map *insn_array_alloc(union bpf_attr *attr)
6972
return ERR_PTR(-ENOMEM);
7073
}
7174

75+
insn_array->unique_offsets = kzalloc(sizeof(long) * attr->max_entries, GFP_KERNEL);
76+
if (!insn_array->unique_offsets) {
77+
insn_array_free(&insn_array->map);
78+
return ERR_PTR(-ENOMEM);
79+
}
80+
7281
bpf_map_init_from_attr(&insn_array->map, attr);
7382

7483
mutex_init(&insn_array->state_mutex);
@@ -155,10 +164,25 @@ static u64 insn_array_mem_usage(const struct bpf_map *map)
155164
u64 extra_size = 0;
156165

157166
extra_size += sizeof(long) * map->max_entries; /* insn_array->ips */
167+
extra_size += 4 * map->max_entries; /* insn_array->unique_offsets */
158168

159169
return insn_array_alloc_size(map->max_entries) + extra_size;
160170
}
161171

172+
/*
 * Resolve a direct load of the map's value address (BPF_LD_IMM64 with
 * BPF_PSEUDO_MAP_VALUE).  Only the base of the map (off == 0) may be
 * taken; any non-zero offset is rejected for now.
 *
 * From BPF's point of view this map *is* the jump table: the address
 * handed back is the start of the ips[] array of native jump targets.
 */
static int insn_array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	if (off)
		return -EINVAL;

	*imm = (unsigned long)insn_array->ips;
	return 0;
}
185+
162186
BTF_ID_LIST_SINGLE(insn_array_btf_ids, struct, bpf_insn_array)
163187

164188
const struct bpf_map_ops insn_array_map_ops = {
@@ -171,6 +195,7 @@ const struct bpf_map_ops insn_array_map_ops = {
171195
.map_delete_elem = insn_array_delete_elem,
172196
.map_check_btf = insn_array_check_btf,
173197
.map_mem_usage = insn_array_mem_usage,
198+
.map_direct_value_addr = insn_array_map_direct_value_addr,
174199
.map_btf_id = &insn_array_btf_ids[0],
175200
};
176201

@@ -207,6 +232,37 @@ static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
207232
return true;
208233
}
209234

235+
static int cmp_unique_offsets(const void *a, const void *b)
236+
{
237+
return *(u32 *)a - *(u32 *)b;
238+
}
239+
240+
static int bpf_insn_array_init_unique_offsets(struct bpf_insn_array *insn_array)
241+
{
242+
u32 cnt = insn_array->map.max_entries, ucnt = 1;
243+
u32 **off = insn_array->unique_offsets;
244+
int i;
245+
246+
/* [0,3,2,4,6,5,5,5,1,1,0,0] */
247+
for (i = 0; i < cnt; i++)
248+
off[i] = &insn_array->ptrs[i].user_value.xlated_off;
249+
250+
/* [0,0,0,1,1,2,3,4,5,5,5,6] */
251+
sort(off, cnt, sizeof(off[0]), cmp_unique_offsets, NULL);
252+
253+
/*
254+
* [0,1,2,3,4,5,6,x,x,x,x,x]
255+
* \.........../
256+
* unique_offsets_cnt
257+
*/
258+
for (i = 1; i < cnt; i++)
259+
if (*off[i] != *off[ucnt-1])
260+
off[ucnt++] = off[i];
261+
262+
insn_array->unique_offsets_cnt = ucnt;
263+
return 0;
264+
}
265+
210266
int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
211267
{
212268
struct bpf_insn_array *insn_array = cast_insn_array(map);
@@ -237,7 +293,10 @@ int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
237293
for (i = 0; i < map->max_entries; i++)
238294
insn_array->ptrs[i].user_value.xlated_off = insn_array->ptrs[i].orig_xlated_off;
239295

240-
return 0;
296+
/*
297+
* Prepare a set of unique offsets
298+
*/
299+
return bpf_insn_array_init_unique_offsets(insn_array);
241300
}
242301

243302
int bpf_insn_array_ready(struct bpf_map *map)
@@ -330,3 +389,13 @@ void bpf_prog_update_insn_ptr(struct bpf_prog *prog,
330389
}
331390
}
332391
}
392+
393+
/*
 * Iterator over the unique xlated offsets of an instruction array map.
 *
 * Returns the xlated offset of the iter_no-th unique slot, or -ENOENT
 * once iter_no runs past the number of unique offsets.
 *
 * NOTE(review): the return type is int while xlated_off is u32 — this
 * assumes offsets stay below INT_MAX so they cannot alias the negative
 * error value; verify against the callers.
 */
int bpf_insn_array_iter_xlated_offset(struct bpf_map *map, u32 iter_no)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	if (iter_no < insn_array->unique_offsets_cnt)
		return *insn_array->unique_offsets[iter_no];

	return -ENOENT;
}

kernel/bpf/core.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1725,6 +1725,8 @@ bool bpf_opcode_in_insntable(u8 code)
17251725
[BPF_LD | BPF_IND | BPF_B] = true,
17261726
[BPF_LD | BPF_IND | BPF_H] = true,
17271727
[BPF_LD | BPF_IND | BPF_W] = true,
1728+
[BPF_JMP | BPF_JA | BPF_X] = true,
1729+
[BPF_JMP32 | BPF_JA | BPF_X] = true,
17281730
[BPF_JMP | BPF_JCOND] = true,
17291731
};
17301732
#undef BPF_INSN_3_TBL

0 commit comments

Comments
 (0)