Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 0 additions & 8 deletions Documentation/bpf/linux-notes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,6 @@ Byte swap instructions

``BPF_FROM_LE`` and ``BPF_FROM_BE`` exist as aliases for ``BPF_TO_LE`` and ``BPF_TO_BE`` respectively.

Jump instructions
=================

``BPF_CALL | BPF_X | BPF_JMP`` (0x8d), where the helper function
integer would be read from a specified register, is not currently supported
by the verifier. Any programs with this instruction will fail to load
until such support is added.

Maps
====

Expand Down
39 changes: 32 additions & 7 deletions arch/x86/net/bpf_jit_comp.c
Original file line number Diff line number Diff line change
Expand Up @@ -660,24 +660,38 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,

#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
/*
 * Emit a bare "jmp *%reg" indirect jump at the JIT output cursor.
 *
 * @pprog: in/out pointer to the JIT output cursor; advanced past the
 *         emitted bytes on return
 * @reg:   low 3 bits of the x86 register encoding (a reg2hex[] value)
 * @ereg:  true when the target is an extended register (r8-r15), which
 *         requires a REX.B prefix byte
 */
static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
{
u8 *prog = *pprog;

/* 0x41 = REX.B: select the extended register bank (r8-r15) */
if (ereg)
EMIT1(0x41);

/* FF /4 (ModRM 0xE0+reg) encodes "jmp *%reg" */
EMIT2(0xFF, 0xE0 + reg);

*pprog = prog;
}

static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
{
u8 *prog = *pprog;
int reg = reg2hex[bpf_reg];
bool ereg = is_ereg(bpf_reg);

if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
OPTIMIZER_HIDE_VAR(reg);
emit_jump(&prog, its_static_thunk(reg), ip);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE();
EMIT2(0xFF, 0xE0 + reg);
__emit_indirect_jump(&prog, reg, ereg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
OPTIMIZER_HIDE_VAR(reg);
if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
else
emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
} else {
EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
__emit_indirect_jump(&prog, reg, ereg);
if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
EMIT1(0xCC); /* int3 */
}
Expand Down Expand Up @@ -797,7 +811,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
* rdi == ctx (1st arg)
* rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
*/
emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));

/* out: */
ctx->tail_call_indirect_label = prog - start;
Expand Down Expand Up @@ -1691,6 +1705,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
prog = temp;

for (i = 1; i <= insn_cnt; i++, insn++) {
u32 abs_xlated_off = bpf_prog->aux->subprog_start + i - 1;
const s32 imm32 = insn->imm;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
Expand Down Expand Up @@ -2614,6 +2629,9 @@ st: if (is_imm8(insn->off))

break;

case BPF_JMP | BPF_JA | BPF_X:
emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
break;
case BPF_JMP | BPF_JA:
case BPF_JMP32 | BPF_JA:
if (BPF_CLASS(insn->code) == BPF_JMP) {
Expand Down Expand Up @@ -2751,6 +2769,13 @@ st: if (is_imm8(insn->off))
return -EFAULT;
}
memcpy(rw_image + proglen, temp, ilen);

/*
* Instruction arrays need to know how xlated code
* maps to jitted code
*/
bpf_prog_update_insn_ptr(bpf_prog, abs_xlated_off, proglen,
image + proglen);
}
proglen += ilen;
addrs[i] = proglen;
Expand Down Expand Up @@ -3543,7 +3568,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image,
if (err)
return err;

emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));

*pprog = prog;
return 0;
Expand Down
37 changes: 37 additions & 0 deletions include/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -995,6 +995,7 @@ enum bpf_reg_type {
PTR_TO_ARENA,
PTR_TO_BUF, /* reg points to a read/write buffer */
PTR_TO_FUNC, /* reg points to a bpf program function */
PTR_TO_INSN, /* reg points to a bpf program instruction */
CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
__BPF_REG_TYPE_MAX,

Expand Down Expand Up @@ -3789,4 +3790,40 @@ int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char *
const char **linep, int *nump);
struct bpf_prog *bpf_prog_find_from_stack(void);

int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog);
int bpf_insn_array_ready(struct bpf_map *map);
void bpf_insn_array_release(struct bpf_map *map);
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);

/*
 * The struct bpf_insn_ptr structure describes a pointer to a
 * particular instruction in a loaded BPF program. Initially
 * it is initialised from userspace via user_value.xlated_off.
 * During the program verification all other fields are populated
 * accordingly:
 *
 * jitted_ip: address of the instruction in the jitted image
 * user_value: user-visible original, xlated, and jitted offsets
 */
struct bpf_insn_ptr {
void *jitted_ip; /* address of the instruction in the jitted image */
struct bpf_insn_array_value user_value; /* user-visible orig/xlated/jitted offsets */
};

#ifdef CONFIG_BPF_SYSCALL
/*
 * Record, for the instruction at @xlated_off in @prog, its offset
 * (@jitted_off) and address (@jitted_ip) in the jitted image, so that
 * instruction arrays can map xlated instructions to jitted code.
 * Called by the JIT while emitting each instruction.
 */
void bpf_prog_update_insn_ptr(struct bpf_prog *prog,
u32 xlated_off,
u32 jitted_off,
void *jitted_ip);
#else
/* No-op stub when the BPF syscall (and JIT bookkeeping) is compiled out. */
static inline void
bpf_prog_update_insn_ptr(struct bpf_prog *prog,
u32 xlated_off,
u32 jitted_off,
void *jitted_ip)
{
}
#endif

#endif /* _LINUX_BPF_H */
1 change: 1 addition & 0 deletions include/linux/bpf_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_INSN_ARRAY, insn_array_map_ops)

BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
Expand Down
11 changes: 11 additions & 0 deletions include/linux/bpf_verifier.h
Original file line number Diff line number Diff line change
Expand Up @@ -527,6 +527,7 @@ struct bpf_insn_aux_data {
struct {
u32 map_index; /* index into used_maps[] */
u32 map_off; /* offset from value base address */
struct bpf_iarray *jt; /* jump table for gotox instruction */
};
struct {
enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
Expand Down Expand Up @@ -754,8 +755,10 @@ struct bpf_verifier_env {
struct list_head free_list; /* list of struct bpf_verifier_state_list */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY map's to be relocated */
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
u32 insn_array_map_cnt; /* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */
u32 id_gen; /* used to generate unique reg IDs */
u32 hidden_subprog_cnt; /* number of hidden subprogs */
int exception_callback_subprog;
Expand Down Expand Up @@ -838,6 +841,7 @@ struct bpf_verifier_env {
struct bpf_scc_info **scc_info;
u32 scc_cnt;
struct bpf_iarray *succ;
struct bpf_iarray *gotox_tmp_buf;
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
Expand Down Expand Up @@ -1048,6 +1052,13 @@ static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_
return !(off % BPF_REG_SIZE);
}

static inline bool insn_is_gotox(struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&
BPF_OP(insn->code) == BPF_JA &&
BPF_SRC(insn->code) == BPF_X;
}

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
Expand Down
21 changes: 21 additions & 0 deletions include/uapi/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -1026,6 +1026,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
BPF_MAP_TYPE_ARENA,
BPF_MAP_TYPE_INSN_ARRAY,
__MAX_BPF_MAP_TYPE
};

Expand Down Expand Up @@ -7645,4 +7646,24 @@ enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};

/*
 * Values of a BPF_MAP_TYPE_INSN_ARRAY entry must be of this type.
 *
 * Before the map is used the orig_off field should point to an
 * instruction inside the program being loaded. The other fields
 * must be set to 0.
 *
 * After the program is loaded, the xlated_off will be adjusted
 * by the verifier to point to the index of the original instruction
 * in the xlated program. If the instruction is deleted, it will
 * be set to (u32)-1. The jitted_off will be set to the corresponding
 * offset in the jitted image of the program.
 */
struct bpf_insn_array_value {
__u32 orig_off; /* in: instruction offset in the original program */
__u32 xlated_off; /* out: offset in the xlated program, (u32)-1 if deleted */
__u32 jitted_off; /* out: offset in the jitted image */
__u32 :32; /* reserved padding; must be zero before use */
};

#endif /* _UAPI__LINUX_BPF_H__ */
2 changes: 1 addition & 1 deletion kernel/bpf/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ CFLAGS_core.o += -Wno-override-init $(cflags-nogcse-yy)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o liveness.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o bpf_insn_array.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o
Expand Down
Loading
Loading