Skip to content

Commit d0b98f6

Browse files
eddyz87 authored and Alexei Starovoitov committed
bpf: disallow 40-bytes extra stack for bpf_fastcall patterns
Hou Tao reported an issue with bpf_fastcall patterns allowing extra stack space above MAX_BPF_STACK limit. This extra stack allowance is not integrated properly with the following verifier parts: - backtracking logic still assumes that stack can't exceed MAX_BPF_STACK; - bpf_verifier_env->scratched_stack_slots assumes only 64 slots are available. Here is an example of an issue with precision tracking (note stack slot -8 tracked as precise instead of -520): 0: (b7) r1 = 42 ; R1_w=42 1: (b7) r2 = 42 ; R2_w=42 2: (7b) *(u64 *)(r10 -512) = r1 ; R1_w=42 R10=fp0 fp-512_w=42 3: (7b) *(u64 *)(r10 -520) = r2 ; R2_w=42 R10=fp0 fp-520_w=42 4: (85) call bpf_get_smp_processor_id#8 ; R0_w=scalar(...) 5: (79) r2 = *(u64 *)(r10 -520) ; R2_w=42 R10=fp0 fp-520_w=42 6: (79) r1 = *(u64 *)(r10 -512) ; R1_w=42 R10=fp0 fp-512_w=42 7: (bf) r3 = r10 ; R3_w=fp0 R10=fp0 8: (0f) r3 += r2 mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx -1 mark_precise: frame0: regs=r2 stack= before 7: (bf) r3 = r10 mark_precise: frame0: regs=r2 stack= before 6: (79) r1 = *(u64 *)(r10 -512) mark_precise: frame0: regs=r2 stack= before 5: (79) r2 = *(u64 *)(r10 -520) mark_precise: frame0: regs= stack=-8 before 4: (85) call bpf_get_smp_processor_id#8 mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -520) = r2 mark_precise: frame0: regs=r2 stack= before 2: (7b) *(u64 *)(r10 -512) = r1 mark_precise: frame0: regs=r2 stack= before 1: (b7) r2 = 42 9: R2_w=42 R3_w=fp42 9: (95) exit This patch disables the additional allowance for the moment. Also, two test cases are removed: - bpf_fastcall_max_stack_ok: it fails w/o additional stack allowance; - bpf_fastcall_max_stack_fail: this test is no longer necessary, stack size follows regular rules, pattern invalidation is checked by other test cases. 
Reported-by: Hou Tao <[email protected]> Closes: https://lore.kernel.org/bpf/[email protected]/ Fixes: 5b5f51b ("bpf: no_caller_saved_registers attribute for helper calls") Signed-off-by: Eduard Zingerman <[email protected]> Acked-by: Andrii Nakryiko <[email protected]> Tested-by: Hou Tao <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent d7f214a commit d0b98f6

File tree

2 files changed

+2
-67
lines changed

2 files changed

+2
-67
lines changed

kernel/bpf/verifier.c

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -6804,20 +6804,10 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
68046804
struct bpf_func_state *state,
68056805
enum bpf_access_type t)
68066806
{
6807-
struct bpf_insn_aux_data *aux = &env->insn_aux_data[env->insn_idx];
6808-
int min_valid_off, max_bpf_stack;
6809-
6810-
/* If accessing instruction is a spill/fill from bpf_fastcall pattern,
6811-
* add room for all caller saved registers below MAX_BPF_STACK.
6812-
* In case if bpf_fastcall rewrite won't happen maximal stack depth
6813-
* would be checked by check_max_stack_depth_subprog().
6814-
*/
6815-
max_bpf_stack = MAX_BPF_STACK;
6816-
if (aux->fastcall_pattern)
6817-
max_bpf_stack += CALLER_SAVED_REGS * BPF_REG_SIZE;
6807+
int min_valid_off;
68186808

68196809
if (t == BPF_WRITE || env->allow_uninit_stack)
6820-
min_valid_off = -max_bpf_stack;
6810+
min_valid_off = -MAX_BPF_STACK;
68216811
else
68226812
min_valid_off = -state->allocated_stack;
68236813

tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c

Lines changed: 0 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -790,61 +790,6 @@ __naked static void cumulative_stack_depth_subprog(void)
790790
:: __imm(bpf_get_smp_processor_id) : __clobber_all);
791791
}
792792

793-
SEC("raw_tp")
794-
__arch_x86_64
795-
__log_level(4)
796-
__msg("stack depth 512")
797-
__xlated("0: r1 = 42")
798-
__xlated("1: *(u64 *)(r10 -512) = r1")
799-
__xlated("2: w0 = ")
800-
__xlated("3: r0 = &(void __percpu *)(r0)")
801-
__xlated("4: r0 = *(u32 *)(r0 +0)")
802-
__xlated("5: exit")
803-
__success
804-
__naked int bpf_fastcall_max_stack_ok(void)
805-
{
806-
asm volatile(
807-
"r1 = 42;"
808-
"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
809-
"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
810-
"call %[bpf_get_smp_processor_id];"
811-
"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
812-
"exit;"
813-
:
814-
: __imm_const(max_bpf_stack, MAX_BPF_STACK),
815-
__imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
816-
__imm(bpf_get_smp_processor_id)
817-
: __clobber_all
818-
);
819-
}
820-
821-
SEC("raw_tp")
822-
__arch_x86_64
823-
__log_level(4)
824-
__msg("stack depth 520")
825-
__failure
826-
__naked int bpf_fastcall_max_stack_fail(void)
827-
{
828-
asm volatile(
829-
"r1 = 42;"
830-
"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
831-
"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
832-
"call %[bpf_get_smp_processor_id];"
833-
"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
834-
/* call to prandom blocks bpf_fastcall rewrite */
835-
"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
836-
"call %[bpf_get_prandom_u32];"
837-
"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
838-
"exit;"
839-
:
840-
: __imm_const(max_bpf_stack, MAX_BPF_STACK),
841-
__imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
842-
__imm(bpf_get_smp_processor_id),
843-
__imm(bpf_get_prandom_u32)
844-
: __clobber_all
845-
);
846-
}
847-
848793
SEC("cgroup/getsockname_unix")
849794
__xlated("0: r2 = 1")
850795
/* bpf_cast_to_kern_ctx is replaced by a single assignment */

0 commit comments

Comments
 (0)