
Commit b5518c7

Author: Alexei Starovoitov (committed)

Merge branch 'bpf-fixes'

Daniel Borkmann says:

====================
First one is a panic I ran into while testing the second one where we got
several syzkaller reports. Series here fixes both. Thanks!
====================

Signed-off-by: Alexei Starovoitov <[email protected]>

2 parents 26bf8a8 + 9facc33, commit b5518c7

3 files changed: 106 additions, 38 deletions

include/linux/filter.h

Lines changed: 41 additions & 22 deletions

@@ -469,7 +469,8 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-        unsigned int pages;
+        u16 pages;
+        u16 locked:1;
         u8 image[];
 };
 
@@ -671,50 +672,49 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
         fp->locked = 1;
-        WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+        if (set_memory_ro((unsigned long)fp, fp->pages))
+                fp->locked = 0;
+#endif
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
         if (fp->locked) {
                 WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
                 /* In case set_memory_rw() fails, we want to be the first
                  * to crash here instead of some random place later on.
                  */
                 fp->locked = 0;
         }
+#endif
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-        WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-        WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+        hdr->locked = 1;
+        if (set_memory_ro((unsigned long)hdr, hdr->pages))
+                hdr->locked = 0;
+#endif
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+        if (hdr->locked) {
+                WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+                /* In case set_memory_rw() fails, we want to be the first
+                 * to crash here instead of some random place later on.
+                 */
+                hdr->locked = 0;
+        }
+#endif
 }
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -725,6 +725,22 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
         return (void *)addr;
 }
 
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
+{
+        if (!fp->locked)
+                return -ENOLCK;
+        if (fp->jited) {
+                const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+                if (!hdr->locked)
+                        return -ENOLCK;
+        }
+
+        return 0;
+}
+#endif
+
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
@@ -961,6 +977,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 }
 #endif /* CONFIG_BPF_JIT */
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
 #define BPF_ANC BIT(15)
 
 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
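Background for the filter.h hunks above: set_memory_ro() can fail (for example when an allocation needed to split a large page fails), and previously that failure was only reported via WARN_ON_ONCE() while the program kept running on writable pages. With this change the lock helpers clear the locked bit again when set_memory_ro() fails, the JIT image header gets its own locked bit, and bpf_prog_check_pages_ro_single() turns a missing lock into -ENOLCK. The stand-alone user-space sketch below mirrors that record-then-verify pattern, with mprotect() standing in for set_memory_ro(); the struct and helper names are invented for illustration and are not kernel APIs.

/* Minimal sketch of the record-then-verify pattern: try to make the image
 * read-only, remember whether that actually succeeded, and let a later
 * check reject the object if it did not. mprotect() stands in for
 * set_memory_ro(); everything here is hypothetical user-space code.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct image {
        unsigned int pages;
        unsigned int locked:1;
        unsigned char *mem;
};

static void image_lock_ro(struct image *img)
{
        size_t len = (size_t)img->pages * (size_t)getpagesize();

        img->locked = 1;
        if (mprotect(img->mem, len, PROT_READ))
                img->locked = 0;        /* RO did not stick, remember that */
}

static int image_check_ro(const struct image *img)
{
        return img->locked ? 0 : -ENOLCK;       /* mirrors the -ENOLCK in the patch */
}

int main(void)
{
        long page = getpagesize();
        struct image img = { .pages = 1 };

        img.mem = mmap(NULL, (size_t)page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (img.mem == MAP_FAILED)
                return 1;

        image_lock_ro(&img);
        printf("locked=%u check=%d\n", (unsigned int)img.locked,
               image_check_ro(&img));

        munmap(img.mem, (size_t)page);
        return 0;
}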

kernel/bpf/core.c

Lines changed: 62 additions & 7 deletions

@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
         return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+        int i;
+
+        for (i = 0; i < fp->aux->func_cnt; i++)
+                bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+        bpf_prog_kallsyms_del_subprogs(fp);
+        bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);

@@ -584,6 +598,8 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
         bpf_fill_ill_insns(hdr, size);
 
         hdr->pages = size / PAGE_SIZE;
+        hdr->locked = 0;
+
         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                      PAGE_SIZE - sizeof(*hdr));
         start = (get_random_int() % hole) & ~(alignment - 1);

@@ -1434,6 +1450,33 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
         return 0;
 }
 
+static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
+{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+        int i, err;
+
+        for (i = 0; i < fp->aux->func_cnt; i++) {
+                err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
+                if (err)
+                        return err;
+        }
+
+        return bpf_prog_check_pages_ro_single(fp);
+#endif
+        return 0;
+}
+
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+        fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  *      bpf_prog_select_runtime - select exec runtime for BPF program
  *      @fp: bpf_prog populated with internal BPF program

@@ -1444,13 +1487,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+        /* In case of BPF to BPF calls, verifier did all the prep
+         * work with regards to JITing, etc.
+         */
+        if (fp->bpf_func)
+                goto finalize;
 
-        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-        fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+        bpf_prog_select_func(fp);
 
         /* eBPF JITs can rewrite the program in case constant
          * blinding is active. However, in case of error during

@@ -1471,6 +1514,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
                 if (*err)
                         return fp;
         }
+
+finalize:
         bpf_prog_lock_ro(fp);
 
         /* The tail call compatibility check can only be done at

@@ -1479,7 +1524,17 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
          * all eBPF JITs might immediately support all features.
          */
         *err = bpf_check_tail_call(fp);
-
+        if (*err)
+                return fp;
+
+        /* Checkpoint: at this point onwards any cBPF -> eBPF or
+         * native eBPF program is read-only. If we failed to change
+         * the page attributes (e.g. allocation failure from
+         * splitting large pages), then reject the whole program
+         * in order to guarantee not ending up with any W+X pages
+         * from BPF side in kernel.
+         */
+        *err = bpf_prog_check_pages_ro_locked(fp);
         return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
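One detail worth spelling out from the core.c hunks above: bpf_prog_select_func() keeps the existing interpreter-slot arithmetic, interpreters[(round_up(max(stack_depth, 1), 32) / 32) - 1], i.e. one interpreter variant per 32 bytes of stack depth, and the #else branch falls back to __bpf_prog_ret0_warn when CONFIG_BPF_JIT_ALWAYS_ON is set. The small stand-alone program below (plain user-space C, nothing kernel-specific, helper name invented) just evaluates that expression for a few depths to show which slot they map to.

/* Worked example of the slot calculation used by bpf_prog_select_func():
 * idx = round_up(max(stack_depth, 1), 32) / 32 - 1.
 * Depths 1..32 share slot 0, 33..64 slot 1, and so on.
 */
#include <stdio.h>

static unsigned int round_up_32(unsigned int v)
{
        return (v + 31u) & ~31u;
}

int main(void)
{
        const unsigned int depths[] = { 0, 1, 32, 33, 64, 65, 512 };
        unsigned int i;

        for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
                unsigned int d = depths[i] ? depths[i] : 1;     /* max_t(u32, depth, 1) */
                unsigned int idx = round_up_32(d) / 32 - 1;

                printf("stack_depth=%3u -> interpreters[%u]\n", depths[i], idx);
        }
        return 0;
}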

kernel/bpf/syscall.c

Lines changed: 3 additions & 9 deletions

@@ -1034,14 +1034,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
         if (atomic_dec_and_test(&prog->aux->refcnt)) {
-                int i;
-
                 /* bpf_prog_free_id() must be called first */
                 bpf_prog_free_id(prog, do_idr_lock);
-
-                for (i = 0; i < prog->aux->func_cnt; i++)
-                        bpf_prog_kallsyms_del(prog->aux->func[i]);
-                bpf_prog_kallsyms_del(prog);
+                bpf_prog_kallsyms_del_all(prog);
 
                 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
         }

@@ -1358,9 +1353,7 @@ static int bpf_prog_load(union bpf_attr *attr)
         if (err < 0)
                 goto free_used_maps;
 
-        /* eBPF program is ready to be JITed */
-        if (!prog->bpf_func)
-                prog = bpf_prog_select_runtime(prog, &err);
+        prog = bpf_prog_select_runtime(prog, &err);
         if (err < 0)
                 goto free_used_maps;
 

@@ -1384,6 +1377,7 @@ static int bpf_prog_load(union bpf_attr *attr)
         return err;
 
 free_used_maps:
+        bpf_prog_kallsyms_del_subprogs(prog);
         free_used_maps(prog->aux);
 free_prog:
         bpf_prog_uncharge_memlock(prog);
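Two behavioural points from the syscall.c hunks: bpf_prog_load() now calls bpf_prog_select_runtime() unconditionally, since the function itself skips selection when the verifier already set fp->bpf_func (the finalize path in core.c), and the free_used_maps error path additionally drops the kallsyms entries of subprograms, so a program rejected after its subprograms were already JITed does not leave stale symbols behind. The sketch below (illustrative names only, not the kernel ones) shows the goto-based unwind shape that the extra cleanup step slots into.

/* Sketch of a goto-based unwind like the one in bpf_prog_load(): a later
 * setup step that fails jumps to a label that undoes everything set up so
 * far, and the patch adds one more teardown call to that label. All names
 * here are hypothetical.
 */
#include <stdio.h>

static int setup_maps(void)        { return 0; }
static int select_runtime(void)    { return -1; }   /* pretend this step fails */
static void del_subprog_syms(void) { puts("drop subprog kallsyms entries"); }
static void release_maps(void)     { puts("free used maps"); }

static int prog_load(void)
{
        int err;

        err = setup_maps();
        if (err)
                goto out;

        err = select_runtime();
        if (err)
                goto free_used_maps;    /* unwind everything done so far */

        return 0;

free_used_maps:
        del_subprog_syms();     /* the cleanup step this series adds */
        release_maps();
out:
        return err;
}

int main(void)
{
        printf("prog_load() = %d\n", prog_load());
        return 0;
}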
