Skip to content

Commit 8fc3d2d

Browse files
etsal authored and Alexei Starovoitov committed
bpf/arena: add bpf_arena_reserve_pages kfunc
Add a new BPF arena kfunc for reserving a range of arena virtual addresses without backing them with pages. This prevents the range from being populated using bpf_arena_alloc_pages(). Acked-by: Yonghong Song <[email protected]> Signed-off-by: Emil Tsalapatis <[email protected]> Acked-by: Kumar Kartikeya Dwivedi <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent ad97cb2 commit 8fc3d2d

File tree

1 file changed

+43
-0
lines changed

1 file changed

+43
-0
lines changed

kernel/bpf/arena.c

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -550,6 +550,34 @@ static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
550550
}
551551
}
552552

553+
/*
554+
* Reserve an arena virtual address range without populating it. This call stops
555+
* bpf_arena_alloc_pages from adding pages to this range.
556+
*/
557+
static int arena_reserve_pages(struct bpf_arena *arena, long uaddr, u32 page_cnt)
558+
{
559+
long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
560+
long pgoff;
561+
int ret;
562+
563+
if (uaddr & ~PAGE_MASK)
564+
return 0;
565+
566+
pgoff = compute_pgoff(arena, uaddr);
567+
if (pgoff + page_cnt > page_cnt_max)
568+
return -EINVAL;
569+
570+
guard(mutex)(&arena->lock);
571+
572+
/* Cannot guard already allocated pages. */
573+
ret = is_range_tree_set(&arena->rt, pgoff, page_cnt);
574+
if (ret)
575+
return -EBUSY;
576+
577+
/* "Allocate" the region to prevent it from being allocated. */
578+
return range_tree_clear(&arena->rt, pgoff, page_cnt);
579+
}
580+
553581
__bpf_kfunc_start_defs();
554582

555583
__bpf_kfunc void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt,
@@ -573,11 +601,26 @@ __bpf_kfunc void bpf_arena_free_pages(void *p__map, void *ptr__ign, u32 page_cnt
573601
return;
574602
arena_free_pages(arena, (long)ptr__ign, page_cnt);
575603
}
604+
605+
/*
 * Kfunc entry point: reserve @page_cnt pages starting at @ptr__ign in the
 * arena map @p__map without backing them with memory.
 *
 * Returns 0 on success (a zero-page reservation is a no-op) or a negative
 * errno on failure.
 */
__bpf_kfunc int bpf_arena_reserve_pages(void *p__map, void *ptr__ign, u32 page_cnt)
{
	struct bpf_map *map = p__map;
	struct bpf_arena *arena;

	/* Only arena maps support address-range reservations. */
	if (map->map_type != BPF_MAP_TYPE_ARENA)
		return -EINVAL;

	if (page_cnt == 0)
		return 0;

	arena = container_of(map, struct bpf_arena, map);
	return arena_reserve_pages(arena, (long)ptr__ign, page_cnt);
}
576618
__bpf_kfunc_end_defs();
577619

578620
/*
 * Arena kfuncs exposed to BPF programs. All are sleepable, require trusted
 * arguments, and take an arena pointer as their second argument
 * (KF_ARENA_ARG2); bpf_arena_alloc_pages additionally returns an arena
 * pointer (KF_ARENA_RET).
 */
BTF_KFUNCS_START(arena_kfuncs)
BTF_ID_FLAGS(func, bpf_arena_alloc_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_RET | KF_ARENA_ARG2)
BTF_ID_FLAGS(func, bpf_arena_free_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_ARG2)
BTF_ID_FLAGS(func, bpf_arena_reserve_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_ARG2)
BTF_KFUNCS_END(arena_kfuncs)
582625

583626
static const struct btf_kfunc_id_set common_kfunc_set = {

0 commit comments

Comments
 (0)