
Commit d2757e1

Asphaltt authored and Kernel Patches Daemon committed
selftests/bpf: Add tests to verify freeing the special fields when update hash and local storage maps
Add tests to verify that updating hash and local storage maps decrements
the refcount when BPF_KPTR_REF objects are involved.

The tests perform the following steps:

1. Call update_elem() to insert an initial value.
2. Use bpf_refcount_acquire() to increment the refcount.
3. Store the node pointer in the map value.
4. Add the node to a linked list.
5. Probe-read the refcount and verify it is *2*.
6. Call update_elem() again to trigger the refcount decrement.
7. Probe-read the refcount and verify it is *1*.

Signed-off-by: Leon Hwang <[email protected]>
1 parent 94d83ea commit d2757e1
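The userspace side of each subtest boils down to the following sequence, condensed here from the test_refcnt_leak() helper added below (a sketch only; error handling trimmed):

	/* step 1: insert the initial map value */
	bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
	/* steps 2-4: the *_refcount_leak prog acquires a ref, stores the kptr and links the node */
	bpf_prog_test_run_opts(bpf_program__fd(prog_leak), &opts);
	/* step 5: the refcount probed from the BPF side must be 2 */
	ASSERT_EQ(skel->bss->kptr_refcount, 2, "refcount");
	/* step 6: overwrite the element so the previously stored kptr is released */
	bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
	/* step 7: the check_* prog re-reads the refcount, which must now be 1 */
	bpf_prog_test_run_opts(bpf_program__fd(prog_check), &opts);
	ASSERT_EQ(skel->bss->kptr_refcount, 1, "refcount");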

File tree

2 files changed: +262 -1 lines changed


tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c

Lines changed: 133 additions & 1 deletion
@@ -3,7 +3,7 @@
 
 #include <test_progs.h>
 #include <network_helpers.h>
-
+#include "cgroup_helpers.h"
 #include "refcounted_kptr.skel.h"
 #include "refcounted_kptr_fail.skel.h"
 
@@ -44,3 +44,135 @@ void test_refcounted_kptr_wrong_owner(void)
 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
 	refcounted_kptr__destroy(skel);
 }
+
+static void test_refcnt_leak(struct refcounted_kptr *skel, int key, void *values, size_t values_sz,
+			     u64 flags, struct bpf_map *map, struct bpf_program *prog_leak,
+			     struct bpf_program *prog_check, struct bpf_test_run_opts *opts)
+{
+	int ret, fd;
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem init"))
+		return;
+
+	fd = bpf_program__fd(prog_leak);
+	ret = bpf_prog_test_run_opts(fd, opts);
+	if (!ASSERT_OK(ret, "bpf_prog_test_run_opts"))
+		return;
+	if (!ASSERT_EQ(skel->bss->kptr_refcount, 2, "refcount"))
+		return;
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem dec refcount"))
+		return;
+
+	fd = bpf_program__fd(prog_check);
+	ret = bpf_prog_test_run_opts(fd, opts);
+	ASSERT_OK(ret, "bpf_prog_test_run_opts");
+	ASSERT_EQ(skel->bss->kptr_refcount, 1, "refcount");
+}
+
+static void test_percpu_hash_refcount_leak(void)
+{
+	struct refcounted_kptr *skel;
+	size_t values_sz;
+	u64 *values;
+	int cpu_nr;
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	cpu_nr = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	values = calloc(cpu_nr, sizeof(u64));
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
+		free(values);
+		return;
+	}
+
+	values_sz = cpu_nr * sizeof(u64);
+	memset(values, 0, values_sz);
+
+	test_refcnt_leak(skel, 0, values, values_sz, 0, skel->maps.pcpu_hash,
+			 skel->progs.pcpu_hash_refcount_leak,
+			 skel->progs.check_pcpu_hash_refcount, &opts);
+
+	refcounted_kptr__destroy(skel);
+	free(values);
+}
+
+struct lock_map_value {
+	u64 kptr;
+	struct bpf_spin_lock lock;
+	int value;
+};
+
+static void test_hash_lock_refcount_leak(void)
+{
+	struct lock_map_value value = {};
+	struct refcounted_kptr *skel;
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		return;
+
+	test_refcnt_leak(skel, 0, &value, sizeof(value), BPF_F_LOCK, skel->maps.lock_hash,
+			 skel->progs.hash_lock_refcount_leak,
+			 skel->progs.check_hash_lock_refcount, &opts);
+
+	refcounted_kptr__destroy(skel);
+}
+
+static void test_cgroup_storage_lock_refcount_leak(void)
+{
+	struct lock_map_value value = {};
+	struct refcounted_kptr *skel;
+	int cgroup, err;
+	LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "setup_cgroup_environment"))
+		return;
+
+	cgroup = get_root_cgroup();
+	if (!ASSERT_GE(cgroup, 0, "get_root_cgroup")) {
+		cleanup_cgroup_environment();
+		return;
+	}
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		goto out;
+
+	test_refcnt_leak(skel, cgroup, &value, sizeof(value), BPF_F_LOCK, skel->maps.cgrp_strg,
+			 skel->progs.cgroup_storage_lock_refcount_leak,
+			 skel->progs.check_cgroup_storage_lock_refcount, &opts);
+
+	refcounted_kptr__destroy(skel);
+out:
+	close(cgroup);
+	cleanup_cgroup_environment();
+}
+
+void test_kptr_refcount_leak(void)
+{
+	if (test__start_subtest("percpu_hash_refcount_leak"))
+		test_percpu_hash_refcount_leak();
+	if (test__start_subtest("hash_lock_refcount_leak"))
+		test_hash_lock_refcount_leak();
+	if (test__start_subtest("cgroup_storage_lock_refcount_leak"))
+		test_cgroup_storage_lock_refcount_leak();
+}

tools/testing/selftests/bpf/progs/refcounted_kptr.c

Lines changed: 129 additions & 0 deletions
@@ -568,4 +568,133 @@ int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
 	return 0;
 }
 
+private(kptr_ref) u64 ref;
+u32 kptr_refcount;
+
+static int probe_read_refcount(void)
+{
+	bpf_probe_read_kernel(&kptr_refcount, sizeof(kptr_refcount), (void *) ref);
+	return 0;
+}
+
+static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+			    struct node_data __kptr **node)
+{
+	struct node_data *n, *m;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n)
+		return 0;
+
+	m = bpf_refcount_acquire(n);
+	n = bpf_kptr_xchg(node, n);
+	if (n) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(m);
+		return 0;
+	}
+
+	bpf_spin_lock(lock);
+	bpf_list_push_front(head, &m->l);
+	ref = (u64)(void *) &m->ref;
+	bpf_spin_unlock(lock);
+	return probe_read_refcount();
+}
+
+static void *__lookup_map(void *map)
+{
+	int key = 0;
+
+	return bpf_map_lookup_elem(map, &key);
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__type(key, int);
+	__type(value, struct map_value);
+	__uint(max_entries, 1);
+} pcpu_hash SEC(".maps");
+
+SEC("tc")
+int pcpu_hash_refcount_leak(void *ctx)
+{
+	struct map_value *v;
+
+	v = __lookup_map(&pcpu_hash);
+	if (!v)
+		return 0;
+
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_pcpu_hash_refcount(void *ctx)
+{
+	return probe_read_refcount();
+}
+
+struct lock_map_value {
+	struct node_data __kptr *node;
+	struct bpf_spin_lock lock;
+	int value;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, int);
+	__type(value, struct lock_map_value);
+	__uint(max_entries, 1);
+} lock_hash SEC(".maps");
+
+SEC("tc")
+int hash_lock_refcount_leak(void *ctx)
+{
+	struct lock_map_value *v;
+
+	v = __lookup_map(&lock_hash);
+	if (!v)
+		return 0;
+
+	bpf_spin_lock(&v->lock);
+	v->value = 42;
+	bpf_spin_unlock(&v->lock);
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_hash_lock_refcount(void *ctx)
+{
+	return probe_read_refcount();
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct lock_map_value);
+} cgrp_strg SEC(".maps");
+
+SEC("syscall")
+int BPF_PROG(cgroup_storage_lock_refcount_leak)
+{
+	struct lock_map_value *v;
+	struct task_struct *task;
+
+	task = bpf_get_current_task_btf();
+	bpf_rcu_read_lock();
+	v = bpf_cgrp_storage_get(&cgrp_strg, task->cgroups->dfl_cgrp, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	bpf_rcu_read_unlock();
+	if (!v)
+		return 0;
+
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("syscall")
+int BPF_PROG(check_cgroup_storage_lock_refcount)
+{
+	return probe_read_refcount();
+}
+
 char _license[] SEC("license") = "GPL";
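For reference, the refcount values the selftest expects fall out of __insert_in_list() like this (annotations added here, not part of the commit):

	n = bpf_obj_new(typeof(*n));        /* node created with refcount 1 */
	m = bpf_refcount_acquire(n);        /* refcount 2 */
	n = bpf_kptr_xchg(node, n);         /* one reference now owned by the map value */
	bpf_list_push_front(head, &m->l);   /* the other reference now owned by the list */
	/* probe_read_refcount() therefore reads 2; once userspace overwrites the map
	 * element, the map-owned reference is dropped and the next read returns 1.
	 */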
