Commit 918ba26

etsal authored and Alexei Starovoitov committed
selftests: bpf: add bpf_cpumask_populate selftests
Add selftests for the bpf_cpumask_populate helper that sets a bpf_cpumask to a bit pattern provided by a BPF program.

Signed-off-by: Emil Tsalapatis (Meta) <[email protected]>
Acked-by: Hou Tao <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
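For context, here is a minimal sketch of how a BPF program might call the new kfunc. The include choices, program name, and attach point are illustrative; the kfunc declarations mirror the ones used by these selftests (only bpf_cpumask_populate is new in this commit):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Existing cpumask kfuncs plus the new bpf_cpumask_populate. */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym __weak;

/* Source pattern: at least sizeof(cpumask_t) bytes and 8-byte aligned. */
u64 pattern[(sizeof(cpumask_t) + 7) / 8];

SEC("tp_btf/task_newtask")
int BPF_PROG(populate_example, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)
		return 0;

	/* Copy the raw bit pattern into the bpf_cpumask. The kfunc returns
	 * -EACCES if the source buffer is too small, and -EINVAL if it is
	 * misaligned on architectures without efficient unaligned access.
	 */
	if (bpf_cpumask_populate((struct cpumask *)mask, pattern, sizeof(pattern)))
		goto out;

	/* ... use the populated mask here ... */

out:
	bpf_cpumask_release(mask);
	return 0;
}

char _license[] SEC("license") = "GPL";

The failure and success programs added below exercise exactly those error paths (-EACCES for an undersized buffer, -EINVAL for a misaligned one on affected architectures) as well as a full copy of an odd-bit pattern.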
1 parent 950ad93 commit 918ba26

File tree

4 files changed, +161 −0 lines changed


tools/testing/selftests/bpf/prog_tests/cpumask.c

Lines changed: 3 additions & 0 deletions
@@ -25,6 +25,9 @@ static const char * const cpumask_success_testcases[] = {
 	"test_global_mask_nested_deep_rcu",
 	"test_global_mask_nested_deep_array_rcu",
 	"test_cpumask_weight",
+	"test_populate_reject_small_mask",
+	"test_populate_reject_unaligned",
+	"test_populate",
 };
 
 static void verify_success(const char *prog_name)

tools/testing/selftests/bpf/progs/cpumask_common.h

Lines changed: 1 addition & 0 deletions
@@ -61,6 +61,7 @@ u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak;
 u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
 				   const struct cpumask *src2) __ksym __weak;
 u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak;
+int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym __weak;
 
 void bpf_rcu_read_lock(void) __ksym __weak;
 void bpf_rcu_read_unlock(void) __ksym __weak;

tools/testing/selftests/bpf/progs/cpumask_failure.c

Lines changed: 38 additions & 0 deletions
@@ -222,3 +222,41 @@ int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
 
 	return 0;
 }
+
+SEC("tp_btf/task_newtask")
+__failure __msg("type=scalar expected=fp")
+int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *invalid = (struct bpf_cpumask *)0x123456;
+	u64 bits;
+	int ret;
+
+	ret = bpf_cpumask_populate((struct cpumask *)invalid, &bits, sizeof(bits));
+	if (!ret)
+		err = 2;
+
+	return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("leads to invalid memory access")
+int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags)
+{
+	void *garbage = (void *)0x123456;
+	struct bpf_cpumask *local;
+	int ret;
+
+	local = create_cpumask();
+	if (!local) {
+		err = 1;
+		return 0;
+	}
+
+	ret = bpf_cpumask_populate((struct cpumask *)local, garbage, 8);
+	if (!ret)
+		err = 2;
+
+	bpf_cpumask_release(local);
+
+	return 0;
+}

tools/testing/selftests/bpf/progs/cpumask_success.c

Lines changed: 119 additions & 0 deletions
@@ -770,3 +770,122 @@ int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
 	bpf_cpumask_release(mask2);
 	return 0;
 }
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *local;
+	u8 toofewbits;
+	int ret;
+
+	if (!is_test_task())
+		return 0;
+
+	local = create_cpumask();
+	if (!local)
+		return 0;
+
+	/* The kfunc should prevent this operation */
+	ret = bpf_cpumask_populate((struct cpumask *)local, &toofewbits, sizeof(toofewbits));
+	if (ret != -EACCES)
+		err = 2;
+
+	bpf_cpumask_release(local);
+
+	return 0;
+}
+
+/* Mask is guaranteed to be large enough for bpf_cpumask_t. */
+#define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t))
+
+/* Add an extra word for the test_populate_reject_unaligned test. */
+u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1];
+extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *mask;
+	char *src;
+	int ret;
+
+	if (!is_test_task())
+		return 0;
+
+	/* Skip if unaligned accesses are fine for this arch. */
+	if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+		return 0;
+
+	mask = bpf_cpumask_create();
+	if (!mask) {
+		err = 1;
+		return 0;
+	}
+
+	/* Misalign the source array by a byte. */
+	src = &((char *)bits)[1];
+
+	ret = bpf_cpumask_populate((struct cpumask *)mask, src, CPUMASK_TEST_MASKLEN);
+	if (ret != -EINVAL)
+		err = 2;
+
+	bpf_cpumask_release(mask);
+
+	return 0;
+}
+
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *mask;
+	bool bit;
+	int ret;
+	int i;
+
+	if (!is_test_task())
+		return 0;
+
+	/* Set only odd bits. */
+	__builtin_memset(bits, 0xaa, CPUMASK_TEST_MASKLEN);
+
+	mask = bpf_cpumask_create();
+	if (!mask) {
+		err = 1;
+		return 0;
+	}
+
+	/* Pass the entire bits array, the kfunc will only copy the valid bits. */
+	ret = bpf_cpumask_populate((struct cpumask *)mask, bits, CPUMASK_TEST_MASKLEN);
+	if (ret) {
+		err = 2;
+		goto out;
+	}
+
+	/*
+	 * Test is there to appease the verifier. We cannot directly
+	 * access NR_CPUS, the upper bound for nr_cpus, so we infer
+	 * it from the size of cpumask_t.
+	 */
+	if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8) {
+		err = 3;
+		goto out;
+	}
+
+	bpf_for(i, 0, nr_cpus) {
+		/* Odd-numbered bits should be set, even ones unset. */
+		bit = bpf_cpumask_test_cpu(i, (const struct cpumask *)mask);
+		if (bit == (i % 2 != 0))
+			continue;
+
+		err = 4;
+		break;
+	}
+
+out:
+	bpf_cpumask_release(mask);
+
+	return 0;
+}
+
+#undef CPUMASK_TEST_MASKLEN
