@@ -420,6 +420,38 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
 	return cpumask_weight(cpumask);
 }
 
+/**
+ * bpf_cpumask_populate() - Populate the CPU mask from the contents of
+ * a BPF memory region.
+ *
+ * @cpumask: The cpumask being populated.
+ * @src: The BPF memory holding the bit pattern.
+ * @src__sz: Length of the BPF memory region in bytes.
+ *
+ * Return:
+ * * 0 if the struct cpumask * instance was populated successfully.
+ * * -EACCES if the memory region is too small to populate the cpumask.
+ * * -EINVAL if the memory region is not aligned to the size of a long
+ *   and the architecture does not support efficient unaligned accesses.
+ */
+__bpf_kfunc int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz)
+{
+	unsigned long source = (unsigned long)src;
+
+	/* The memory region must be large enough to populate the entire CPU mask. */
+	if (src__sz < bitmap_size(nr_cpu_ids))
+		return -EACCES;
+
+	/* If avoiding unaligned accesses, the input region must be aligned to the nearest long. */
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+	    !IS_ALIGNED(source, sizeof(long)))
+		return -EINVAL;
+
+	bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);
+
+	return 0;
+}
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
@@ -448,6 +480,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
+BTF_ID_FLAGS(func, bpf_cpumask_populate, KF_RCU)
 BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
 
 static const struct btf_kfunc_id_set cpumask_kfunc_set = {
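For context, a minimal BPF-side sketch (not part of this patch) of how a program might call the new kfunc, using the extern __ksym declaration convention common in BPF selftests. The section name, program shape, and pattern size are illustrative assumptions; the kfunc declarations are assumed to mirror the kernel-side signatures above.

/* SPDX-License-Identifier: GPL-2.0 */
/* Hypothetical usage sketch; not part of the diff above. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* kfunc declarations, assumed to match the kernel-side signatures */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym;

/* Global (hence long-aligned) bit pattern: 512 bits. It must cover
 * bitmap_size(nr_cpu_ids) on the target system, or the kfunc returns
 * -EACCES. */
u64 pattern[8];

SEC("syscall")
int populate_example(void *ctx)
{
	struct bpf_cpumask *mask;
	int ret;

	mask = bpf_cpumask_create();
	if (!mask)
		return 1;

	pattern[0] = 0xf0;	/* select CPUs 4-7 */
	ret = bpf_cpumask_populate((struct cpumask *)mask, pattern,
				   sizeof(pattern));

	bpf_cpumask_release(mask);
	return ret;
}

Because the pattern lives in global data, it is naturally long-aligned, so the -EINVAL path for architectures without efficient unaligned accesses cannot trigger; the -EACCES size check is the one callers most plausibly hit if the region is sized for fewer bits than nr_cpu_ids.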