Skip to content

Commit 4b69e31

Browse files
Asphaltt and Alexei Starovoitov
authored and committed
selftests/bpf: Introduce experimental bpf_in_interrupt()
Filtering pid_tgid is meaningless when the current task is preempted by an interrupt. To address this, introduce the 'bpf_in_interrupt()' helper function, which allows BPF programs to determine whether they are executing in interrupt context. 'get_preempt_count()': * On x86, '*(int *) bpf_this_cpu_ptr(&__preempt_count)'. * On arm64, 'bpf_get_current_task_btf()->thread_info.preempt.count'. Then 'bpf_in_interrupt()' will be: * If !PREEMPT_RT, 'get_preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK)'. * If PREEMPT_RT, '(get_preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | (bpf_get_current_task_btf()->softirq_disable_cnt & SOFTIRQ_MASK)'. Support for other archs can be added by updating 'get_preempt_count()'. Suggested-by: Alexei Starovoitov <[email protected]> Signed-off-by: Leon Hwang <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 929adf8 commit 4b69e31

File tree

1 file changed

+54
-0
lines changed

1 file changed

+54
-0
lines changed

tools/testing/selftests/bpf/bpf_experimental.h

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -599,4 +599,58 @@ extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym;
599599
extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
600600
struct bpf_dynptr *value_p) __weak __ksym;
601601

602+
/*
 * Mirror of the kernel's preempt_count bit layout (include/linux/preempt.h),
 * duplicated here because kernel headers are not visible to BPF programs:
 * bits [0..7] preemption, [8..15] softirq, [16..19] hardirq, [20..23] NMI.
 * Keep the values in lockstep with the kernel definitions.
 */
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 4

#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)

/* A mask with the low 'x' bits set. */
#define __IRQ_MASK(x) ((1UL << (x))-1)

#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
617+
618+
/* Kernel config value, resolved by the loader at load time; weak so loading
 * still succeeds on kernels where it cannot be resolved.
 */
extern bool CONFIG_PREEMPT_RT __kconfig __weak;
#ifdef bpf_target_x86
/* Per-CPU preempt counter kernel symbol; only referenced when targeting x86. */
extern const int __preempt_count __ksym;
#endif

/*
 * CO-RE shadow of task_struct for PREEMPT_RT kernels, where softirq disable
 * state is tracked per-task rather than in the preempt count.
 * preserve_access_index relocates the field offset against the running kernel.
 */
struct task_struct___preempt_rt {
	int softirq_disable_cnt;
} __attribute__((preserve_access_index));
626+
627+
/*
 * Read the current preempt count.
 *  - x86: dereference the per-CPU '__preempt_count' variable.
 *  - arm64: the count lives in the current task's thread_info.
 *  - any other arch: unsupported, falls through and returns 0.
 */
static inline int get_preempt_count(void)
{
#if defined(bpf_target_x86)
	return *(int *) bpf_this_cpu_ptr(&__preempt_count);
#elif defined(bpf_target_arm64)
	return bpf_get_current_task_btf()->thread_info.preempt.count;
#endif
	return 0;
}
636+
637+
/* Description
 * Report whether it is in interrupt context (non-zero when in NMI, hardirq,
 * or softirq context). Only works on the following archs:
 * * x86
 * * arm64
 */
static inline int bpf_in_interrupt(void)
{
	struct task_struct___preempt_rt *tsk;
	int pcnt;

	pcnt = get_preempt_count();
	if (!CONFIG_PREEMPT_RT)
		/* Non-RT: NMI, hardirq and softirq state all live in the preempt count. */
		return pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);

	/* PREEMPT_RT: softirq state is tracked in the task's softirq_disable_cnt
	 * instead of the preempt count, so combine the two sources.
	 */
	tsk = (void *) bpf_get_current_task_btf();
	return (pcnt & (NMI_MASK | HARDIRQ_MASK)) |
	       (tsk->softirq_disable_cnt & SOFTIRQ_MASK);
}
655+
602656
#endif

0 commit comments

Comments
 (0)