/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (c) 2024 Benjamin Tissoires
 */

#ifndef __HID_BPF_ASYNC_H__
#define __HID_BPF_ASYNC_H__

#ifndef HID_BPF_ASYNC_MAX_CTX
#error "HID_BPF_ASYNC_MAX_CTX should be set to the maximum number of concurrent async functions"
#endif /* HID_BPF_ASYNC_MAX_CTX */

#define CLOCK_MONOTONIC 1

typedef int (*hid_bpf_async_callback_t)(void *map, int *key, void *value);

enum hid_bpf_async_state {
	HID_BPF_ASYNC_STATE_UNSET = 0,
	HID_BPF_ASYNC_STATE_INITIALIZING,
	HID_BPF_ASYNC_STATE_INITIALIZED,
	HID_BPF_ASYNC_STATE_STARTING,
	HID_BPF_ASYNC_STATE_RUNNING,
};

struct hid_bpf_async_map_elem {
	struct bpf_spin_lock lock;
	enum hid_bpf_async_state state;
	struct bpf_timer t;
	struct bpf_wq wq;
	u32 hid;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, HID_BPF_ASYNC_MAX_CTX);
	__type(key, u32);
	__type(value, struct hid_bpf_async_map_elem);
} hid_bpf_async_ctx_map SEC(".maps");

/**
 * HID_BPF_ASYNC_CB: macro to define an async callback used in a bpf_wq
 *
 * The caller is responsible for allocating a key in the async map
 * with hid_bpf_async_get_ctx().
 */
#define HID_BPF_ASYNC_CB(cb) \
cb(void *map, int *key, void *value); \
static __always_inline int \
____##cb(struct hid_bpf_ctx *ctx); \
typeof(cb(0, 0, 0)) cb(void *map, int *key, void *value) \
{ \
	struct hid_bpf_async_map_elem *e; \
	struct hid_bpf_ctx *ctx; \
 \
	e = (struct hid_bpf_async_map_elem *)value; \
	ctx = hid_bpf_allocate_context(e->hid); \
	if (!ctx) \
		return 0; /* EPERM check */ \
 \
	e->state = HID_BPF_ASYNC_STATE_RUNNING; \
 \
	____##cb(ctx); \
 \
	e->state = HID_BPF_ASYNC_STATE_INITIALIZED; \
	hid_bpf_release_context(ctx); \
	return 0; \
} \
static __always_inline int \
____##cb

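/*
 * Illustrative sketch of direct HID_BPF_ASYNC_CB usage; the names example_cb
 * and example_key and the 10 ms delay are hypothetical. Most code will use
 * the HID_BPF_ASYNC_FUN()/HID_BPF_ASYNC_DELAYED_CALL() wrappers below instead.
 *
 *	static int example_key;
 *
 *	static int HID_BPF_ASYNC_CB(example_cb)(struct hid_bpf_ctx *hctx)
 *	{
 *		// runs in bpf_wq context with a freshly allocated hid_bpf_ctx
 *		return 0;
 *	}
 *
 *	// once, e.g. from the probe program:
 *	//	example_key = hid_bpf_async_get_ctx();
 *	// later, from an event handler, schedule example_cb in 10 ms:
 *	//	hid_bpf_async_delayed_call(hctx, 10, example_key, example_cb);
 */
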
/**
 * HID_BPF_ASYNC_FUN: macro to automatically handle async callback contexts
 *
 * Must be used in conjunction with HID_BPF_ASYNC_INIT() and HID_BPF_ASYNC_DELAYED_CALL().
 */
#define HID_BPF_ASYNC_FUN(fun) \
fun(struct hid_bpf_ctx *ctx); \
int ____key__##fun; \
static int ____async_init_##fun(void) \
{ \
	____key__##fun = hid_bpf_async_get_ctx(); \
	if (____key__##fun < 0) \
		return ____key__##fun; \
	return 0; \
} \
static int HID_BPF_ASYNC_CB(____##fun##_cb)(struct hid_bpf_ctx *hctx) \
{ \
	return fun(hctx); \
} \
typeof(fun(0)) fun

#define HID_BPF_ASYNC_INIT(fun)	____async_init_##fun()
#define HID_BPF_ASYNC_DELAYED_CALL(fun, ctx, delay) \
	hid_bpf_async_delayed_call(ctx, delay, ____key__##fun, ____##fun##_cb)

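/*
 * Example usage: an illustrative sketch only. The name example_async_work and
 * the 100 ms delay are hypothetical; the probe entry point follows the usual
 * HID-BPF convention, and error handling is up to the driver's probe logic.
 *
 *	#define HID_BPF_ASYNC_MAX_CTX 1
 *	#include "hid_bpf_async.h"
 *
 *	static int HID_BPF_ASYNC_FUN(example_async_work)(struct hid_bpf_ctx *hctx)
 *	{
 *		// runs in sleepable bpf_wq context with a valid hid_bpf_ctx,
 *		// so helpers like hid_bpf_hw_request() can be used here
 *		return 0;
 *	}
 *
 *	SEC("syscall")
 *	int probe(struct hid_bpf_probe_args *ctx)
 *	{
 *		// reserve the async context once
 *		return HID_BPF_ASYNC_INIT(example_async_work);
 *	}
 *
 *	// from a device event handler, run the work 100 ms later:
 *	//	HID_BPF_ASYNC_DELAYED_CALL(example_async_work, hctx, 100);
 */
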
/*
 * Internal timer callback: kicks the workqueue so that the delayed work
 * callback runs in (sleepable) workqueue context.
 */
static int __start_wq_timer_cb(void *map, int *key, void *value)
{
	struct hid_bpf_async_map_elem *e = (struct hid_bpf_async_map_elem *)value;

	bpf_wq_start(&e->wq, 0);

	return 0;
}

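/*
 * Scan the context map for a free slot and mark it
 * HID_BPF_ASYNC_STATE_INITIALIZING under the element spin lock.
 * Returns the slot index, or a negative error code if no slot is available.
 */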
static int hid_bpf_async_find_empty_key(void)
{
	int i;

	bpf_for(i, 0, HID_BPF_ASYNC_MAX_CTX) {
		struct hid_bpf_async_map_elem *elem;
		int key = i;

		elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
		if (!elem)
			return -ENOMEM; /* should never happen */

		bpf_spin_lock(&elem->lock);

		if (elem->state == HID_BPF_ASYNC_STATE_UNSET) {
			elem->state = HID_BPF_ASYNC_STATE_INITIALIZING;
			bpf_spin_unlock(&elem->lock);
			return i;
		}

		bpf_spin_unlock(&elem->lock);
	}

	return -EINVAL;
}

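/*
 * Reserve one async context: find a free slot and initialize its bpf_timer
 * and bpf_wq. Returns the slot key to pass to hid_bpf_async_delayed_call(),
 * or a negative error code.
 */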
static int hid_bpf_async_get_ctx(void)
{
	int key = hid_bpf_async_find_empty_key();
	struct hid_bpf_async_map_elem *elem;
	int err;

	if (key < 0)
		return key;

	elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
	if (!elem)
		return -EINVAL;

	err = bpf_timer_init(&elem->t, &hid_bpf_async_ctx_map, CLOCK_MONOTONIC);
	if (err)
		return err;

	err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
	if (err)
		return err;

	err = bpf_wq_init(&elem->wq, &hid_bpf_async_ctx_map, 0);
	if (err)
		return err;

	elem->state = HID_BPF_ASYNC_STATE_INITIALIZED;

	return key;
}

static inline u64 ms_to_ns(u64 milliseconds)
{
	return (u64)milliseconds * 1000UL * 1000UL;
}

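/*
 * Schedule @wq_cb to run in bpf_wq context after @milliseconds. With a zero
 * delay the workqueue is started immediately; otherwise the bpf_timer is
 * armed and its callback starts the workqueue when it fires.
 */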
static int hid_bpf_async_delayed_call(struct hid_bpf_ctx *hctx, u64 milliseconds, int key,
				      hid_bpf_async_callback_t wq_cb)
{
	struct hid_bpf_async_map_elem *elem;
	int err;

	elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
	if (!elem)
		return -EINVAL;

	bpf_spin_lock(&elem->lock);
	/* The wq must be in one of these states:
	 * - HID_BPF_ASYNC_STATE_INITIALIZED -> it has been initialized and is ready to be called
	 * - HID_BPF_ASYNC_STATE_RUNNING -> possible re-entry from the wq itself
	 */
	if (elem->state != HID_BPF_ASYNC_STATE_INITIALIZED &&
	    elem->state != HID_BPF_ASYNC_STATE_RUNNING) {
		bpf_spin_unlock(&elem->lock);
		return -EINVAL;
	}
	elem->state = HID_BPF_ASYNC_STATE_STARTING;
	bpf_spin_unlock(&elem->lock);

	elem->hid = hctx->hid->id;

	err = bpf_wq_set_callback(&elem->wq, wq_cb, 0);
	if (err)
		return err;

	if (milliseconds) {
		/* needed for every call because a cancel might unset this */
		err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
		if (err)
			return err;

		err = bpf_timer_start(&elem->t, ms_to_ns(milliseconds), 0);
		if (err)
			return err;

		return 0;
	}

	return bpf_wq_start(&elem->wq, 0);
}

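/* Convenience wrapper around hid_bpf_async_delayed_call() with no delay. */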
static inline int hid_bpf_async_call(struct hid_bpf_ctx *ctx, int key,
				     hid_bpf_async_callback_t wq_cb)
{
	return hid_bpf_async_delayed_call(ctx, 0, key, wq_cb);
}

#endif /* __HID_BPF_ASYNC_H__ */