Commit 4c97c4b

mykyta5 authored and Alexei Starovoitov committed
bpf: Extract internal structs validation logic into helpers
The arraymap and hashtab duplicate the logic that checks for and frees
internal structs (timer, workqueue, task_work) based on BTF record flags.
Centralize this by introducing two helpers:

* bpf_map_has_internal_structs(map)
  Returns true if the map value contains any of the internal structs:
  BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK.

* bpf_map_free_internal_structs(map, obj)
  Frees the internal structs for a single value object.

Convert arraymap and both the prealloc/malloc hashtab paths to use the
new generic functions. This keeps the logic for when and how to free
these special fields in one place and makes it easier to add support
for new internal structs in the future without touching every map
implementation.

Signed-off-by: Mykyta Yatsenko <[email protected]>
Acked-by: Eduard Zingerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent bca2b74 commit 4c97c4b
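For orientation, below is a rough sketch of the call pattern the series converts arraymap and hashtab to. Everything named example_* is hypothetical and not part of the commit; only bpf_map_has_internal_structs() and bpf_map_free_internal_structs() are introduced by this patch.

/* Illustrative only: a made-up map type backed by a flat array of values,
 * roughly mirroring how arraymap lays out its elements.
 */
struct example_map {
        struct bpf_map map;
        void *values;   /* max_entries values, each rounded up to 8 bytes */
};

static void *example_map_elem_ptr(struct example_map *emap, u32 index)
{
        return emap->values + (u64)index * round_up(emap->map.value_size, 8);
}

/* Hypothetical .map_free_internal_structs callback, run when the map's
 * uref count drops to zero, shaped like the converted arraymap path.
 */
static void example_map_free_internal_structs(struct bpf_map *map)
{
        struct example_map *emap = container_of(map, struct example_map, map);
        u32 i;

        /* Skip maps whose value type has no timer/workqueue/task_work field */
        if (!bpf_map_has_internal_structs(map))
                return;

        /* Free the internal structs embedded in every value object */
        for (i = 0; i < map->max_entries; i++)
                bpf_map_free_internal_structs(map, example_map_elem_ptr(emap, i));
}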

File tree

4 files changed: +36 −36 lines

include/linux/bpf.h

Lines changed: 7 additions & 0 deletions
@@ -663,6 +663,13 @@ int map_check_no_btf(const struct bpf_map *map,
 bool bpf_map_meta_equal(const struct bpf_map *meta0,
                         const struct bpf_map *meta1);
 
+static inline bool bpf_map_has_internal_structs(struct bpf_map *map)
+{
+        return btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK);
+}
+
+void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
+
 extern const struct bpf_map_ops bpf_map_offload_ops;
 
 /* bpf_type_flag contains a set of flags that are applicable to the values of

kernel/bpf/arraymap.c

Lines changed: 6 additions & 13 deletions
@@ -448,19 +448,12 @@ static void array_map_free_internal_structs(struct bpf_map *map)
         struct bpf_array *array = container_of(map, struct bpf_array, map);
         int i;
 
-        /* We don't reset or free fields other than timer and workqueue
-         * on uref dropping to zero.
-         */
-        if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK)) {
-                for (i = 0; i < array->map.max_entries; i++) {
-                        if (btf_record_has_field(map->record, BPF_TIMER))
-                                bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
-                        if (btf_record_has_field(map->record, BPF_WORKQUEUE))
-                                bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
-                        if (btf_record_has_field(map->record, BPF_TASK_WORK))
-                                bpf_obj_free_task_work(map->record, array_map_elem_ptr(array, i));
-                }
-        }
+        /* We only free internal structs on uref dropping to zero */
+        if (!bpf_map_has_internal_structs(map))
+                return;
+
+        for (i = 0; i < array->map.max_entries; i++)
+                bpf_map_free_internal_structs(map, array_map_elem_ptr(array, i));
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */

kernel/bpf/hashtab.c

Lines changed: 13 additions & 23 deletions
@@ -215,19 +215,6 @@ static bool htab_has_extra_elems(struct bpf_htab *htab)
         return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab);
 }
 
-static void htab_free_internal_structs(struct bpf_htab *htab, struct htab_elem *elem)
-{
-        if (btf_record_has_field(htab->map.record, BPF_TIMER))
-                bpf_obj_free_timer(htab->map.record,
-                                   htab_elem_value(elem, htab->map.key_size));
-        if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
-                bpf_obj_free_workqueue(htab->map.record,
-                                       htab_elem_value(elem, htab->map.key_size));
-        if (btf_record_has_field(htab->map.record, BPF_TASK_WORK))
-                bpf_obj_free_task_work(htab->map.record,
-                                       htab_elem_value(elem, htab->map.key_size));
-}
-
 static void htab_free_prealloced_internal_structs(struct bpf_htab *htab)
 {
         u32 num_entries = htab->map.max_entries;
@@ -240,7 +227,8 @@ static void htab_free_prealloced_internal_structs(struct bpf_htab *htab)
                 struct htab_elem *elem;
 
                 elem = get_htab_elem(htab, i);
-                htab_free_internal_structs(htab, elem);
+                bpf_map_free_internal_structs(&htab->map,
+                                              htab_elem_value(elem, htab->map.key_size));
                 cond_resched();
         }
 }
@@ -1509,8 +1497,9 @@ static void htab_free_malloced_internal_structs(struct bpf_htab *htab)
                 struct htab_elem *l;
 
                 hlist_nulls_for_each_entry(l, n, head, hash_node) {
-                        /* We only free timer on uref dropping to zero */
-                        htab_free_internal_structs(htab, l);
+                        /* We only free internal structs on uref dropping to zero */
+                        bpf_map_free_internal_structs(&htab->map,
+                                                      htab_elem_value(l, htab->map.key_size));
                 }
                 cond_resched_rcu();
         }
@@ -1521,13 +1510,14 @@ static void htab_map_free_internal_structs(struct bpf_map *map)
 {
         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 
-        /* We only free timer and workqueue on uref dropping to zero */
-        if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK)) {
-                if (!htab_is_prealloc(htab))
-                        htab_free_malloced_internal_structs(htab);
-                else
-                        htab_free_prealloced_internal_structs(htab);
-        }
+        /* We only free internal structs on uref dropping to zero */
+        if (!bpf_map_has_internal_structs(map))
+                return;
+
+        if (htab_is_prealloc(htab))
+                htab_free_prealloced_internal_structs(htab);
+        else
+                htab_free_malloced_internal_structs(htab);
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */

kernel/bpf/helpers.c

Lines changed: 10 additions & 0 deletions
@@ -4487,3 +4487,13 @@ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
                 return NULL;
         return (void *)__bpf_dynptr_data(ptr, len);
 }
+
+void bpf_map_free_internal_structs(struct bpf_map *map, void *val)
+{
+        if (btf_record_has_field(map->record, BPF_TIMER))
+                bpf_obj_free_timer(map->record, val);
+        if (btf_record_has_field(map->record, BPF_WORKQUEUE))
+                bpf_obj_free_workqueue(map->record, val);
+        if (btf_record_has_field(map->record, BPF_TASK_WORK))
+                bpf_obj_free_task_work(map->record, val);
+}
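To illustrate the extensibility claim in the commit message, a future internal struct would only need to touch these two shared helpers; the sketch below uses BPF_SOMETHING and bpf_obj_free_something() as invented placeholders, not existing kernel symbols.

/* Hypothetical follow-up: one new flag in the shared mask ... */
static inline bool bpf_map_has_internal_structs(struct bpf_map *map)
{
        return btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE |
                                                 BPF_TASK_WORK | BPF_SOMETHING);
}

/* ... and one new branch in the shared free path; arraymap and hashtab
 * keep calling this helper unchanged.
 */
void bpf_map_free_internal_structs(struct bpf_map *map, void *val)
{
        if (btf_record_has_field(map->record, BPF_TIMER))
                bpf_obj_free_timer(map->record, val);
        if (btf_record_has_field(map->record, BPF_WORKQUEUE))
                bpf_obj_free_workqueue(map->record, val);
        if (btf_record_has_field(map->record, BPF_TASK_WORK))
                bpf_obj_free_task_work(map->record, val);
        if (btf_record_has_field(map->record, BPF_SOMETHING))
                bpf_obj_free_something(map->record, val);       /* hypothetical */
}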
