From c02e5e87b103db1a57ff146f6991f442ee877515 Mon Sep 17 00:00:00 2001 From: pengdonglin Date: Tue, 4 Nov 2025 21:40:27 +0800 Subject: [PATCH 1/7] libbpf: Extract BTF type remapping logic into helper function Refactor btf_dedup_remap_types() by extracting its core logic into a new btf_remap_types() helper function. This eliminates code duplication and improves modularity while maintaining the same functionality. The new function encapsulates iteration over BTF types and BTF ext sections, accepting a callback for flexible type ID remapping. This makes the type remapping logic more maintainable and reusable. Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Signed-off-by: pengdonglin Signed-off-by: Donglin Peng --- tools/lib/bpf/btf.c | 63 +++++++++++++++++---------------- tools/lib/bpf/libbpf_internal.h | 1 + 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 18907f0fcf9f0..5e1c09b5dce81 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -3400,6 +3400,37 @@ int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian) return 0; } +static int btf_remap_types(struct btf *btf, struct btf_ext *btf_ext, + btf_remap_type_fn visit, void *ctx) +{ + int i, r; + + for (i = 0; i < btf->nr_types; i++) { + struct btf_type *t = btf_type_by_id(btf, btf->start_id + i); + struct btf_field_iter it; + __u32 *type_id; + + r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (r) + return r; + + while ((type_id = btf_field_iter_next(&it))) { + r = visit(type_id, ctx); + if (r) + return r; + } + } + + if (!btf_ext) + return 0; + + r = btf_ext_visit_type_ids(btf_ext, visit, ctx); + if (r) + return r; + + return 0; +} + struct btf_dedup; static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts); @@ -5320,37 +5351,7 @@ static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx) */ static int btf_dedup_remap_types(struct btf_dedup *d) { - int i, r; - - for (i = 0; i < d->btf->nr_types; i++) { - struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); - struct btf_field_iter it; - __u32 *type_id; - - r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); - if (r) - return r; - - while ((type_id = btf_field_iter_next(&it))) { - __u32 resolved_id, new_id; - - resolved_id = resolve_type_id(d, *type_id); - new_id = d->hypot_map[resolved_id]; - if (new_id > BTF_MAX_NR_TYPES) - return -EINVAL; - - *type_id = new_id; - } - } - - if (!d->btf_ext) - return 0; - - r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d); - if (r) - return r; - - return 0; + return btf_remap_types(d->btf, d->btf_ext, btf_dedup_remap_type_id, d); } /* diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 35b2527bedecb..b09d6163f5c3a 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -582,6 +582,7 @@ int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, __u32 kind); +typedef int (*btf_remap_type_fn)(__u32 *type_id, void *ctx); /* handle direct returned errors */ static inline int libbpf_err(int ret) From 9698ebf6a61190f76e8f5461b3c57e2095487b1a Mon Sep 17 00:00:00 2001 From: pengdonglin Date: Tue, 4 Nov 2025 21:40:28 +0800 Subject: [PATCH 2/7] libbpf: Add BTF permutation support for type reordering 
Introduce the btf__permute() API to allow in-place rearrangement of BTF
types. This function reorders BTF types according to a provided array of
type IDs, updating all type references to keep the BTF consistent.

The permutation process involves:
1. Shuffling types into the new order based on the provided ID array
2. Remapping all type ID references to point to the new locations
3. Updating BTF extension data, if provided via options

This is particularly useful for optimizing type locality after BTF
deduplication or for meeting specific layout requirements in specialized
use cases.

Cc: Eduard Zingerman
Cc: Alexei Starovoitov
Cc: Andrii Nakryiko
Cc: Alan Maguire
Cc: Song Liu
Signed-off-by: pengdonglin
Signed-off-by: Donglin Peng
---
 tools/lib/bpf/btf.c      | 161 +++++++++++++++++++++++++++++++++++++++
 tools/lib/bpf/btf.h      |  34 +++++++++
 tools/lib/bpf/libbpf.map |   1 +
 3 files changed, 196 insertions(+)

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 5e1c09b5dce81..3bc03f7fe31fa 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -5830,3 +5830,164 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
 	btf->owns_base = false;
 	return libbpf_err(err);
 }
+
+struct btf_permute {
+	/* .BTF section to be permuted in-place */
+	struct btf *btf;
+	struct btf_ext *btf_ext;
+	/* Array of type IDs used for permutation. The array length must equal
+	 * the number of types in the BTF being permuted, excluding the special
+	 * void type at ID 0. For split BTF, the length corresponds to the
+	 * number of types added on top of the base BTF.
+	 */
+	__u32 *ids;
+	/* Array mapping each original type ID to its new permuted type ID;
+	 * its length equals that of the ids array above.
+	 */
+	__u32 *map;
+};
+
+static int btf_permute_shuffle_types(struct btf_permute *p);
+static int btf_permute_remap_types(struct btf_permute *p);
+static int btf_permute_remap_type_id(__u32 *type_id, void *ctx);
+
+int btf__permute(struct btf *btf, __u32 *ids, const struct btf_permute_opts *opts)
+{
+	struct btf_permute p;
+	int i, err = 0;
+	__u32 *map = NULL;
+
+	if (!OPTS_VALID(opts, btf_permute_opts) || !ids)
+		return libbpf_err(-EINVAL);
+
+	map = calloc(btf->nr_types, sizeof(*map));
+	if (!map) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < btf->nr_types; i++)
+		map[i] = BTF_UNPROCESSED_ID;
+
+	p.btf = btf;
+	p.btf_ext = OPTS_GET(opts, btf_ext, NULL);
+	p.ids = ids;
+	p.map = map;
+
+	if (btf_ensure_modifiable(btf)) {
+		err = -ENOMEM;
+		goto done;
+	}
+	err = btf_permute_shuffle_types(&p);
+	if (err < 0) {
+		pr_debug("btf_permute_shuffle_types failed: %s\n", errstr(err));
+		goto done;
+	}
+	err = btf_permute_remap_types(&p);
+	if (err < 0) {
+		pr_debug("btf_permute_remap_types failed: %s\n", errstr(err));
+		goto done;
+	}
+
+done:
+	free(map);
+	return libbpf_err(err);
+}
+
+/* Shuffle BTF types.
+ *
+ * Rearranges types according to the permutation array in p->ids. The p->map
+ * array stores the mapping from original type IDs to new shuffled IDs,
+ * which is used in the next phase to update type references.
+ *
+ * Validates that all IDs in the permutation array are valid and unique.
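+ *
+ * Note: duplicate detection below relies on every p->map slot starting
+ * out as BTF_UNPROCESSED_ID; finding a slot that is already assigned
+ * means the same ID appeared twice in p->ids.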
+ */
+static int btf_permute_shuffle_types(struct btf_permute *p)
+{
+	struct btf *btf = p->btf;
+	const struct btf_type *t;
+	__u32 *new_offs = NULL, *map;
+	void *nt, *new_types = NULL;
+	int i, id, len, err;
+
+	new_offs = calloc(btf->nr_types, sizeof(*new_offs));
+	new_types = calloc(btf->hdr->type_len, 1);
+	if (!new_offs || !new_types) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	nt = new_types;
+	for (i = 0; i < btf->nr_types; i++) {
+		id = p->ids[i];
+		/* type IDs from base_btf and the VOID type are not allowed */
+		if (id < btf->start_id) {
+			err = -EINVAL;
+			goto out_err;
+		}
+		/* must be a valid type ID */
+		t = btf__type_by_id(btf, id);
+		if (!t) {
+			err = -EINVAL;
+			goto out_err;
+		}
+		map = &p->map[id - btf->start_id];
+		/* duplicate type IDs are not allowed */
+		if (*map != BTF_UNPROCESSED_ID) {
+			err = -EINVAL;
+			goto out_err;
+		}
+		len = btf_type_size(t);
+		memcpy(nt, t, len);
+		new_offs[i] = nt - new_types;
+		*map = btf->start_id + i;
+		nt += len;
+	}
+
+	free(btf->types_data);
+	free(btf->type_offs);
+	btf->types_data = new_types;
+	btf->type_offs = new_offs;
+	/* keep capacities in sync with the exact-sized replacement buffers */
+	btf->types_data_cap = btf->hdr->type_len;
+	btf->type_offs_cap = btf->nr_types;
+	return 0;
+
+out_err:
+	free(new_offs);
+	free(new_types);
+	return err;
+}
+
+/* Callback function to remap individual type ID references.
+ *
+ * This callback is invoked by btf_remap_types() for each type ID reference
+ * found in the BTF data. It updates the reference to point to the new
+ * permuted type ID using the mapping table.
+ */
+static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
+{
+	struct btf_permute *p = ctx;
+	__u32 new_type_id = *type_id;
+
+	/* skip references that point into the base BTF */
+	if (new_type_id < p->btf->start_id)
+		return 0;
+
+	new_type_id = p->map[*type_id - p->btf->start_id];
+	if (new_type_id > BTF_MAX_NR_TYPES)
+		return -EINVAL;
+
+	*type_id = new_type_id;
+	return 0;
+}
+
+/* Remap referenced type IDs into permuted type IDs.
+ *
+ * After BTF types are permuted, their final type IDs may differ from the
+ * original ones. The map from an original to the corresponding permuted
+ * type ID is stored in btf_permute->map and is populated during the shuffle
+ * phase. During the remapping phase we rewrite all type IDs referenced from
+ * any BTF type (e.g., struct fields, func proto args, etc) to their final
+ * permuted type IDs.
+ */
+static int btf_permute_remap_types(struct btf_permute *p)
+{
+	return btf_remap_types(p->btf, p->btf_ext, btf_permute_remap_type_id, p);
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index ccfd905f03dfe..441f6445d7625 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -273,6 +273,40 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
  */
 LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
 
+struct btf_permute_opts {
+	size_t sz;
+	/* optional .BTF.ext info alongside the main BTF info */
+	struct btf_ext *btf_ext;
+	size_t :0;
+};
+#define btf_permute_opts__last_field btf_ext
+
+/**
+ * @brief **btf__permute()** rearranges BTF types in-place according to the
+ * specified mapping
+ * @param btf BTF object to permute
+ * @param ids Array defining the new type order. Must contain exactly
+ * btf->nr_types elements, each being a valid type ID in the range
+ * [btf->start_id, btf->start_id + btf->nr_types - 1]
+ * @param opts Optional parameters, including BTF extension data for
+ * reference updates
+ * @return 0 on success, negative error code on failure
+ *
+ * **btf__permute()** performs an in-place permutation of BTF types,
+ * rearranging them according to the order specified in @p ids array.
After reordering, all type references + * within the BTF data and optional BTF extension are updated to maintain consistency. + * + * The permutation process consists of two phases: + * 1. Type shuffling: Physical reordering of type data in memory + * 2. Reference remapping: Updating all type ID references to new locations + * + * This is particularly useful for optimizing type locality after BTF deduplication + * or for meeting specific layout requirements in specialized use cases. + * + * On error, negative error code is returned and errno is set appropriately. + * Common error codes include: + * - -EINVAL: Invalid parameters or invalid ID mapping (e.g., duplicate IDs, out-of-range IDs) + * - -ENOMEM: Memory allocation failure during permutation process + */ +LIBBPF_API int btf__permute(struct btf *btf, __u32 *ids, const struct btf_permute_opts *opts); + struct btf_dump; struct btf_dump_opts { diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 8ed8749907d47..b778e5a5d0a88 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -451,4 +451,5 @@ LIBBPF_1.7.0 { global: bpf_map__set_exclusive_program; bpf_map__exclusive_program; + btf__permute; } LIBBPF_1.6.0; From babc0dcfccdb65a80374bcc9a2798ed570e44954 Mon Sep 17 00:00:00 2001 From: pengdonglin Date: Tue, 4 Nov 2025 21:40:29 +0800 Subject: [PATCH 3/7] libbpf: Optimize type lookup with binary search for sorted BTF This patch introduces binary search optimization for BTF type lookups when the BTF instance contains sorted types. The optimization significantly improves performance when searching for types in large BTF instances with sorted type names. For unsorted BTF or when nr_sorted_types is zero, the implementation falls back to the original linear search algorithm. Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Signed-off-by: pengdonglin Signed-off-by: Donglin Peng --- tools/lib/bpf/btf.c | 142 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 119 insertions(+), 23 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 3bc03f7fe31fa..5af14304409cf 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -92,6 +92,12 @@ struct btf { * - for split BTF counts number of types added on top of base BTF. */ __u32 nr_types; + /* number of sorted and named types in this BTF instance: + * - doesn't include special [0] void type; + * - for split BTF counts number of sorted and named types added on + * top of base BTF. + */ + __u32 nr_sorted_types; /* if not NULL, points to the base BTF on top of which the current * split BTF is based */ @@ -897,44 +903,134 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id) return type_id; } -__s32 btf__find_by_name(const struct btf *btf, const char *type_name) +/* + * Find BTF types with matching names within the [left, right] index range. + * On success, updates *left and *right to the boundaries of the matching range + * and returns the leftmost matching index. 
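+ * Returns -ENOENT when no type in [*left, *right] has a matching name.
+ *
+ * The search runs in two passes: a lower-bound pass for the leftmost
+ * match and an upper-bound pass for the rightmost one, so duplicate
+ * names (same name, different kinds) map to a contiguous range that the
+ * caller can then scan for a specific kind.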
+ */
+static __s32 btf_find_type_by_name_bsearch(const struct btf *btf, const char *name,
+					   __s32 *left, __s32 *right)
 {
-	__u32 i, nr_types = btf__type_cnt(btf);
+	const struct btf_type *t;
+	const char *tname;
+	__s32 l, r, m, lmost, rmost;
+	int ret;
+
+	/* find the leftmost btf_type that matches */
+	l = *left;
+	r = *right;
+	lmost = -1;
+	while (l <= r) {
+		m = l + (r - l) / 2;
+		t = btf_type_by_id(btf, m);
+		tname = btf__str_by_offset(btf, t->name_off);
+		ret = strcmp(tname, name);
+		if (ret < 0) {
+			l = m + 1;
+		} else {
+			if (ret == 0)
+				lmost = m;
+			r = m - 1;
+		}
+	}
 
-	if (!strcmp(type_name, "void"))
-		return 0;
+	if (lmost == -1)
+		return -ENOENT;
+
+	/* find the rightmost btf_type that matches */
+	l = lmost;
+	r = *right;
+	rmost = -1;
+	while (l <= r) {
+		m = l + (r - l) / 2;
+		t = btf_type_by_id(btf, m);
+		tname = btf__str_by_offset(btf, t->name_off);
+		ret = strcmp(tname, name);
+		if (ret <= 0) {
+			if (ret == 0)
+				rmost = m;
+			l = m + 1;
+		} else {
+			r = m - 1;
+		}
+	}
 
-	for (i = 1; i < nr_types; i++) {
-		const struct btf_type *t = btf__type_by_id(btf, i);
-		const char *name = btf__name_by_offset(btf, t->name_off);
+	*left = lmost;
+	*right = rmost;
+	return lmost;
+}
+
+static __s32 btf_find_type_by_name_kind(const struct btf *btf, int start_id,
+					const char *type_name, __u32 kind)
+{
+	const struct btf_type *t;
+	const char *tname;
+	int err = -ENOENT;
 
-		if (name && !strcmp(type_name, name))
-			return i;
+	if (!btf)
+		goto out;
+
+	if (start_id < btf->start_id) {
+		err = btf_find_type_by_name_kind(btf->base_btf, start_id,
+						 type_name, kind);
+		if (err == -ENOENT)
+			start_id = btf->start_id;
+	}
+
+	if (err == -ENOENT) {
+		if (btf->nr_sorted_types) {
+			/* binary search */
+			__s32 l, r;
+			int ret;
+
+			l = start_id;
+			r = start_id + btf->nr_sorted_types - 1;
+			ret = btf_find_type_by_name_bsearch(btf, type_name, &l, &r);
+			if (ret < 0)
+				goto out;
+			/* return the leftmost type with a matching name and
+			 * skip the kind check
+			 */
+			if (kind == -1)
+				return ret;
+			/* scan the matching range for the requested kind */
+			while (l <= r) {
+				t = btf_type_by_id(btf, l);
+				if (BTF_INFO_KIND(t->info) == kind)
+					return l;
+				l++;
+			}
+		} else {
+			/* linear search */
+			__u32 i, total;
+
+			total = btf__type_cnt(btf);
+			for (i = start_id; i < total; i++) {
+				t = btf_type_by_id(btf, i);
+				if (kind != -1 && btf_kind(t) != kind)
+					continue;
+				tname = btf__str_by_offset(btf, t->name_off);
+				if (tname && !strcmp(tname, type_name))
+					return i;
+			}
+		}
 	}
 
-	return libbpf_err(-ENOENT);
+out:
+	return err;
 }
 
 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
 				   const char *type_name, __u32 kind)
 {
-	__u32 i, nr_types = btf__type_cnt(btf);
-
 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
 		return 0;
 
-	for (i = start_id; i < nr_types; i++) {
-		const struct btf_type *t = btf__type_by_id(btf, i);
-		const char *name;
-
-		if (btf_kind(t) != kind)
-			continue;
-		name = btf__name_by_offset(btf, t->name_off);
-		if (name && !strcmp(type_name, name))
-			return i;
-	}
+	return libbpf_err(btf_find_type_by_name_kind(btf, start_id, type_name, kind));
+}
 
-	return libbpf_err(-ENOENT);
+/* a kind value of -1 indicates that kind matching should be skipped */
+__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
+{
+	return btf_find_by_name_kind(btf, btf->start_id, type_name, -1);
 }
 
 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,

From 02bc5442ee228e482d639ac99faeea50f50d4ba4 Mon Sep 17 00:00:00 2001
From: pengdonglin
Date: Tue, 4 Nov 2025 21:40:30 +0800
Subject:
[PATCH 4/7] libbpf: Implement lazy sorting validation for binary search optimization This patch adds lazy validation of BTF type ordering to determine if types are sorted by name. The check is performed on first access and cached, enabling efficient binary search for sorted BTF while maintaining linear search fallback for unsorted cases. Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Signed-off-by: pengdonglin Signed-off-by: Donglin Peng --- tools/lib/bpf/btf.c | 76 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 5af14304409cf..0ee00cec5c05f 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -26,6 +26,10 @@ #define BTF_MAX_NR_TYPES 0x7fffffffU #define BTF_MAX_STR_OFFSET 0x7fffffffU +/* sort verification occurs lazily upon first btf_find_type_by_name_kind() + * call + */ +#define BTF_NEED_SORT_CHECK ((__u32)-1) static struct btf_type btf_void; @@ -96,6 +100,10 @@ struct btf { * - doesn't include special [0] void type; * - for split BTF counts number of sorted and named types added on * top of base BTF. + * - BTF_NEED_SORT_CHECK value indicates sort validation will be performed + * on first call to btf_find_type_by_name_kind. + * - zero value indicates applied sorting check with unsorted BTF or no + * named types. */ __u32 nr_sorted_types; /* if not NULL, points to the base BTF on top of which the current @@ -903,8 +911,67 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id) return type_id; } -/* - * Find BTF types with matching names within the [left, right] index range. +static int btf_compare_type_names(const void *a, const void *b, void *priv) +{ + struct btf *btf = (struct btf *)priv; + struct btf_type *ta = btf_type_by_id(btf, *(__u32 *)a); + struct btf_type *tb = btf_type_by_id(btf, *(__u32 *)b); + const char *na, *nb; + bool anon_a, anon_b; + + na = btf__str_by_offset(btf, ta->name_off); + nb = btf__str_by_offset(btf, tb->name_off); + anon_a = str_is_empty(na); + anon_b = str_is_empty(nb); + + if (anon_a && !anon_b) + return 1; + if (!anon_a && anon_b) + return -1; + if (anon_a && anon_b) + return 0; + + return strcmp(na, nb); +} + +/* Verifies BTF type ordering by name and counts named types. + * + * Checks that types are sorted in ascending order with named types + * before anonymous ones. If verified, sets nr_sorted_types to the + * number of named types. + */ +static void btf_check_sorted(struct btf *btf, int start_id) +{ + const struct btf_type *t; + int i, n, nr_sorted_types; + + if (likely(btf->nr_sorted_types != BTF_NEED_SORT_CHECK)) + return; + btf->nr_sorted_types = 0; + + if (btf->nr_types < 2) + return; + + nr_sorted_types = 0; + n = btf__type_cnt(btf); + for (n--, i = start_id; i < n; i++) { + int k = i + 1; + + if (btf_compare_type_names(&i, &k, btf) > 0) + return; + t = btf_type_by_id(btf, k); + if (!str_is_empty(btf__str_by_offset(btf, t->name_off))) + nr_sorted_types++; + } + + t = btf_type_by_id(btf, start_id); + if (!str_is_empty(btf__str_by_offset(btf, t->name_off))) + nr_sorted_types++; + if (nr_sorted_types) + btf->nr_sorted_types = nr_sorted_types; +} + +/* Find BTF types with matching names within the [left, right] index range. * On success, updates *left and *right to the boundaries of the matching range * and returns the leftmost matching index. 
*/ @@ -978,6 +1045,8 @@ static __s32 btf_find_type_by_name_kind(const struct btf *btf, int start_id, } if (err == -ENOENT) { + btf_check_sorted((struct btf *)btf, btf->start_id); + if (btf->nr_sorted_types) { /* binary search */ __s32 l, r; @@ -1102,6 +1171,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) btf->fd = -1; btf->ptr_sz = sizeof(void *); btf->swapped_endian = false; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; if (base_btf) { btf->base_btf = base_btf; @@ -1153,6 +1223,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b btf->start_id = 1; btf->start_str_off = 0; btf->fd = -1; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; if (base_btf) { btf->base_btf = base_btf; @@ -1811,6 +1882,7 @@ static void btf_invalidate_raw_data(struct btf *btf) free(btf->raw_data_swapped); btf->raw_data_swapped = NULL; } + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; } /* Ensure BTF is ready to be modified (by splitting into a three memory From 8b08ede699f84633f28afaedad5ca2ba400ee3db Mon Sep 17 00:00:00 2001 From: pengdonglin Date: Tue, 4 Nov 2025 21:40:31 +0800 Subject: [PATCH 5/7] btf: Optimize type lookup with binary search Improve btf_find_by_name_kind() performance by adding binary search support for sorted types. Falls back to linear search for compatibility. Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Signed-off-by: pengdonglin Signed-off-by: Donglin Peng --- kernel/bpf/btf.c | 111 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 101 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 0de8fc8a0e0b3..da35d8636b9bf 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -259,6 +259,7 @@ struct btf { void *nohdr_data; struct btf_header hdr; u32 nr_types; /* includes VOID for base BTF */ + u32 nr_sorted_types; /* exclude VOID for base BTF */ u32 types_size; u32 data_size; refcount_t refcnt; @@ -494,6 +495,11 @@ static bool btf_type_is_modifier(const struct btf_type *t) return false; } +static int btf_start_id(const struct btf *btf) +{ + return btf->start_id + (btf->base_btf ? 0 : 1); +} + bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void; @@ -544,24 +550,109 @@ u32 btf_nr_types(const struct btf *btf) return total; } -s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) +/* Find BTF types with matching names within the [left, right] index range. + * On success, updates *left and *right to the boundaries of the matching range + * and returns the leftmost matching index. 
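+ * Returns -ENOENT when no type in [*left, *right] has a matching name;
+ * the contract mirrors libbpf's btf_find_type_by_name_bsearch().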
+ */
+static s32 btf_find_by_name_kind_bsearch(const struct btf *btf, const char *name,
+					 s32 *left, s32 *right)
 {
 	const struct btf_type *t;
 	const char *tname;
-	u32 i, total;
+	s32 l, r, m, lmost, rmost;
+	int ret;
 
-	total = btf_nr_types(btf);
-	for (i = 1; i < total; i++) {
-		t = btf_type_by_id(btf, i);
-		if (BTF_INFO_KIND(t->info) != kind)
-			continue;
+	/* find the leftmost btf_type that matches */
+	l = *left;
+	r = *right;
+	lmost = -1;
+	while (l <= r) {
+		m = l + (r - l) / 2;
+		t = btf_type_by_id(btf, m);
+		tname = btf_name_by_offset(btf, t->name_off);
+		ret = strcmp(tname, name);
+		if (ret < 0) {
+			l = m + 1;
+		} else {
+			if (ret == 0)
+				lmost = m;
+			r = m - 1;
+		}
+	}
+	if (lmost == -1)
+		return -ENOENT;
+
+	/* find the rightmost btf_type that matches */
+	l = lmost;
+	r = *right;
+	rmost = -1;
+	while (l <= r) {
+		m = l + (r - l) / 2;
+		t = btf_type_by_id(btf, m);
 		tname = btf_name_by_offset(btf, t->name_off);
-		if (!strcmp(tname, name))
-			return i;
+		ret = strcmp(tname, name);
+		if (ret <= 0) {
+			if (ret == 0)
+				rmost = m;
+			l = m + 1;
+		} else {
+			r = m - 1;
+		}
 	}
 
-	return -ENOENT;
+	*left = lmost;
+	*right = rmost;
+	return lmost;
+}
+
+s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
+{
+	const struct btf *base_btf = btf_base_btf(btf);
+	const struct btf_type *t;
+	const char *tname;
+	int err = -ENOENT;
+
+	if (base_btf)
+		err = btf_find_by_name_kind(base_btf, name, kind);
+
+	if (err == -ENOENT) {
+		if (btf->nr_sorted_types) {
+			/* binary search */
+			s32 l, r;
+			int ret;
+
+			l = btf_start_id(btf);
+			r = l + btf->nr_sorted_types - 1;
+			ret = btf_find_by_name_kind_bsearch(btf, name, &l, &r);
+			if (ret < 0)
+				goto out;
+			/* scan the matching range for the requested kind */
+			while (l <= r) {
+				t = btf_type_by_id(btf, l);
+				if (BTF_INFO_KIND(t->info) == kind)
+					return l;
+				l++;
+			}
+		} else {
+			/* linear search */
+			u32 i, total;
+
+			total = btf_nr_types(btf);
+			for (i = btf_start_id(btf); i < total; i++) {
+				t = btf_type_by_id(btf, i);
+				if (BTF_INFO_KIND(t->info) != kind)
+					continue;
+
+				tname = btf_name_by_offset(btf, t->name_off);
+				if (!strcmp(tname, name))
+					return i;
+			}
+		}
+	}
+
+out:
+	return err;
 }
 
 s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)

From 266fe867ebdada329b9fbdc6ba31671790ed1938 Mon Sep 17 00:00:00 2001
From: pengdonglin
Date: Tue, 4 Nov 2025 21:40:32 +0800
Subject: [PATCH 6/7] btf: Add lazy sorting validation for binary search

Implement lazy validation of BTF type ordering to enable efficient
binary search for sorted BTF while maintaining the linear search
fallback for unsorted cases.
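A BTF producer can arrange for this check to succeed by sorting types
by name before emitting the image, e.g. with the btf__permute() API
added earlier in this series. Below is a minimal userspace sketch;
sort_btf_by_name() and cmp_type_names() are illustrative helper names
(not part of this series), it assumes a standalone (non-split) BTF, and
it uses a file-scope variable only because plain qsort() takes no
context argument:

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <bpf/btf.h>

static struct btf *g_sort_btf;	/* plain qsort() has no context argument */

static int cmp_type_names(const void *a, const void *b)
{
	const struct btf_type *ta = btf__type_by_id(g_sort_btf, *(const __u32 *)a);
	const struct btf_type *tb = btf__type_by_id(g_sort_btf, *(const __u32 *)b);
	const char *na = btf__str_by_offset(g_sort_btf, ta->name_off);
	const char *nb = btf__str_by_offset(g_sort_btf, tb->name_off);

	/* named types sort before anonymous ones, matching what the
	 * lazy check expects
	 */
	if (!*na || !*nb)
		return (!*na) - (!*nb);
	return strcmp(na, nb);
}

static int sort_btf_by_name(struct btf *btf)
{
	__u32 i, n = btf__type_cnt(btf) - 1;	/* exclude void at ID 0 */
	__u32 *ids;
	int err;

	ids = calloc(n, sizeof(*ids));
	if (!ids)
		return -ENOMEM;
	for (i = 0; i < n; i++)
		ids[i] = i + 1;
	g_sort_btf = btf;
	qsort(ids, n, sizeof(*ids), cmp_type_names);
	err = btf__permute(btf, ids, NULL);
	free(ids);
	return err;
}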
Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Signed-off-by: pengdonglin Signed-off-by: Donglin Peng --- kernel/bpf/btf.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index da35d8636b9bf..c76d77fd30a7f 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -192,6 +192,8 @@ */ #define BTF_MAX_SIZE (16 * 1024 * 1024) +#define BTF_NEED_SORT_CHECK ((u32)-1) + #define for_each_member_from(i, from, struct_type, member) \ for (i = from, member = btf_type_member(struct_type) + from; \ i < btf_type_vlen(struct_type); \ @@ -550,6 +552,65 @@ u32 btf_nr_types(const struct btf *btf) return total; } +static int btf_compare_type_names(const void *a, const void *b, void *priv) +{ + struct btf *btf = (struct btf *)priv; + const struct btf_type *ta = btf_type_by_id(btf, *(__u32 *)a); + const struct btf_type *tb = btf_type_by_id(btf, *(__u32 *)b); + const char *na, *nb; + + if (!ta->name_off && tb->name_off) + return 1; + if (ta->name_off && !tb->name_off) + return -1; + if (!ta->name_off && !tb->name_off) + return 0; + + na = btf_name_by_offset(btf, ta->name_off); + nb = btf_name_by_offset(btf, tb->name_off); + return strcmp(na, nb); +} + +/* Verifies BTF type ordering by name and counts named types. + * + * Checks that types are sorted in ascending order with named types + * before anonymous ones. If verified, sets nr_sorted_types to the + * number of named types. + */ +static void btf_check_sorted(struct btf *btf, int start_id) +{ + const struct btf_type *t; + int i, n, nr_sorted_types; + + if (likely(btf->nr_sorted_types != BTF_NEED_SORT_CHECK)) + return; + + btf->nr_sorted_types = 0; + + if (btf->nr_types < 2) + return; + + nr_sorted_types = 0; + n = btf_nr_types(btf); + for (n--, i = start_id; i < n; i++) { + int k = i + 1; + + if (btf_compare_type_names(&i, &k, btf) > 0) + return; + + t = btf_type_by_id(btf, k); + if (t->name_off) + nr_sorted_types++; + } + + t = btf_type_by_id(btf, start_id); + if (t->name_off) + nr_sorted_types++; + + if (nr_sorted_types) + btf->nr_sorted_types = nr_sorted_types; +} + /* Find BTF types with matching names within the [left, right] index range. * On success, updates *left and *right to the boundaries of the matching range * and returns the leftmost matching index. 
@@ -617,6 +678,8 @@ s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) err = btf_find_by_name_kind(base_btf, name, kind); if (err == -ENOENT) { + btf_check_sorted((struct btf *)btf, btf_start_id(btf)); + if (btf->nr_sorted_types) { /* binary search */ s32 l, r; @@ -5882,6 +5945,7 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uat goto errout; } env->btf = btf; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN); if (!data) { @@ -6301,6 +6365,7 @@ static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name btf->data = data; btf->data_size = data_size; btf->kernel_btf = true; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; snprintf(btf->name, sizeof(btf->name), "%s", name); err = btf_parse_hdr(env); @@ -6418,6 +6483,7 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, btf->start_id = base_btf->nr_types; btf->start_str_off = base_btf->hdr.str_len; btf->kernel_btf = true; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; snprintf(btf->name, sizeof(btf->name), "%s", module_name); btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN); From 429887d798e3f6fb38fb28b29a9388b75966ad2f Mon Sep 17 00:00:00 2001 From: pengdonglin Date: Tue, 4 Nov 2025 21:40:33 +0800 Subject: [PATCH 7/7] selftests/bpf: Add test cases for btf__permute functionality This patch introduces test cases for the btf__permute function to ensure it works correctly with both base BTF and split BTF scenarios. The test suite includes: - test_permute_base: Validates permutation on standalone BTF - test_permute_split: Tests permutation on split BTF with base dependencies Each test verifies that type IDs are correctly rearranged and type references are properly updated after permutation operations. 
Cc: Eduard Zingerman
Cc: Alexei Starovoitov
Cc: Andrii Nakryiko
Cc: Alan Maguire
Cc: Song Liu
Signed-off-by: pengdonglin
Signed-off-by: Donglin Peng
---
 .../selftests/bpf/prog_tests/btf_permute.c    | 142 ++++++++++++++++++
 1 file changed, 142 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/btf_permute.c

diff --git a/tools/testing/selftests/bpf/prog_tests/btf_permute.c b/tools/testing/selftests/bpf/prog_tests/btf_permute.c
new file mode 100644
index 0000000000000..2692cef627ab5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_permute.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Xiaomi */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+
+/* ensure btf__permute() works as expected with base BTF */
+static void test_permute_base(void)
+{
+	struct btf *btf;
+	__u32 permute_ids[6];
+	int err;
+
+	btf = btf__new_empty();
+	if (!ASSERT_OK_PTR(btf, "empty_main_btf"))
+		return;
+
+	btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* [1] int */
+	btf__add_ptr(btf, 1);				/* [2] ptr to int */
+	btf__add_struct(btf, "s1", 4);			/* [3] struct s1 { */
+	btf__add_field(btf, "m", 1, 0, 0);		/*      int m; */
+							/* } */
+	btf__add_struct(btf, "s2", 4);			/* [4] struct s2 { */
+	btf__add_field(btf, "m", 1, 0, 0);		/*      int m; */
+							/* } */
+	btf__add_func_proto(btf, 1);			/* [5] int (*)(int *p); */
+	btf__add_func_param(btf, "p", 2);
+	btf__add_func(btf, "f", BTF_FUNC_STATIC, 5);	/* [6] int f(int *p); */
+
+	VALIDATE_RAW_BTF(
+		btf,
+		"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+		"[2] PTR '(anon)' type_id=1",
+		"[3] STRUCT 's1' size=4 vlen=1\n"
+		"\t'm' type_id=1 bits_offset=0",
+		"[4] STRUCT 's2' size=4 vlen=1\n"
+		"\t'm' type_id=1 bits_offset=0",
+		"[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+		"\t'p' type_id=2",
+		"[6] FUNC 'f' type_id=5 linkage=static");
+
+	permute_ids[0] = 4;	/* struct s2 */
+	permute_ids[1] = 3;	/* struct s1 */
+	permute_ids[2] = 5;	/* int (*)(int *p) */
+	permute_ids[3] = 1;	/* int */
+	permute_ids[4] = 6;	/* int f(int *p) */
+	permute_ids[5] = 2;	/* ptr to int */
+	err = btf__permute(btf, permute_ids, NULL);
+	if (!ASSERT_OK(err, "btf__permute"))
+		goto done;
+
+	VALIDATE_RAW_BTF(
+		btf,
+		"[1] STRUCT 's2' size=4 vlen=1\n"
+		"\t'm' type_id=4 bits_offset=0",
+		"[2] STRUCT 's1' size=4 vlen=1\n"
+		"\t'm' type_id=4 bits_offset=0",
+		"[3] FUNC_PROTO '(anon)' ret_type_id=4 vlen=1\n"
+		"\t'p' type_id=6",
+		"[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+		"[5] FUNC 'f' type_id=3 linkage=static",
+		"[6] PTR '(anon)' type_id=4");
+
+done:
+	btf__free(btf);
+}
+
+/* ensure btf__permute() works as expected with split BTF */
+static void test_permute_split(void)
+{
+	struct btf *split_btf = NULL, *base_btf = NULL;
+	__u32 permute_ids[4];
+	int err;
+
+	base_btf = btf__new_empty();
+	if (!ASSERT_OK_PTR(base_btf, "empty_main_btf"))
+		return;
+
+	btf__add_int(base_btf, "int", 4, BTF_INT_SIGNED);	/* [1] int */
+	btf__add_ptr(base_btf, 1);				/* [2] ptr to int */
+	VALIDATE_RAW_BTF(
+		base_btf,
+		"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+		"[2] PTR '(anon)' type_id=1");
+	split_btf = btf__new_empty_split(base_btf);
+	if (!ASSERT_OK_PTR(split_btf, "empty_split_btf"))
+		goto cleanup;
+	btf__add_struct(split_btf, "s1", 4);		/* [3] struct s1 { */
+	btf__add_field(split_btf, "m", 1, 0, 0);	/*      int m; */
+							/* } */
+	btf__add_struct(split_btf, "s2", 4);		/* [4] struct s2 { */
+	btf__add_field(split_btf, "m", 1, 0, 0);	/*      int m; */
+							/* } */
+	btf__add_func_proto(split_btf, 1);		/* [5] int (*)(int *p); */
+
btf__add_func_param(split_btf, "p", 2); + btf__add_func(split_btf, "f", BTF_FUNC_STATIC, 5); /* [6] int f(int *p); */ + + VALIDATE_RAW_BTF( + split_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[4] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] FUNC 'f' type_id=5 linkage=static"); + + permute_ids[0] = 6; /* int f(int *p) */ + permute_ids[1] = 3; /* struct s1 */ + permute_ids[2] = 5; /* int (*)(int *p) */ + permute_ids[3] = 4; /* struct s2 */ + err = btf__permute(split_btf, permute_ids, NULL); + if (!ASSERT_OK(err, "btf__permute")) + goto cleanup; + + VALIDATE_RAW_BTF( + split_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] FUNC 'f' type_id=5 linkage=static", + "[4] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0"); + +cleanup: + btf__free(split_btf); + btf__free(base_btf); +} + +void test_btf_permute(void) +{ + if (test__start_subtest("permute_base")) + test_permute_base(); + if (test__start_subtest("permute_split")) + test_permute_split(); +}