@@ -3478,60 +3478,44 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
 	return BTF_FIELD_FOUND;
 }
 
-#define field_mask_test_name(field_type, field_type_str) \
-	if (field_mask & field_type && !strcmp(name, field_type_str)) { \
-		type = field_type;					\
-		goto end;						\
-	}
-
 static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
-			      u32 field_mask, u32 *seen_mask,
-			      int *align, int *sz)
-{
-	int type = 0;
+			      u32 field_mask, u32 *seen_mask, int *align, int *sz)
+{
+	const struct {
+		enum btf_field_type type;
+		const char * const name;
+		const bool is_unique;
+	} field_types[] = {
+		{ BPF_SPIN_LOCK, "bpf_spin_lock", true },
+		{ BPF_RES_SPIN_LOCK, "bpf_res_spin_lock", true },
+		{ BPF_TIMER, "bpf_timer", true },
+		{ BPF_WORKQUEUE, "bpf_wq", true },
+		{ BPF_LIST_HEAD, "bpf_list_head", false },
+		{ BPF_LIST_NODE, "bpf_list_node", false },
+		{ BPF_RB_ROOT, "bpf_rb_root", false },
+		{ BPF_RB_NODE, "bpf_rb_node", false },
+		{ BPF_REFCOUNT, "bpf_refcount", false },
+	};
+	int type = 0, i;
 	const char *name = __btf_name_by_offset(btf, var_type->name_off);
-
-	if (field_mask & BPF_SPIN_LOCK) {
-		if (!strcmp(name, "bpf_spin_lock")) {
-			if (*seen_mask & BPF_SPIN_LOCK)
-				return -E2BIG;
-			*seen_mask |= BPF_SPIN_LOCK;
-			type = BPF_SPIN_LOCK;
-			goto end;
-		}
-	}
-	if (field_mask & BPF_RES_SPIN_LOCK) {
-		if (!strcmp(name, "bpf_res_spin_lock")) {
-			if (*seen_mask & BPF_RES_SPIN_LOCK)
-				return -E2BIG;
-			*seen_mask |= BPF_RES_SPIN_LOCK;
-			type = BPF_RES_SPIN_LOCK;
-			goto end;
-		}
-	}
-	if (field_mask & BPF_TIMER) {
-		if (!strcmp(name, "bpf_timer")) {
-			if (*seen_mask & BPF_TIMER)
-				return -E2BIG;
-			*seen_mask |= BPF_TIMER;
-			type = BPF_TIMER;
-			goto end;
-		}
-	}
-	if (field_mask & BPF_WORKQUEUE) {
-		if (!strcmp(name, "bpf_wq")) {
-			if (*seen_mask & BPF_WORKQUEUE)
+	const char *field_type_name;
+	enum btf_field_type field_type;
+	bool is_unique;
+
+	for (i = 0; i < ARRAY_SIZE(field_types); ++i) {
+		field_type = field_types[i].type;
+		field_type_name = field_types[i].name;
+		is_unique = field_types[i].is_unique;
+		if (!(field_mask & field_type) || strcmp(name, field_type_name))
+			continue;
+		if (is_unique) {
+			if (*seen_mask & field_type)
 				return -E2BIG;
-			*seen_mask |= BPF_WORKQUEUE;
-			type = BPF_WORKQUEUE;
-			goto end;
+			*seen_mask |= field_type;
 		}
+		type = field_type;
+		goto end;
 	}
-	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
-	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
-	field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
-	field_mask_test_name(BPF_RB_NODE, "bpf_rb_node");
-	field_mask_test_name(BPF_REFCOUNT, "bpf_refcount");
 
 	/* Only return BPF_KPTR when all other types with matchable names fail */
 	if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
@@ -3545,8 +3529,6 @@ static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_
 	return type;
 }
 
-#undef field_mask_test_name
-
 /* Repeat a number of fields for a specified number of times.
  *
  * Copy the fields starting from the first field and repeat them for
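
For reference, a minimal userspace sketch of the table-driven lookup this patch introduces is shown below. The enum values, the ARRAY_SIZE macro, and the main() driver are stand-ins for illustration only, not the kernel definitions; it only demonstrates the match-then-check-uniqueness flow of the new loop.

/* Illustrative sketch, not kernel code: table-driven field-name lookup
 * with a "unique" flag replacing per-type if/strcmp blocks. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical mask bits standing in for the BPF_* field types. */
enum field_kind { F_SPIN_LOCK = 1 << 0, F_TIMER = 1 << 1, F_LIST_HEAD = 1 << 2 };

/* One row per recognizable field: mask bit, BTF type name, and whether
 * only a single instance is allowed per value. */
static const struct {
	enum field_kind kind;
	const char *name;
	bool is_unique;
} field_table[] = {
	{ F_SPIN_LOCK, "bpf_spin_lock", true },
	{ F_TIMER,     "bpf_timer",     true },
	{ F_LIST_HEAD, "bpf_list_head", false },
};

/* Returns the matched kind, 0 if unrecognized, or -1 (playing the role of
 * -E2BIG) when a unique field is seen twice. */
static int lookup_field(const char *name, unsigned field_mask, unsigned *seen_mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(field_table); i++) {
		/* Skip rows that are masked out or whose name does not match. */
		if (!(field_mask & field_table[i].kind) ||
		    strcmp(name, field_table[i].name))
			continue;
		if (field_table[i].is_unique) {
			if (*seen_mask & field_table[i].kind)
				return -1;
			*seen_mask |= field_table[i].kind;
		}
		return field_table[i].kind;
	}
	return 0;
}

int main(void)
{
	unsigned seen = 0;

	printf("%d\n", lookup_field("bpf_timer", ~0u, &seen));     /* 2 */
	printf("%d\n", lookup_field("bpf_timer", ~0u, &seen));     /* -1: duplicate unique field */
	printf("%d\n", lookup_field("bpf_list_head", ~0u, &seen)); /* 4 */
	printf("%d\n", lookup_field("bpf_list_head", ~0u, &seen)); /* 4: repetition allowed */
	return 0;
}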