@@ -499,6 +499,7 @@ struct bpf_program {
 	__u32 line_info_rec_size;
 	__u32 line_info_cnt;
 	__u32 prog_flags;
+	__u8 hash[SHA256_DIGEST_LENGTH];
 };
 
 struct bpf_struct_ops {
@@ -578,6 +579,7 @@ struct bpf_map {
 	bool autocreate;
 	bool autoattach;
 	__u64 map_extra;
+	struct bpf_program *excl_prog;
 };
 
 enum extern_type {
@@ -4488,6 +4490,44 @@ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
 	}
 }
 
+static int bpf_prog_compute_hash(struct bpf_program *prog)
+{
+	struct bpf_insn *purged;
+	int i, err;
+
+	purged = calloc(prog->insns_cnt, BPF_INSN_SZ);
+	if (!purged)
+		return -ENOMEM;
+
+	/* If relocations have been done, the map_fd needs to be
+	 * discarded for the digest calculation.
+	 */
+	for (i = 0; i < prog->insns_cnt; i++) {
+		purged[i] = prog->insns[i];
+		if (purged[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+		    (purged[i].src_reg == BPF_PSEUDO_MAP_FD ||
+		     purged[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
+			purged[i].imm = 0;
+			i++;
+			if (i >= prog->insns_cnt ||
+			    prog->insns[i].code != 0 ||
+			    prog->insns[i].dst_reg != 0 ||
+			    prog->insns[i].src_reg != 0 ||
+			    prog->insns[i].off != 0) {
+				err = -EINVAL;
+				goto out;
+			}
+			purged[i] = prog->insns[i];
+			purged[i].imm = 0;
+		}
+	}
+	err = libbpf_sha256(purged, prog->insns_cnt * sizeof(struct bpf_insn),
+			    prog->hash, SHA256_DIGEST_LENGTH);
+out:
+	free(purged);
+	return err;
+}
+
 static int bpf_program__record_reloc(struct bpf_program *prog,
				     struct reloc_desc *reloc_desc,
				     __u32 insn_idx, const char *sym_name,
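
Not part of the patch: a minimal sketch, assuming only <linux/bpf.h>, of how a map reference is encoded, which is what the purge loop above relies on. A 64-bit immediate load (BPF_LD | BPF_IMM | BPF_DW) spans two consecutive struct bpf_insn slots: the first carries the opcode, registers and the low 32 bits of the immediate (the map fd or map value address after relocation), the second "pseudo" slot carries only the high 32 bits. That is why bpf_prog_compute_hash() zeroes imm in both slots and requires every other field of the second slot to be zero.

#include <linux/bpf.h>

/* Illustration only; the fd value 42 is a placeholder. */
static const struct bpf_insn ld_map_fd_example[2] = {
	{ .code = BPF_LD | BPF_IMM | BPF_DW, .dst_reg = BPF_REG_1,
	  .src_reg = BPF_PSEUDO_MAP_FD, .off = 0, .imm = 42 /* low 32 bits: map fd */ },
	{ .code = 0, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 /* high 32 bits */ },
};
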
@@ -5237,6 +5277,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	create_attr.token_fd = obj->token_fd;
 	if (obj->token_fd)
 		create_attr.map_flags |= BPF_F_TOKEN_FD;
+	if (map->excl_prog) {
+		err = bpf_prog_compute_hash(map->excl_prog);
+		if (err)
+			return err;
+
+		create_attr.excl_prog_hash = map->excl_prog->hash;
+		create_attr.excl_prog_hash_size = SHA256_DIGEST_LENGTH;
+	}
 
 	if (bpf_map__is_struct_ops(map)) {
 		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
@@ -10527,6 +10575,27 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
 	return 0;
 }
 
+int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog)
+{
+	if (map_is_created(map)) {
+		pr_warn("exclusive programs must be set before map creation\n");
+		return libbpf_err(-EINVAL);
+	}
+
+	if (map->obj != prog->obj) {
+		pr_warn("excl_prog and map must be from the same bpf object\n");
+		return libbpf_err(-EINVAL);
+	}
+
+	map->excl_prog = prog;
+	return 0;
+}
+
+struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map)
+{
+	return map->excl_prog;
+}
+
 static struct bpf_map *
 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
 {
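
Not part of the commit: a minimal usage sketch of the new setter, assuming a hypothetical object file example.bpf.o containing a map named example_map and a program named example_prog (the function name load_with_exclusive_prog is also made up). The exclusive program must be set after opening the object but before bpf_object__load(), since the hash is bound to the map when it is created during load.

#include <errno.h>
#include <bpf/libbpf.h>

int load_with_exclusive_prog(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open_file("example.bpf.o", NULL);
	if (!obj)
		return -errno;

	map = bpf_object__find_map_by_name(obj, "example_map");
	prog = bpf_object__find_program_by_name(obj, "example_prog");
	if (!map || !prog) {
		err = -ENOENT;
		goto out;
	}

	/* Restrict the map so that only this program may be loaded against it. */
	err = bpf_map__set_exclusive_program(map, prog);
	if (err)
		goto out;

	err = bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err;
}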