@@ -1503,6 +1503,16 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
1503
1503
return ERR_PTR (- ENOENT );
1504
1504
}
1505
1505
1506
+ static int create_placeholder_fd (void )
1507
+ {
1508
+ int fd ;
1509
+
1510
+ fd = ensure_good_fd (memfd_create ("libbpf-placeholder-fd" , MFD_CLOEXEC ));
1511
+ if (fd < 0 )
1512
+ return - errno ;
1513
+ return fd ;
1514
+ }
1515
+
1506
1516
static struct bpf_map * bpf_object__add_map (struct bpf_object * obj )
1507
1517
{
1508
1518
struct bpf_map * map ;
@@ -1515,7 +1525,21 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1515
1525
1516
1526
map = & obj -> maps [obj -> nr_maps ++ ];
1517
1527
map -> obj = obj ;
1518
- map -> fd = -1 ;
1528
+ /* Preallocate map FD without actually creating BPF map just yet.
1529
+ * These map FD "placeholders" will be reused later without changing
1530
+ * FD value when map is actually created in the kernel.
1531
+ *
1532
+ * This is useful to be able to perform BPF program relocations
1533
+ * without having to create BPF maps before that step. This allows us
1534
+ * to finalize and load BTF very late in BPF object's loading phase,
1535
+ * right before BPF maps have to be created and BPF programs have to
1536
+ * be loaded. By having these map FD placeholders we can perform all
1537
+ * the sanitizations, relocations, and any other adjustments before we
1538
+ * start creating actual BPF kernel objects (BTF, maps, progs).
1539
+ */
1540
+ map -> fd = create_placeholder_fd ();
1541
+ if (map -> fd < 0 )
1542
+ return ERR_PTR (map -> fd );
1519
1543
map -> inner_map_fd = -1 ;
1520
1544
map -> autocreate = true;
1521
1545
@@ -2607,7 +2631,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2607
2631
map -> inner_map = calloc (1 , sizeof (* map -> inner_map ));
2608
2632
if (!map -> inner_map )
2609
2633
return - ENOMEM ;
2610
- map -> inner_map -> fd = -1 ;
2634
+ map -> inner_map -> fd = create_placeholder_fd ();
2635
+ if (map -> inner_map -> fd < 0 )
2636
+ return map -> inner_map -> fd ;
2611
2637
map -> inner_map -> sec_idx = sec_idx ;
2612
2638
map -> inner_map -> name = malloc (strlen (map_name ) + sizeof (".inner" ) + 1 );
2613
2639
if (!map -> inner_map -> name )
@@ -4549,14 +4575,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4549
4575
goto err_free_new_name ;
4550
4576
}
4551
4577
4552
- err = zclose (map -> fd );
4553
- if (err ) {
4554
- err = - errno ;
4555
- goto err_close_new_fd ;
4556
- }
4578
+ err = reuse_fd (map -> fd , new_fd );
4579
+ if (err )
4580
+ goto err_free_new_name ;
4581
+
4557
4582
free (map -> name );
4558
4583
4559
- map -> fd = new_fd ;
4560
4584
map -> name = new_name ;
4561
4585
map -> def .type = info .type ;
4562
4586
map -> def .key_size = info .key_size ;
@@ -4570,8 +4594,6 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4570
4594
4571
4595
return 0 ;
4572
4596
4573
- err_close_new_fd :
4574
- close (new_fd );
4575
4597
err_free_new_name :
4576
4598
free (new_name );
4577
4599
return libbpf_err (err );
@@ -5210,7 +5232,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
5210
5232
LIBBPF_OPTS (bpf_map_create_opts , create_attr );
5211
5233
struct bpf_map_def * def = & map -> def ;
5212
5234
const char * map_name = NULL ;
5213
- int err = 0 ;
5235
+ int err = 0 , map_fd ;
5214
5236
5215
5237
if (kernel_supports (obj , FEAT_PROG_NAME ))
5216
5238
map_name = map -> name ;
@@ -5269,17 +5291,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
5269
5291
bpf_gen__map_create (obj -> gen_loader , def -> type , map_name ,
5270
5292
def -> key_size , def -> value_size , def -> max_entries ,
5271
5293
& create_attr , is_inner ? -1 : map - obj -> maps );
5272
- /* Pretend to have valid FD to pass various fd >= 0 checks.
5273
- * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
5294
+ /* We keep pretending we have valid FD to pass various fd >= 0
5295
+ * checks by just keeping original placeholder FDs in place.
5296
+ * See bpf_object__add_map() comment.
5297
+ * This placeholder fd will not be used with any syscall and
5298
+ * will be reset to -1 eventually.
5274
5299
*/
5275
- map -> fd = 0 ;
5300
+ map_fd = map -> fd ;
5276
5301
} else {
5277
- map -> fd = bpf_map_create (def -> type , map_name ,
5278
- def -> key_size , def -> value_size ,
5279
- def -> max_entries , & create_attr );
5302
+ map_fd = bpf_map_create (def -> type , map_name ,
5303
+ def -> key_size , def -> value_size ,
5304
+ def -> max_entries , & create_attr );
5280
5305
}
5281
- if (map -> fd < 0 && (create_attr .btf_key_type_id ||
5282
- create_attr .btf_value_type_id )) {
5306
+ if (map_fd < 0 && (create_attr .btf_key_type_id || create_attr .btf_value_type_id )) {
5283
5307
char * cp , errmsg [STRERR_BUFSIZE ];
5284
5308
5285
5309
err = - errno ;
@@ -5291,21 +5315,31 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
5291
5315
create_attr .btf_value_type_id = 0 ;
5292
5316
map -> btf_key_type_id = 0 ;
5293
5317
map -> btf_value_type_id = 0 ;
5294
- map -> fd = bpf_map_create (def -> type , map_name ,
5295
- def -> key_size , def -> value_size ,
5296
- def -> max_entries , & create_attr );
5318
+ map_fd = bpf_map_create (def -> type , map_name ,
5319
+ def -> key_size , def -> value_size ,
5320
+ def -> max_entries , & create_attr );
5297
5321
}
5298
5322
5299
- err = map -> fd < 0 ? - errno : 0 ;
5300
-
5301
5323
if (bpf_map_type__is_map_in_map (def -> type ) && map -> inner_map ) {
5302
5324
if (obj -> gen_loader )
5303
5325
map -> inner_map -> fd = -1 ;
5304
5326
bpf_map__destroy (map -> inner_map );
5305
5327
zfree (& map -> inner_map );
5306
5328
}
5307
5329
5308
- return err ;
5330
+ if (map_fd < 0 )
5331
+ return map_fd ;
5332
+
5333
+ /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5334
+ if (map -> fd == map_fd )
5335
+ return 0 ;
5336
+
5337
+ /* Keep placeholder FD value but now point it to the BPF map object.
5338
+ * This way everything that relied on this map's FD (e.g., relocated
5339
+ * ldimm64 instructions) will stay valid and won't need adjustments.
5340
+ * map->fd stays valid but now point to what map_fd points to.
5341
+ */
5342
+ return reuse_fd (map -> fd , map_fd );
5309
5343
}
5310
5344
5311
5345
static int init_map_in_map_slots (struct bpf_object * obj , struct bpf_map * map )
@@ -5389,10 +5423,8 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5389
5423
continue ;
5390
5424
5391
5425
err = init_prog_array_slots (obj , map );
5392
- if (err < 0 ) {
5393
- zclose (map -> fd );
5426
+ if (err < 0 )
5394
5427
return err ;
5395
- }
5396
5428
}
5397
5429
return 0 ;
5398
5430
}
@@ -5483,25 +5515,20 @@ bpf_object__create_maps(struct bpf_object *obj)
5483
5515
5484
5516
if (bpf_map__is_internal (map )) {
5485
5517
err = bpf_object__populate_internal_map (obj , map );
5486
- if (err < 0 ) {
5487
- zclose (map -> fd );
5518
+ if (err < 0 )
5488
5519
goto err_out ;
5489
- }
5490
5520
}
5491
5521
5492
5522
if (map -> init_slots_sz && map -> def .type != BPF_MAP_TYPE_PROG_ARRAY ) {
5493
5523
err = init_map_in_map_slots (obj , map );
5494
- if (err < 0 ) {
5495
- zclose (map -> fd );
5524
+ if (err < 0 )
5496
5525
goto err_out ;
5497
- }
5498
5526
}
5499
5527
}
5500
5528
5501
5529
if (map -> pin_path && !map -> pinned ) {
5502
5530
err = bpf_map__pin (map , NULL );
5503
5531
if (err ) {
5504
- zclose (map -> fd );
5505
5532
if (!retried && err == - EEXIST ) {
5506
5533
retried = true;
5507
5534
goto retry ;
@@ -8075,8 +8102,8 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
8075
8102
err = err ? : bpf_object__sanitize_and_load_btf (obj );
8076
8103
err = err ? : bpf_object__sanitize_maps (obj );
8077
8104
err = err ? : bpf_object__init_kern_struct_ops_maps (obj );
8078
- err = err ? : bpf_object__create_maps (obj );
8079
8105
err = err ? : bpf_object__relocate (obj , obj -> btf_custom_path ? : target_btf_path );
8106
+ err = err ? : bpf_object__create_maps (obj );
8080
8107
err = err ? : bpf_object__load_progs (obj , extra_log_level );
8081
8108
err = err ? : bpf_object_init_prog_arrays (obj );
8082
8109
err = err ? : bpf_object_prepare_struct_ops (obj );
@@ -8085,8 +8112,6 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
8085
8112
/* reset FDs */
8086
8113
if (obj -> btf )
8087
8114
btf__set_fd (obj -> btf , -1 );
8088
- for (i = 0 ; i < obj -> nr_maps ; i ++ )
8089
- obj -> maps [i ].fd = -1 ;
8090
8115
if (!err )
8091
8116
err = bpf_gen__finish (obj -> gen_loader , obj -> nr_programs , obj -> nr_maps );
8092
8117
}