@@ -670,11 +670,18 @@ struct elf_state {
 
 struct usdt_manager;
 
+enum bpf_object_state {
+	OBJ_OPEN,
+	OBJ_PREPARED,
+	OBJ_LOADED,
+};
+
 struct bpf_object {
 	char name[BPF_OBJ_NAME_LEN];
 	char license[64];
 	__u32 kern_version;
 
+	enum bpf_object_state state;
 	struct bpf_program *programs;
 	size_t nr_programs;
 	struct bpf_map *maps;
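The three states are declared in lifecycle order (open, then prepared, then loaded), which is what lets the checks later in this patch become ordered comparisons such as obj->state >= OBJ_LOADED instead of a pair of booleans. A minimal sketch of the idiom; can_mutate() is a hypothetical name, not part of this patch:

/* Sketch only: OBJ_OPEN < OBJ_PREPARED < OBJ_LOADED by declaration order,
 * so "not yet loaded" is a single comparison rather than flag juggling.
 */
static bool can_mutate(const struct bpf_object *obj)
{
	return obj->state < OBJ_LOADED;
}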
@@ -686,7 +693,6 @@ struct bpf_object {
 	int nr_extern;
 	int kconfig_map_idx;
 
-	bool loaded;
 	bool has_subcalls;
 	bool has_rodata;
 	bool has_st_ops;
@@ -1511,7 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->kconfig_map_idx = -1;
 
 	obj->kern_version = get_kernel_version();
-	obj->loaded = false;
+	obj->state = OBJ_OPEN;
 
 	return obj;
 }
@@ -4845,14 +4851,19 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
 	return 0;
 }
 
+static bool map_is_created(const struct bpf_map *map)
+{
+	return map->obj->state >= OBJ_PREPARED || map->reused;
+}
+
 bool bpf_map__autocreate(const struct bpf_map *map)
 {
 	return map->autocreate;
 }
 
 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
 {
-	if (map->obj->loaded)
+	if (map_is_created(map))
 		return libbpf_err(-EBUSY);
 
 	map->autocreate = autocreate;
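Note that map_is_created() is redefined here, not just moved up from its old location below: a map now counts as created once its object reaches OBJ_PREPARED, since map creation happens during the prepare phase. As a consequence, setters such as bpf_map__set_autocreate() and bpf_map__set_max_entries() start returning -EBUSY after bpf_object__prepare(), not only after a full load. A hedged caller-side sketch; "prog.bpf.o" and "my_map" are illustrative names:

static int tune_map_before_prepare(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	obj = bpf_object__open("prog.bpf.o");
	if (!obj)
		return -errno;

	map = bpf_object__find_map_by_name(obj, "my_map");
	if (!map) {
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_map__set_max_entries(map, 4096); /* fine: object is OBJ_OPEN */
	err = err ?: bpf_object__prepare(obj);     /* creates maps, -> OBJ_PREPARED */
	/* from here on, the same setter fails with -EBUSY: the map exists */

	bpf_object__close(obj);
	return err;
}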
@@ -4946,7 +4957,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
 
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
-	if (map->obj->loaded)
+	if (map_is_created(map))
 		return libbpf_err(-EBUSY);
 
 	map->def.max_entries = max_entries;
@@ -5191,11 +5202,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 
 static void bpf_map__destroy(struct bpf_map *map);
 
-static bool map_is_created(const struct bpf_map *map)
-{
-	return map->obj->loaded || map->reused;
-}
-
 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
 {
 	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -7895,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	size_t i;
 	int err;
 
-	for (i = 0; i < obj->nr_programs; i++) {
-		prog = &obj->programs[i];
-		err = bpf_object__sanitize_prog(obj, prog);
-		if (err)
-			return err;
-	}
-
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
 		if (prog_is_subprog(obj, prog))
@@ -7927,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	return 0;
 }
 
+static int bpf_object_prepare_progs(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	size_t i;
+	int err;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+		err = bpf_object__sanitize_prog(obj, prog);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 
 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
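bpf_object_prepare_progs() is the sanitization loop lifted verbatim out of bpf_object__load_progs() (removed in the previous hunk), so instruction sanitization now runs during the prepare phase rather than at program-load time.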
@@ -8543,14 +8557,77 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
 	return 0;
 }
 
+static void bpf_object_unpin(struct bpf_object *obj)
+{
+	int i;
+
+	/* unpin any maps that were auto-pinned during load */
+	for (i = 0; i < obj->nr_maps; i++)
+		if (obj->maps[i].pinned && !obj->maps[i].reused)
+			bpf_map__unpin(&obj->maps[i], NULL);
+}
+
+static void bpf_object_post_load_cleanup(struct bpf_object *obj)
+{
+	int i;
+
+	/* clean up fd_array */
+	zfree(&obj->fd_array);
+
+	/* clean up module BTFs */
+	for (i = 0; i < obj->btf_module_cnt; i++) {
+		close(obj->btf_modules[i].fd);
+		btf__free(obj->btf_modules[i].btf);
+		free(obj->btf_modules[i].name);
+	}
+	obj->btf_module_cnt = 0;
+	zfree(&obj->btf_modules);
+
+	/* clean up vmlinux BTF */
+	btf__free(obj->btf_vmlinux);
+	obj->btf_vmlinux = NULL;
+}
+
+static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
+{
+	int err;
+
+	if (obj->state >= OBJ_PREPARED) {
+		pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
+		return -EINVAL;
+	}
+
+	err = bpf_object_prepare_token(obj);
+	err = err ?: bpf_object__probe_loading(obj);
+	err = err ?: bpf_object__load_vmlinux_btf(obj, false);
+	err = err ?: bpf_object__resolve_externs(obj, obj->kconfig);
+	err = err ?: bpf_object__sanitize_maps(obj);
+	err = err ?: bpf_object__init_kern_struct_ops_maps(obj);
+	err = err ?: bpf_object_adjust_struct_ops_autoload(obj);
+	err = err ?: bpf_object__relocate(obj, obj->btf_custom_path ?: target_btf_path);
+	err = err ?: bpf_object__sanitize_and_load_btf(obj);
+	err = err ?: bpf_object__create_maps(obj);
+	err = err ?: bpf_object_prepare_progs(obj);
+
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		obj->state = OBJ_LOADED;
+		return err;
+	}
+
+	obj->state = OBJ_PREPARED;
+	return 0;
+}
+
 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
-	int err, i;
+	int err;
 
 	if (!obj)
 		return libbpf_err(-EINVAL);
 
-	if (obj->loaded) {
+	if (obj->state >= OBJ_LOADED) {
 		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
 		return libbpf_err(-EINVAL);
 	}
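Note the failure policy in bpf_object_prepare(): on error it unpins and unloads whatever was created, then moves the object straight to OBJ_LOADED rather than back to OBJ_OPEN. OBJ_LOADED acts as a terminal state, so a later bpf_object__prepare() or bpf_object__load() on the same object is rejected with -EINVAL instead of retrying half-finished work.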
@@ -8565,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
 	}
 
-	err = bpf_object_prepare_token(obj);
-	err = err ?: bpf_object__probe_loading(obj);
-	err = err ?: bpf_object__load_vmlinux_btf(obj, false);
-	err = err ?: bpf_object__resolve_externs(obj, obj->kconfig);
-	err = err ?: bpf_object__sanitize_maps(obj);
-	err = err ?: bpf_object__init_kern_struct_ops_maps(obj);
-	err = err ?: bpf_object_adjust_struct_ops_autoload(obj);
-	err = err ?: bpf_object__relocate(obj, obj->btf_custom_path ?: target_btf_path);
-	err = err ?: bpf_object__sanitize_and_load_btf(obj);
-	err = err ?: bpf_object__create_maps(obj);
-	err = err ?: bpf_object__load_progs(obj, extra_log_level);
+	if (obj->state < OBJ_PREPARED) {
+		err = bpf_object_prepare(obj, target_btf_path);
+		if (err)
+			return libbpf_err(err);
+	}
+	err = bpf_object__load_progs(obj, extra_log_level);
 	err = err ?: bpf_object_init_prog_arrays(obj);
 	err = err ?: bpf_object_prepare_struct_ops(obj);
 
@@ -8587,36 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
 	}
 
-	/* clean up fd_array */
-	zfree(&obj->fd_array);
+	bpf_object_post_load_cleanup(obj);
+	obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
 
-	/* clean up module BTFs */
-	for (i = 0; i < obj->btf_module_cnt; i++) {
-		close(obj->btf_modules[i].fd);
-		btf__free(obj->btf_modules[i].btf);
-		free(obj->btf_modules[i].name);
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		pr_warn("failed to load object '%s'\n", obj->path);
+		return libbpf_err(err);
 	}
-	free(obj->btf_modules);
-
-	/* clean up vmlinux BTF */
-	btf__free(obj->btf_vmlinux);
-	obj->btf_vmlinux = NULL;
-
-	obj->loaded = true; /* doesn't matter if successfully or not */
-
-	if (err)
-		goto out;
 
 	return 0;
-out:
-	/* unpin any maps that were auto-pinned during load */
-	for (i = 0; i < obj->nr_maps; i++)
-		if (obj->maps[i].pinned && !obj->maps[i].reused)
-			bpf_map__unpin(&obj->maps[i], NULL);
+}
 
-	bpf_object_unload(obj);
-	pr_warn("failed to load object '%s'\n", obj->path);
-	return libbpf_err(err);
+int bpf_object__prepare(struct bpf_object *obj)
+{
+	return libbpf_err(bpf_object_prepare(obj, NULL));
 }
 
 int bpf_object__load(struct bpf_object *obj)
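With the heavy lifting moved into bpf_object_prepare(), bpf_object_load() reduces to running that phase when it has not happened yet and then loading programs, while the new public bpf_object__prepare() exposes the first phase on its own. A minimal caller sketch of the open -> prepare -> load flow; "prog.bpf.o" is an illustrative path, and calling bpf_object__load() directly without a prepare step keeps working as before:

static int run_object(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open("prog.bpf.o");	/* state: OBJ_OPEN */
	if (!obj)
		return -errno;

	err = bpf_object__prepare(obj);		/* probing, relocations, BTF,
						 * map creation: OBJ_PREPARED */
	err = err ?: bpf_object__load(obj);	/* program loading only:
						 * OBJ_LOADED */

	bpf_object__close(obj);			/* safe even if load never ran */
	return err;
}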
@@ -8866,7 +8924,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
 	if (!obj)
 		return libbpf_err(-ENOENT);
 
-	if (!obj->loaded) {
+	if (obj->state < OBJ_PREPARED) {
 		pr_warn("object not yet loaded; load it first\n");
 		return libbpf_err(-ENOENT);
 	}
@@ -8945,7 +9003,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
 	if (!obj)
 		return libbpf_err(-ENOENT);
 
-	if (!obj->loaded) {
+	if (obj->state < OBJ_LOADED) {
 		pr_warn("object not yet loaded; load it first\n");
 		return libbpf_err(-ENOENT);
 	}
@@ -9064,6 +9122,13 @@ void bpf_object__close(struct bpf_object *obj)
 	if (IS_ERR_OR_NULL(obj))
 		return;
 
+	/*
+	 * if user called bpf_object__prepare() without ever getting to
+	 * bpf_object__load(), we need to clean up stuff that is normally
+	 * cleaned up at the end of loading step
+	 */
+	bpf_object_post_load_cleanup(obj);
+
 	usdt_manager_free(obj->usdt_man);
 	obj->usdt_man = NULL;
 
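Calling bpf_object_post_load_cleanup() unconditionally here is safe even when bpf_object__load() already ran it: zfree() NULLs the pointers it frees, btf_module_cnt is reset to zero, and btf__free(NULL) is a no-op, so the second invocation does nothing.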
@@ -9132,7 +9197,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
 
 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
 {
-	if (obj->loaded)
+	if (obj->state >= OBJ_LOADED)
 		return libbpf_err(-EINVAL);
 
 	obj->kern_version = kern_version;
@@ -9229,7 +9294,7 @@ bool bpf_program__autoload(const struct bpf_program *prog)
 
 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EINVAL);
 
 	prog->autoload = autoload;
@@ -9261,7 +9326,7 @@ int bpf_program__set_insns(struct bpf_program *prog,
 {
 	struct bpf_insn *insns;
 
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
@@ -9304,7 +9369,7 @@ static int last_custom_sec_def_handler_id;
 
 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	/* if type is not changed, do nothing */
@@ -9335,7 +9400,7 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
 					  enum bpf_attach_type type)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->expected_attach_type = type;
@@ -9349,7 +9414,7 @@ __u32 bpf_program__flags(const struct bpf_program *prog)
 
 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->prog_flags = flags;
@@ -9363,7 +9428,7 @@ __u32 bpf_program__log_level(const struct bpf_program *prog)
 
 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->log_level = log_level;
@@ -9382,7 +9447,7 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
 		return libbpf_err(-EINVAL);
 	if (prog->log_size > UINT_MAX)
 		return libbpf_err(-EINVAL);
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->log_buf = log_buf;
@@ -10299,7 +10364,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
 
 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
 {
-	if (map->obj->loaded || map->reused)
+	if (map_is_created(map))
 		return libbpf_err(-EBUSY);
 
 	if (map->mmaped) {
@@ -10345,7 +10410,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
 {
 	size_t actual_sz;
 
-	if (map->obj->loaded || map->reused)
+	if (map_is_created(map))
 		return libbpf_err(-EBUSY);
 
 	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
@@ -13666,7 +13731,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
 	if (!prog || attach_prog_fd < 0)
 		return libbpf_err(-EINVAL);
 
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EINVAL);
 
 	if (attach_prog_fd && !attach_func_name) {