@@ -7901,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
7901
7901
size_t i ;
7902
7902
int err ;
7903
7903
7904
- for (i = 0 ; i < obj -> nr_programs ; i ++ ) {
7905
- prog = & obj -> programs [i ];
7906
- err = bpf_object__sanitize_prog (obj , prog );
7907
- if (err )
7908
- return err ;
7909
- }
7910
-
7911
7904
for (i = 0 ; i < obj -> nr_programs ; i ++ ) {
7912
7905
prog = & obj -> programs [i ];
7913
7906
if (prog_is_subprog (obj , prog ))
@@ -7933,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
7933
7926
return 0 ;
7934
7927
}
7935
7928
7929
+ static int bpf_object_prepare_progs (struct bpf_object * obj )
7930
+ {
7931
+ struct bpf_program * prog ;
7932
+ size_t i ;
7933
+ int err ;
7934
+
7935
+ for (i = 0 ; i < obj -> nr_programs ; i ++ ) {
7936
+ prog = & obj -> programs [i ];
7937
+ err = bpf_object__sanitize_prog (obj , prog );
7938
+ if (err )
7939
+ return err ;
7940
+ }
7941
+ return 0 ;
7942
+ }
7943
+
7936
7944
static const struct bpf_sec_def * find_sec_def (const char * sec_name );
7937
7945
7938
7946
static int bpf_object_init_progs (struct bpf_object * obj , const struct bpf_object_open_opts * opts )
@@ -8549,9 +8557,72 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8549
8557
return 0 ;
8550
8558
}
8551
8559
8560
+ static void bpf_object_unpin (struct bpf_object * obj )
8561
+ {
8562
+ int i ;
8563
+
8564
+ /* unpin any maps that were auto-pinned during load */
8565
+ for (i = 0 ; i < obj -> nr_maps ; i ++ )
8566
+ if (obj -> maps [i ].pinned && !obj -> maps [i ].reused )
8567
+ bpf_map__unpin (& obj -> maps [i ], NULL );
8568
+ }
8569
+
8570
/* Free temporary state that is only needed during loading: the FD
 * array, any module BTFs cached on the object, and the vmlinux BTF.
 *
 * Intentionally idempotent — every step resets the field it frees
 * (zfree() NULLs the pointer, btf_module_cnt is zeroed, btf_vmlinux is
 * NULLed) — because this is called both at the end of
 * bpf_object_load() and again from bpf_object__close(), for the case
 * where the user prepared the object but never loaded it.
 */
static void bpf_object_post_load_cleanup(struct bpf_object *obj)
{
	int i;

	/* clean up fd_array */
	zfree(&obj->fd_array);

	/* clean up module BTFs */
	for (i = 0; i < obj->btf_module_cnt; i++) {
		close(obj->btf_modules[i].fd);
		btf__free(obj->btf_modules[i].btf);
		free(obj->btf_modules[i].name);
	}
	obj->btf_module_cnt = 0;
	zfree(&obj->btf_modules);

	/* clean up vmlinux BTF */
	btf__free(obj->btf_vmlinux);
	obj->btf_vmlinux = NULL;
}
8590
+
8591
/* Run the "prepare" half of object loading: everything up to, but not
 * including, loading program instructions into the kernel — feature
 * probing, BTF handling, extern resolution, relocations, map creation,
 * and per-program sanitization.
 *
 * On success obj->state advances to OBJ_PREPARED. On failure, partial
 * work is rolled back (auto-pinned maps unpinned, bpf_object_unload()
 * invoked) and state is set to OBJ_LOADED so that any further
 * prepare/load attempt on this object is rejected.
 *
 * Returns 0 or a negative error code; callers are expected to wrap the
 * result with libbpf_err() themselves.
 */
static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
{
	int err;

	/* preparing (like loading) is a one-shot operation per object */
	if (obj->state >= OBJ_PREPARED) {
		pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
		return -EINVAL;
	}

	/* each step runs only if all preceding steps succeeded (GNU ?:
	 * short-circuits on the first non-zero err)
	 */
	err = bpf_object_prepare_token(obj);
	err = err ?: bpf_object__probe_loading(obj);
	err = err ?: bpf_object__load_vmlinux_btf(obj, false);
	err = err ?: bpf_object__resolve_externs(obj, obj->kconfig);
	err = err ?: bpf_object__sanitize_maps(obj);
	err = err ?: bpf_object__init_kern_struct_ops_maps(obj);
	err = err ?: bpf_object_adjust_struct_ops_autoload(obj);
	/* explicit custom BTF path takes precedence over target_btf_path */
	err = err ?: bpf_object__relocate(obj, obj->btf_custom_path ?: target_btf_path);
	err = err ?: bpf_object__sanitize_and_load_btf(obj);
	err = err ?: bpf_object__create_maps(obj);
	err = err ?: bpf_object_prepare_progs(obj);

	if (err) {
		/* undo partial work; OBJ_LOADED marks the object as spent
		 * so no retry is possible
		 */
		bpf_object_unpin(obj);
		bpf_object_unload(obj);
		obj->state = OBJ_LOADED;
		return err;
	}

	obj->state = OBJ_PREPARED;
	return 0;
}
8622
+
8552
8623
static int bpf_object_load (struct bpf_object * obj , int extra_log_level , const char * target_btf_path )
8553
8624
{
8554
- int err , i ;
8625
+ int err ;
8555
8626
8556
8627
if (!obj )
8557
8628
return libbpf_err (- EINVAL );
@@ -8571,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
8571
8642
return libbpf_err (- LIBBPF_ERRNO__ENDIAN );
8572
8643
}
8573
8644
8574
- err = bpf_object_prepare_token (obj );
8575
- err = err ? : bpf_object__probe_loading (obj );
8576
- err = err ? : bpf_object__load_vmlinux_btf (obj , false);
8577
- err = err ? : bpf_object__resolve_externs (obj , obj -> kconfig );
8578
- err = err ? : bpf_object__sanitize_maps (obj );
8579
- err = err ? : bpf_object__init_kern_struct_ops_maps (obj );
8580
- err = err ? : bpf_object_adjust_struct_ops_autoload (obj );
8581
- err = err ? : bpf_object__relocate (obj , obj -> btf_custom_path ? : target_btf_path );
8582
- err = err ? : bpf_object__sanitize_and_load_btf (obj );
8583
- err = err ? : bpf_object__create_maps (obj );
8584
- err = err ? : bpf_object__load_progs (obj , extra_log_level );
8645
+ if (obj -> state < OBJ_PREPARED ) {
8646
+ err = bpf_object_prepare (obj , target_btf_path );
8647
+ if (err )
8648
+ return libbpf_err (err );
8649
+ }
8650
+ err = bpf_object__load_progs (obj , extra_log_level );
8585
8651
err = err ? : bpf_object_init_prog_arrays (obj );
8586
8652
err = err ? : bpf_object_prepare_struct_ops (obj );
8587
8653
@@ -8593,35 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
8593
8659
err = bpf_gen__finish (obj -> gen_loader , obj -> nr_programs , obj -> nr_maps );
8594
8660
}
8595
8661
8596
- /* clean up fd_array */
8597
- zfree ( & obj -> fd_array );
8662
+ bpf_object_post_load_cleanup ( obj );
8663
+ obj -> state = OBJ_LOADED ; /* doesn't matter if successfully or not */
8598
8664
8599
- /* clean up module BTFs */
8600
- for ( i = 0 ; i < obj -> btf_module_cnt ; i ++ ) {
8601
- close (obj -> btf_modules [ i ]. fd );
8602
- btf__free ( obj -> btf_modules [ i ]. btf );
8603
- free ( obj -> btf_modules [ i ]. name );
8665
+ if ( err ) {
8666
+ bpf_object_unpin ( obj );
8667
+ bpf_object_unload (obj );
8668
+ pr_warn ( "failed to load object '%s'\n" , obj -> path );
8669
+ return libbpf_err ( err );
8604
8670
}
8605
- free (obj -> btf_modules );
8606
-
8607
- /* clean up vmlinux BTF */
8608
- btf__free (obj -> btf_vmlinux );
8609
- obj -> btf_vmlinux = NULL ;
8610
-
8611
- obj -> state = OBJ_LOADED ; /* doesn't matter if successfully or not */
8612
- if (err )
8613
- goto out ;
8614
8671
8615
8672
return 0 ;
8616
- out :
8617
- /* unpin any maps that were auto-pinned during load */
8618
- for (i = 0 ; i < obj -> nr_maps ; i ++ )
8619
- if (obj -> maps [i ].pinned && !obj -> maps [i ].reused )
8620
- bpf_map__unpin (& obj -> maps [i ], NULL );
8673
+ }
8621
8674
8622
- bpf_object_unload ( obj );
8623
- pr_warn ( "failed to load object '%s'\n" , obj -> path );
8624
- return libbpf_err (err );
8675
+ int bpf_object__prepare ( struct bpf_object * obj )
8676
+ {
8677
+ return libbpf_err (bpf_object_prepare ( obj , NULL ) );
8625
8678
}
8626
8679
8627
8680
int bpf_object__load (struct bpf_object * obj )
@@ -9069,6 +9122,13 @@ void bpf_object__close(struct bpf_object *obj)
9069
9122
if (IS_ERR_OR_NULL (obj ))
9070
9123
return ;
9071
9124
9125
+ /*
9126
+ * if user called bpf_object__prepare() without ever getting to
9127
+ * bpf_object__load(), we need to clean up stuff that is normally
9128
+ * cleaned up at the end of loading step
9129
+ */
9130
+ bpf_object_post_load_cleanup (obj );
9131
+
9072
9132
usdt_manager_free (obj -> usdt_man );
9073
9133
obj -> usdt_man = NULL ;
9074
9134
0 commit comments