
Commit 2941e21

anakryiko (Andrii Nakryiko) authored and Alexei Starovoitov committed
Merge branch 'introduce-bpf_object__prepare'
Mykyta Yatsenko says:

====================
Introduce bpf_object__prepare

From: Mykyta Yatsenko <[email protected]>

We are introducing a new function in the libbpf API, bpf_object__prepare, which provides more granular control over the process of loading a bpf_object. bpf_object__prepare performs ELF processing, relocations, prepares the final state of BPF program instructions (accessible with bpf_program__insns()), creates and potentially pins maps, and stops short of loading BPF programs.

There are a couple of anticipated use cases for this API:

* Using a BPF token for freplace programs that might need to look up the BTF of other programs (BPF token creation can't be moved to the open step, as the open step is the "no privilege assumption" step, so that tools like bpftool can generate a skeleton, discover the structure of the BPF object, etc.).

* Stopping at prepare gives users finalized BPF program instructions (with subprogs appended, everything relocated and finalized, etc.). That property can be taken advantage of by veristat (and similar tools) that might want to process one program at a time, but would like to avoid relatively slow ELF parsing and processing; even BPF selftests itself (the RUN_TESTS part of it, at least) would benefit from this by eliminating the waste of re-processing the ELF many times.
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents 19856a5 + 6419d08 commit 2941e21
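
For illustration, a minimal sketch of how a caller might use the new prepare/load split (the object file name "prog.bpf.o" and the error handling are hypothetical; bpf_object__prepare() is the API added by this series, everything else is a pre-existing libbpf call):

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
        struct bpf_object *obj;
        struct bpf_program *prog;
        int err;

        obj = bpf_object__open_file("prog.bpf.o", NULL);   /* hypothetical object file */
        if (!obj)
                return 1;

        /* ELF processing, relocations, map creation; stops short of loading programs */
        err = bpf_object__prepare(obj);
        if (err)
                goto out;

        /* finalized instructions are available without loading anything into the kernel */
        bpf_object__for_each_program(prog, obj)
                printf("%s: %zu insns\n", bpf_program__name(prog),
                       bpf_program__insn_cnt(prog));

        /* picks up where prepare left off and loads the programs */
        err = bpf_object__load(obj);
out:
        bpf_object__close(obj);
        return err ? 1 : 0;
}

With this split, a veristat-like tool can pay the ELF parsing and relocation cost once and then inspect each program's finalized instructions via bpf_program__insns()/bpf_program__insn_cnt() before deciding what to load.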

File tree: 5 files changed (+273, -67 lines)


tools/lib/bpf/libbpf.c (132 additions, 67 deletions)
@@ -670,11 +670,18 @@ struct elf_state {
 
 struct usdt_manager;
 
+enum bpf_object_state {
+        OBJ_OPEN,
+        OBJ_PREPARED,
+        OBJ_LOADED,
+};
+
 struct bpf_object {
         char name[BPF_OBJ_NAME_LEN];
         char license[64];
         __u32 kern_version;
 
+        enum bpf_object_state state;
         struct bpf_program *programs;
         size_t nr_programs;
         struct bpf_map *maps;
@@ -686,7 +693,6 @@ struct bpf_object {
         int nr_extern;
         int kconfig_map_idx;
 
-        bool loaded;
         bool has_subcalls;
         bool has_rodata;
 
@@ -1511,7 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
         obj->kconfig_map_idx = -1;
 
         obj->kern_version = get_kernel_version();
-        obj->loaded = false;
+        obj->state = OBJ_OPEN;
 
         return obj;
 }
@@ -4845,14 +4851,19 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
         return 0;
 }
 
+static bool map_is_created(const struct bpf_map *map)
+{
+        return map->obj->state >= OBJ_PREPARED || map->reused;
+}
+
 bool bpf_map__autocreate(const struct bpf_map *map)
 {
         return map->autocreate;
 }
 
 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
 {
-        if (map->obj->loaded)
+        if (map_is_created(map))
                 return libbpf_err(-EBUSY);
 
         map->autocreate = autocreate;
@@ -4946,7 +4957,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
 
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
-        if (map->obj->loaded)
+        if (map_is_created(map))
                 return libbpf_err(-EBUSY);
 
         map->def.max_entries = max_entries;
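
A side effect visible in the two map hunks above: map_is_created() is now keyed off OBJ_PREPARED, so map-shaping setters such as bpf_map__set_max_entries() have to run between open and prepare; once the object is prepared the maps already exist and the setters return -EBUSY. A hedged sketch (object file and map name are hypothetical):

#include <bpf/libbpf.h>

/* sketch: tune map properties before prepare; after prepare the maps are created */
int tune_then_prepare(void)
{
        struct bpf_object *obj;
        struct bpf_map *map;
        int err;

        obj = bpf_object__open_file("prog.bpf.o", NULL);          /* hypothetical object file */
        if (!obj)
                return -1;

        map = bpf_object__find_map_by_name(obj, "my_map");         /* hypothetical map name */
        if (map)
                bpf_map__set_max_entries(map, 4096);               /* ok: object is still OBJ_OPEN */

        err = bpf_object__prepare(obj);                            /* maps get created here */

        if (!err && map)
                err = bpf_map__set_max_entries(map, 8192);         /* -EBUSY: map_is_created() is now true */

        bpf_object__close(obj);
        return err;
}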
@@ -5191,11 +5202,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 
 static void bpf_map__destroy(struct bpf_map *map);
 
-static bool map_is_created(const struct bpf_map *map)
-{
-        return map->obj->loaded || map->reused;
-}
-
 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
 {
         LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -7895,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
         size_t i;
         int err;
 
-        for (i = 0; i < obj->nr_programs; i++) {
-                prog = &obj->programs[i];
-                err = bpf_object__sanitize_prog(obj, prog);
-                if (err)
-                        return err;
-        }
-
         for (i = 0; i < obj->nr_programs; i++) {
                 prog = &obj->programs[i];
                 if (prog_is_subprog(obj, prog))
@@ -7927,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
         return 0;
 }
 
+static int bpf_object_prepare_progs(struct bpf_object *obj)
+{
+        struct bpf_program *prog;
+        size_t i;
+        int err;
+
+        for (i = 0; i < obj->nr_programs; i++) {
+                prog = &obj->programs[i];
+                err = bpf_object__sanitize_prog(obj, prog);
+                if (err)
+                        return err;
+        }
+        return 0;
+}
+
 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 
 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
@@ -8543,14 +8557,77 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
         return 0;
 }
 
+static void bpf_object_unpin(struct bpf_object *obj)
+{
+        int i;
+
+        /* unpin any maps that were auto-pinned during load */
+        for (i = 0; i < obj->nr_maps; i++)
+                if (obj->maps[i].pinned && !obj->maps[i].reused)
+                        bpf_map__unpin(&obj->maps[i], NULL);
+}
+
+static void bpf_object_post_load_cleanup(struct bpf_object *obj)
+{
+        int i;
+
+        /* clean up fd_array */
+        zfree(&obj->fd_array);
+
+        /* clean up module BTFs */
+        for (i = 0; i < obj->btf_module_cnt; i++) {
+                close(obj->btf_modules[i].fd);
+                btf__free(obj->btf_modules[i].btf);
+                free(obj->btf_modules[i].name);
+        }
+        obj->btf_module_cnt = 0;
+        zfree(&obj->btf_modules);
+
+        /* clean up vmlinux BTF */
+        btf__free(obj->btf_vmlinux);
+        obj->btf_vmlinux = NULL;
+}
+
+static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
+{
+        int err;
+
+        if (obj->state >= OBJ_PREPARED) {
+                pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
+                return -EINVAL;
+        }
+
+        err = bpf_object_prepare_token(obj);
+        err = err ? : bpf_object__probe_loading(obj);
+        err = err ? : bpf_object__load_vmlinux_btf(obj, false);
+        err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
+        err = err ? : bpf_object__sanitize_maps(obj);
+        err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+        err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
+        err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+        err = err ? : bpf_object__sanitize_and_load_btf(obj);
+        err = err ? : bpf_object__create_maps(obj);
+        err = err ? : bpf_object_prepare_progs(obj);
+
+        if (err) {
+                bpf_object_unpin(obj);
+                bpf_object_unload(obj);
+                obj->state = OBJ_LOADED;
+                return err;
+        }
+
+        obj->state = OBJ_PREPARED;
+        return 0;
+}
+
 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
-        int err, i;
+        int err;
 
         if (!obj)
                 return libbpf_err(-EINVAL);
 
-        if (obj->loaded) {
+        if (obj->state >= OBJ_LOADED) {
                 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
                 return libbpf_err(-EINVAL);
         }
@@ -8565,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
                 return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
         }
 
-        err = bpf_object_prepare_token(obj);
-        err = err ? : bpf_object__probe_loading(obj);
-        err = err ? : bpf_object__load_vmlinux_btf(obj, false);
-        err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
-        err = err ? : bpf_object__sanitize_maps(obj);
-        err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-        err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
-        err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
-        err = err ? : bpf_object__sanitize_and_load_btf(obj);
-        err = err ? : bpf_object__create_maps(obj);
-        err = err ? : bpf_object__load_progs(obj, extra_log_level);
+        if (obj->state < OBJ_PREPARED) {
+                err = bpf_object_prepare(obj, target_btf_path);
+                if (err)
+                        return libbpf_err(err);
+        }
+        err = bpf_object__load_progs(obj, extra_log_level);
         err = err ? : bpf_object_init_prog_arrays(obj);
         err = err ? : bpf_object_prepare_struct_ops(obj);
 
@@ -8587,36 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
                 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
         }
 
-        /* clean up fd_array */
-        zfree(&obj->fd_array);
+        bpf_object_post_load_cleanup(obj);
+        obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
 
-        /* clean up module BTFs */
-        for (i = 0; i < obj->btf_module_cnt; i++) {
-                close(obj->btf_modules[i].fd);
-                btf__free(obj->btf_modules[i].btf);
-                free(obj->btf_modules[i].name);
+        if (err) {
+                bpf_object_unpin(obj);
+                bpf_object_unload(obj);
+                pr_warn("failed to load object '%s'\n", obj->path);
+                return libbpf_err(err);
         }
-        free(obj->btf_modules);
-
-        /* clean up vmlinux BTF */
-        btf__free(obj->btf_vmlinux);
-        obj->btf_vmlinux = NULL;
-
-        obj->loaded = true; /* doesn't matter if successfully or not */
-
-        if (err)
-                goto out;
 
         return 0;
-out:
-        /* unpin any maps that were auto-pinned during load */
-        for (i = 0; i < obj->nr_maps; i++)
-                if (obj->maps[i].pinned && !obj->maps[i].reused)
-                        bpf_map__unpin(&obj->maps[i], NULL);
+}
 
-        bpf_object_unload(obj);
-        pr_warn("failed to load object '%s'\n", obj->path);
-        return libbpf_err(err);
+int bpf_object__prepare(struct bpf_object *obj)
+{
+        return libbpf_err(bpf_object_prepare(obj, NULL));
 }
 
 int bpf_object__load(struct bpf_object *obj)
@@ -8866,7 +8924,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
         if (!obj)
                 return libbpf_err(-ENOENT);
 
-        if (!obj->loaded) {
+        if (obj->state < OBJ_PREPARED) {
                 pr_warn("object not yet loaded; load it first\n");
                 return libbpf_err(-ENOENT);
         }
@@ -8945,7 +9003,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
         if (!obj)
                 return libbpf_err(-ENOENT);
 
-        if (!obj->loaded) {
+        if (obj->state < OBJ_LOADED) {
                 pr_warn("object not yet loaded; load it first\n");
                 return libbpf_err(-ENOENT);
         }
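
The two pinning hunks above introduce an asymmetry: maps exist once the object is prepared, so bpf_object__pin_maps() is allowed from OBJ_PREPARED on, while bpf_object__pin_programs() still requires a full load. A hedged sketch (the pin paths are hypothetical):

#include <bpf/libbpf.h>

/* sketch: maps can be pinned right after prepare; programs only after load */
int pin_maps_early(struct bpf_object *obj)
{
        int err;

        err = bpf_object__prepare(obj);
        if (err)
                return err;

        /* allowed: object is OBJ_PREPARED, maps already exist */
        err = bpf_object__pin_maps(obj, "/sys/fs/bpf/example_maps");       /* hypothetical path */
        if (err)
                return err;

        /* bpf_object__pin_programs() here would fail with -ENOENT
         * ("object not yet loaded"); load first, then pin programs */
        err = bpf_object__load(obj);
        if (err)
                return err;

        return bpf_object__pin_programs(obj, "/sys/fs/bpf/example_progs"); /* hypothetical path */
}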
@@ -9064,6 +9122,13 @@ void bpf_object__close(struct bpf_object *obj)
         if (IS_ERR_OR_NULL(obj))
                 return;
 
+        /*
+         * if user called bpf_object__prepare() without ever getting to
+         * bpf_object__load(), we need to clean up stuff that is normally
+         * cleaned up at the end of loading step
+         */
+        bpf_object_post_load_cleanup(obj);
+
         usdt_manager_free(obj->usdt_man);
         obj->usdt_man = NULL;
 
@@ -9132,7 +9197,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
 
 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
 {
-        if (obj->loaded)
+        if (obj->state >= OBJ_LOADED)
                 return libbpf_err(-EINVAL);
 
         obj->kern_version = kern_version;
@@ -9229,7 +9294,7 @@ bool bpf_program__autoload(const struct bpf_program *prog)
 
 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
 {
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EINVAL);
 
         prog->autoload = autoload;
@@ -9261,7 +9326,7 @@ int bpf_program__set_insns(struct bpf_program *prog,
 {
         struct bpf_insn *insns;
 
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EBUSY);
 
         insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
@@ -9304,7 +9369,7 @@ static int last_custom_sec_def_handler_id;
 
 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 {
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EBUSY);
 
         /* if type is not changed, do nothing */
@@ -9335,7 +9400,7 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
                                           enum bpf_attach_type type)
 {
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EBUSY);
 
         prog->expected_attach_type = type;
@@ -9349,7 +9414,7 @@ __u32 bpf_program__flags(const struct bpf_program *prog)
 
 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
 {
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EBUSY);
 
         prog->prog_flags = flags;
@@ -9363,7 +9428,7 @@ __u32 bpf_program__log_level(const struct bpf_program *prog)
 
 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
 {
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EBUSY);
 
         prog->log_level = log_level;
@@ -9382,7 +9447,7 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
                 return libbpf_err(-EINVAL);
         if (prog->log_size > UINT_MAX)
                 return libbpf_err(-EINVAL);
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EBUSY);
 
         prog->log_buf = log_buf;
@@ -10299,7 +10364,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
 
 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
 {
-        if (map->obj->loaded || map->reused)
+        if (map_is_created(map))
                 return libbpf_err(-EBUSY);
 
         if (map->mmaped) {
@@ -10345,7 +10410,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
 {
         size_t actual_sz;
 
-        if (map->obj->loaded || map->reused)
+        if (map_is_created(map))
                 return libbpf_err(-EBUSY);
 
         if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
@@ -13666,7 +13731,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
         if (!prog || attach_prog_fd < 0)
                 return libbpf_err(-EINVAL);
 
-        if (prog->obj->loaded)
+        if (prog->obj->state >= OBJ_LOADED)
                 return libbpf_err(-EINVAL);
 
         if (attach_prog_fd && !attach_func_name) {
