
Commit f30232b

James-A-Clark authored and acmel committed
perf symbols: Update kcore map before merging in remaining symbols
When loading kcore, the main vmlinux map is updated in the same loop
that merges in the remaining maps. If a map that overlaps is merged in
before kcore, the list can become unsortable when the main map
addresses are updated. This will later trigger the check_invariants()
assert:

  $ perf record
  $ perf report
  util/maps.c:96: check_invariants: Assertion `map__end(prev) <=
  map__start(map) || map__start(prev) == map__start(map)' failed.
  Aborted

Fix it by moving the main map update prior to the loop so that
maps__merge_in() can split it if necessary.

Fixes: 659ad34 ("perf maps: Switch from rbtree to lazily sorted array for addresses")
Signed-off-by: James Clark <[email protected]>
Cc: Adrian Hunter <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Athira Rajeev <[email protected]>
Cc: Ian Rogers <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1 parent fd81f52 commit f30232b
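
To make the quoted assertion concrete, here is a minimal, self-contained C sketch of the ordering invariant that check_invariants() enforces on the sorted maps array. This is not perf's actual util/maps.c code: the struct layout, the check loop, and the example addresses are simplified assumptions for illustration only.

/*
 * Sketch (assumption, not perf source): for adjacent maps sorted by start
 * address, each previous map must end at or before the next map's start,
 * or the two must share the same start (an overlap that maps__merge_in()
 * can later split).
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct map {
	uint64_t start;
	uint64_t end;
};

static void check_invariants(const struct map *maps, size_t nr)
{
	for (size_t i = 1; i < nr; i++) {
		const struct map *prev = &maps[i - 1];
		const struct map *map = &maps[i];

		assert(prev->end <= map->start || prev->start == map->start);
	}
}

int main(void)
{
	/* Sorted, non-overlapping maps: the invariant holds. */
	struct map good[] = { { 0x1000, 0x2000 }, { 0x2000, 0x3000 } };

	/*
	 * Hypothetical addresses mimicking the failure mode in the commit
	 * message: the re-addressed main map overlaps an already-merged map
	 * without sharing its start, so the assertion fires and aborts.
	 */
	struct map bad[] = { { 0x1000, 0x3000 }, { 0x2000, 0x4000 } };

	check_invariants(good, 2);	/* passes */
	check_invariants(bad, 2);	/* aborts, like the perf report above */
	return 0;
}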

File tree

1 file changed: +21, -19 lines

tools/perf/util/symbol.c

Lines changed: 21 additions & 19 deletions
@@ -1290,7 +1290,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 {
 	struct maps *kmaps = map__kmaps(map);
 	struct kcore_mapfn_data md;
-	struct map *replacement_map = NULL;
+	struct map *map_ref, *replacement_map = NULL;
 	struct machine *machine;
 	bool is_64_bit;
 	int err, fd;
@@ -1368,31 +1368,33 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 	if (!replacement_map)
 		replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
 
+	/*
+	 * Update addresses of vmlinux map. Re-insert it to ensure maps are
+	 * correctly ordered. Do this before using maps__merge_in() for the
+	 * remaining maps so vmlinux gets split if necessary.
+	 */
+	map_ref = map__get(map);
+	maps__remove(kmaps, map_ref);
+
+	map__set_start(map_ref, map__start(replacement_map));
+	map__set_end(map_ref, map__end(replacement_map));
+	map__set_pgoff(map_ref, map__pgoff(replacement_map));
+	map__set_mapping_type(map_ref, map__mapping_type(replacement_map));
+
+	err = maps__insert(kmaps, map_ref);
+	map__put(map_ref);
+	if (err)
+		goto out_err;
+
 	/* Add new maps */
 	while (!list_empty(&md.maps)) {
 		struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
 		struct map *new_map = new_node->map;
 
 		list_del_init(&new_node->node);
 
-		if (RC_CHK_EQUAL(new_map, replacement_map)) {
-			struct map *map_ref;
-
-			/* Ensure maps are correctly ordered */
-			map_ref = map__get(map);
-			maps__remove(kmaps, map_ref);
-
-			map__set_start(map_ref, map__start(new_map));
-			map__set_end(map_ref, map__end(new_map));
-			map__set_pgoff(map_ref, map__pgoff(new_map));
-			map__set_mapping_type(map_ref, map__mapping_type(new_map));
-
-			err = maps__insert(kmaps, map_ref);
-			map__put(map_ref);
-			map__put(new_map);
-			if (err)
-				goto out_err;
-		} else {
+		/* skip if replacement_map, already inserted above */
+		if (!RC_CHK_EQUAL(new_map, replacement_map)) {
 			/*
 			 * Merge kcore map into existing maps,
 			 * and ensure that current maps (eBPF)
