Skip to content

Commit fd39e48

Browse files
committed
tracing: Have persistent trace instances save module addresses
For trace instances that are mapped to persistent memory, have them use the scratch area to save the currently loaded modules. This records where the modules were loaded, so that on the next boot their addresses can be deciphered by comparing against where they were loaded previously. Cc: Mark Rutland <[email protected]> Cc: Mathieu Desnoyers <[email protected]> Cc: Andrew Morton <[email protected]> Link: https://lore.kernel.org/[email protected] Reviewed-by: Masami Hiramatsu (Google) <[email protected]> Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent 966b7d0 commit fd39e48

File tree

1 file changed

+89
-9
lines changed

1 file changed

+89
-9
lines changed

kernel/trace/trace.c

Lines changed: 89 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5988,14 +5988,60 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
59885988
return __tracing_resize_ring_buffer(tr, size, cpu_id);
59895989
}
59905990

5991+
/*
 * One saved module record: the address a module's text section was
 * loaded at, paired with its name, so a later boot can map old
 * addresses back to module symbols.
 */
struct trace_mod_entry {
	unsigned long	mod_addr;			/* base of mod->mem[MOD_TEXT] at save time */
	char		mod_name[MODULE_NAME_LEN];	/* NUL-terminated module name */
};

/*
 * Layout of the persistent ring buffer scratch area.  Holds the KASLR
 * base of the boot that wrote it plus a flexible array of the modules
 * that were loaded then.  nr_entries is bounds-checked against the
 * scratch area size before entries[] is trusted (see setup_trace_scratch()).
 */
struct trace_scratch {
	unsigned long		kaslr_addr;	/* kaslr_offset() of the writing boot */
	unsigned long		nr_entries;	/* number of valid entries[] below */
	struct trace_mod_entry	entries[];
};
59946001

6002+
static int save_mod(struct module *mod, void *data)
6003+
{
6004+
struct trace_array *tr = data;
6005+
struct trace_scratch *tscratch;
6006+
struct trace_mod_entry *entry;
6007+
unsigned int size;
6008+
6009+
tscratch = tr->scratch;
6010+
if (!tscratch)
6011+
return -1;
6012+
size = tr->scratch_size;
6013+
6014+
if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
6015+
return -1;
6016+
6017+
entry = &tscratch->entries[tscratch->nr_entries];
6018+
6019+
tscratch->nr_entries++;
6020+
6021+
entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
6022+
strscpy(entry->mod_name, mod->name);
6023+
6024+
return 0;
6025+
}
6026+
59956027
static void update_last_data(struct trace_array *tr)
59966028
{
59976029
struct trace_scratch *tscratch;
59986030

6031+
if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
6032+
return;
6033+
6034+
/* Reset the module list and reload them */
6035+
if (tr->scratch) {
6036+
struct trace_scratch *tscratch = tr->scratch;
6037+
6038+
memset(tscratch->entries, 0,
6039+
flex_array_size(tscratch, entries, tscratch->nr_entries));
6040+
tscratch->nr_entries = 0;
6041+
6042+
module_for_each_mod(save_mod, tr);
6043+
}
6044+
59996045
if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
60006046
return;
60016047

@@ -9224,6 +9270,46 @@ static struct dentry *trace_instance_dir;
92249270
static void
92259271
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
92269272

9273+
static void setup_trace_scratch(struct trace_array *tr,
9274+
struct trace_scratch *tscratch, unsigned int size)
9275+
{
9276+
struct trace_mod_entry *entry;
9277+
9278+
if (!tscratch)
9279+
return;
9280+
9281+
tr->scratch = tscratch;
9282+
tr->scratch_size = size;
9283+
9284+
#ifdef CONFIG_RANDOMIZE_BASE
9285+
if (tscratch->kaslr_addr)
9286+
tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
9287+
#endif
9288+
9289+
if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
9290+
goto reset;
9291+
9292+
/* Check if each module name is a valid string */
9293+
for (int i = 0; i < tscratch->nr_entries; i++) {
9294+
int n;
9295+
9296+
entry = &tscratch->entries[i];
9297+
9298+
for (n = 0; n < MODULE_NAME_LEN; n++) {
9299+
if (entry->mod_name[n] == '\0')
9300+
break;
9301+
if (!isprint(entry->mod_name[n]))
9302+
goto reset;
9303+
}
9304+
if (n == MODULE_NAME_LEN)
9305+
goto reset;
9306+
}
9307+
return;
9308+
reset:
9309+
/* Invalid trace modules */
9310+
memset(tscratch, 0, size);
9311+
}
9312+
92279313
static int
92289314
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
92299315
{
@@ -9236,21 +9322,15 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
92369322
buf->tr = tr;
92379323

92389324
if (tr->range_addr_start && tr->range_addr_size) {
9325+
/* Add scratch buffer to handle 128 modules */
92399326
buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
92409327
tr->range_addr_start,
92419328
tr->range_addr_size,
9242-
sizeof(*tscratch));
9329+
struct_size(tscratch, entries, 128));
92439330

92449331
tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
9245-
if (tscratch) {
9246-
tr->scratch = tscratch;
9247-
tr->scratch_size = scratch_size;
9332+
setup_trace_scratch(tr, tscratch, scratch_size);
92489333

9249-
#ifdef CONFIG_RANDOMIZE_BASE
9250-
if (tscratch->kaslr_addr)
9251-
tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
9252-
#endif
9253-
}
92549334
/*
92559335
* This is basically the same as a mapped buffer,
92569336
* with the same restrictions.

0 commit comments

Comments
 (0)