
Commit 4af0a9c

ring-buffer: Add ring_buffer_meta_scratch()
Now that there is a single meta-data block at the start of the persistent
memory used by the ring buffer, allow the caller to request some memory
right after that data to use as its own persistent memory. Also fix some
white space issues with ring_buffer_alloc().

Cc: Mark Rutland <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Andrew Morton <[email protected]>
Link: https://lore.kernel.org/[email protected]
Reviewed-by: Masami Hiramatsu (Google) <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
Parent: 4009cc3
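To make the new interface concrete, here is a minimal, hypothetical caller sketch. Only ring_buffer_alloc_range(), ring_buffer_meta_scratch(), and RB_FL_OVERWRITE come from the kernel; the reserved-region parameters and struct boot_state are invented for illustration:

/* Sketch only: a module reserving caller-owned persistent memory
 * right after the ring buffer's meta data.
 */
#include <linux/ring_buffer.h>
#include <linux/errno.h>

struct boot_state {			/* hypothetical caller data */
	unsigned int	boot_count;
};

static int my_setup(unsigned long mem_start, unsigned long mem_size)
{
	struct trace_buffer *buffer;
	struct boot_state *state;
	unsigned int scratch_size;

	/* Request sizeof(*state) bytes of scratch; the ring buffer
	 * rounds the request up to word alignment and places the
	 * per-CPU buffers after it.
	 */
	buffer = ring_buffer_alloc_range(mem_size, RB_FL_OVERWRITE, 0,
					 mem_start, mem_size,
					 sizeof(*state));
	if (!buffer)
		return -ENOMEM;

	/* The scratch area persists across reboots along with the
	 * ring buffer contents; it is zeroed only on a fresh init.
	 */
	state = ring_buffer_meta_scratch(buffer, &scratch_size);
	if (state && scratch_size >= sizeof(*state))
		state->boot_count++;

	return 0;
}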

File tree: 3 files changed (+34 −9 lines)

include/linux/ring_buffer.h (4 additions, 2 deletions)

@@ -92,9 +92,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
 					       int order, unsigned long start,
 					       unsigned long range_size,
+					       unsigned long scratch_size,
 					       struct lock_class_key *key);
 
 bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kaslr_addr);
+void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);
 
 /*
  * Because the ring buffer is generic, if other users of the ring buffer get
@@ -112,11 +114,11 @@ bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kas
  * traced by ftrace, it can produce lockdep warnings. We need to keep each
  * ring buffer's lock class separate.
  */
-#define ring_buffer_alloc_range(size, flags, order, start, range_size)	\
+#define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size)	\
 ({									\
 	static struct lock_class_key __key;				\
 	__ring_buffer_alloc_range((size), (flags), (order), (start),	\
-				  (range_size), &__key);		\
+				  (range_size), (s_size), &__key);	\
 })
 
 typedef bool (*ring_buffer_cond_fn)(void *data);
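The wrapper macro keeps its existing shape: each call site gets its own static lock_class_key, so lockdep can tell the resulting ring buffers apart, and the new s_size argument is simply forwarded. As an illustration (SZ_1M, SZ_2M, and base are invented arguments, not from this commit), a call such as ring_buffer_alloc_range(SZ_1M, 0, 0, base, SZ_2M, 64) expands to roughly:

({
	static struct lock_class_key __key;	/* one key per call site */
	__ring_buffer_alloc_range((SZ_1M), (0), (0), (base),
				  (SZ_2M), (64), &__key);
})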

kernel/trace/ring_buffer.c (29 additions, 6 deletions)

@@ -1678,7 +1678,7 @@ static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
  * See if the existing memory contains a valid meta section.
  * if so, use that, otherwise initialize it.
  */
-static bool rb_meta_init(struct trace_buffer *buffer)
+static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
 {
 	unsigned long ptr = buffer->range_addr_start;
 	struct ring_buffer_meta *bmeta;
@@ -1696,6 +1696,7 @@ static bool rb_meta_init(struct trace_buffer *buffer)
 	/* The first buffer will start word size after the meta page */
 	ptr += sizeof(*bmeta);
 	ptr = ALIGN(ptr, sizeof(long));
+	ptr += scratch_size;
 
 	if (bmeta->magic != RING_BUFFER_META_MAGIC) {
 		pr_info("Ring buffer boot meta mismatch of magic\n");
@@ -1730,6 +1731,9 @@ static bool rb_meta_init(struct trace_buffer *buffer)
 	bmeta->total_size = total_size;
 	bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
 
+	/* Zero out the scratch pad */
+	memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
+
 	return false;
 }
 
@@ -1954,7 +1958,7 @@ static void rb_meta_init_text_addr(struct ring_buffer_cpu_meta *meta)
 #endif
 }
 
-static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
 {
 	struct ring_buffer_cpu_meta *meta;
 	struct ring_buffer_meta *bmeta;
@@ -1969,7 +1973,7 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
 	subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
 	/* If subbuf_mask fails to allocate, then rb_meta_valid() will return false */
 
-	if (rb_meta_init(buffer))
+	if (rb_meta_init(buffer, scratch_size))
 		valid = true;
 
 	bmeta = buffer->meta;
@@ -2367,6 +2371,7 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 					 int order, unsigned long start,
 					 unsigned long end,
+					 unsigned long scratch_size,
 					 struct lock_class_key *key)
 {
 	struct trace_buffer *buffer;
@@ -2416,10 +2421,15 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 		/* Make sure that start is word aligned */
 		start = ALIGN(start, sizeof(long));
 
+		/* scratch_size needs to be aligned too */
+		scratch_size = ALIGN(scratch_size, sizeof(long));
+
 		/* Subtract the buffer meta data and word aligned */
 		buffers_start = start + sizeof(struct ring_buffer_cpu_meta);
 		buffers_start = ALIGN(buffers_start, sizeof(long));
+		buffers_start += scratch_size;
 
+		/* Calculate the size for the per CPU data */
 		size = end - buffers_start;
 		size = size / nr_cpu_ids;
 
@@ -2456,7 +2466,7 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 		buffer->range_addr_start = start;
 		buffer->range_addr_end = end;
 
-		rb_range_meta_init(buffer, nr_pages);
+		rb_range_meta_init(buffer, nr_pages, scratch_size);
 	} else {
 
 		/* need at least two pages */
@@ -2509,7 +2519,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 					struct lock_class_key *key)
 {
 	/* Default buffer page size - one system page */
-	return alloc_buffer(size, flags, 0, 0, 0,key);
+	return alloc_buffer(size, flags, 0, 0, 0, 0, key);
 
 }
 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
@@ -2521,6 +2531,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
  * @order: sub-buffer order
 * @start: start of allocated range
 * @range_size: size of allocated range
+ * @scratch_size: size of scratch area (for preallocated memory buffers)
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -2531,9 +2542,11 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
 					       int order, unsigned long start,
 					       unsigned long range_size,
+					       unsigned long scratch_size,
 					       struct lock_class_key *key)
 {
-	return alloc_buffer(size, flags, order, start, start + range_size, key);
+	return alloc_buffer(size, flags, order, start, start + range_size,
+			    scratch_size, key);
 }
 
 /**
@@ -2557,6 +2570,16 @@ bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kas
 	return true;
 }
 
+void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
+{
+	if (!buffer || !buffer->meta)
+		return NULL;
+
+	*size = PAGE_SIZE - sizeof(*buffer->meta);
+
+	return (void *)buffer->meta + sizeof(*buffer->meta);
+}
+
 /**
  * ring_buffer_free - free a ring buffer.
  * @buffer: the buffer to free.
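Taken together, the ring_buffer.c changes slot the scratch area between the boot meta data and the per-CPU buffers. The following sketch restates the layout arithmetic from alloc_buffer() in isolation; it will not compile outside ring_buffer.c (struct ring_buffer_cpu_meta is file-local), and the variable names are taken straight from the diff:

/* Persistent-range layout after this commit (sketch):
 *
 *   start ............ ring buffer meta data, word aligned
 *   + scratch_size ... caller-owned scratch, word aligned
 *   buffers_start .... per-CPU sub-buffers, up to 'end'
 */
start = ALIGN(start, sizeof(long));
scratch_size = ALIGN(scratch_size, sizeof(long));

buffers_start = start + sizeof(struct ring_buffer_cpu_meta);
buffers_start = ALIGN(buffers_start, sizeof(long));
buffers_start += scratch_size;

/* Each CPU gets an equal share of the remaining range */
size = (end - buffers_start) / nr_cpu_ids;

Note that ring_buffer_meta_scratch() reports the remainder of the meta page (PAGE_SIZE - sizeof(*buffer->meta)) rather than the exact scratch_size that was requested, and that rb_meta_init() zeroes the scratch pad only when the meta data is freshly initialized, which is what lets the area survive across reboots.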

kernel/trace/trace.c (1 addition, 1 deletion)

@@ -9218,7 +9218,7 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
 	if (tr->range_addr_start && tr->range_addr_size) {
 		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
 						      tr->range_addr_start,
-						      tr->range_addr_size);
+						      tr->range_addr_size, 0);
 
 #ifdef CONFIG_RANDOMIZE_BASE
 		if (ring_buffer_last_boot_delta(buf->buffer, &tr->kaslr_addr))
