@@ -1678,7 +1678,7 @@ static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
  * See if the existing memory contains a valid meta section.
  * if so, use that, otherwise initialize it.
  */
-static bool rb_meta_init(struct trace_buffer *buffer)
+static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
 {
 	unsigned long ptr = buffer->range_addr_start;
 	struct ring_buffer_meta *bmeta;
@@ -1696,6 +1696,7 @@ static bool rb_meta_init(struct trace_buffer *buffer)
 	/* The first buffer will start word size after the meta page */
 	ptr += sizeof(*bmeta);
 	ptr = ALIGN(ptr, sizeof(long));
+	ptr += scratch_size;
 
 	if (bmeta->magic != RING_BUFFER_META_MAGIC) {
 		pr_info("Ring buffer boot meta mismatch of magic\n");
@@ -1730,6 +1731,9 @@ static bool rb_meta_init(struct trace_buffer *buffer)
 	bmeta->total_size = total_size;
 	bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
 
+	/* Zero out the scratch pad */
+	memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
+
 	return false;
 }
 
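With this change the layout of a persistent memory range becomes: the global ring_buffer_meta header, word-alignment padding, a scratch pad of scratch_size bytes, then the per-CPU buffers. A minimal sketch of that offset math, restating the logic above with a hypothetical range_start variable standing in for buffer->range_addr_start:

	/* Illustrative sketch only; mirrors rb_meta_init() above. */
	unsigned long ptr = range_start;	/* buffer->range_addr_start */
	struct ring_buffer_meta *bmeta = (struct ring_buffer_meta *)ptr;

	ptr += sizeof(*bmeta);			/* skip the meta header */
	ptr = ALIGN(ptr, sizeof(long));		/* keep word alignment */
	ptr += scratch_size;			/* reserve the scratch pad */

	/*
	 * ptr is now the start of the per-CPU buffers, so
	 * bmeta->buffers_offset - sizeof(*bmeta) covers exactly the
	 * scratch area (plus alignment padding) that gets zeroed.
	 */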
@@ -1954,7 +1958,7 @@ static void rb_meta_init_text_addr(struct ring_buffer_cpu_meta *meta)
 #endif
 }
 
-static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
 {
 	struct ring_buffer_cpu_meta *meta;
 	struct ring_buffer_meta *bmeta;
@@ -1969,7 +1973,7 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
 	subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
 	/* If subbuf_mask fails to allocate, then rb_meta_valid() will return false */
 
-	if (rb_meta_init(buffer))
+	if (rb_meta_init(buffer, scratch_size))
 		valid = true;
 
 	bmeta = buffer->meta;
@@ -2367,6 +2371,7 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 					 int order, unsigned long start,
 					 unsigned long end,
+					 unsigned long scratch_size,
 					 struct lock_class_key *key)
 {
 	struct trace_buffer *buffer;
@@ -2416,10 +2421,15 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 	/* Make sure that start is word aligned */
 	start = ALIGN(start, sizeof(long));
 
+	/* scratch_size needs to be aligned too */
+	scratch_size = ALIGN(scratch_size, sizeof(long));
+
 	/* Subtract the buffer meta data and word aligned */
 	buffers_start = start + sizeof(struct ring_buffer_cpu_meta);
 	buffers_start = ALIGN(buffers_start, sizeof(long));
+	buffers_start += scratch_size;
 
+	/* Calculate the size for the per CPU data */
 	size = end - buffers_start;
 	size = size / nr_cpu_ids;
 
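As a rough worked example with made-up numbers: on an 8-CPU system handed a 4 MB reserved range with a 4 KB scratch_size, buffers_start is start plus sizeof(struct ring_buffer_cpu_meta) (word aligned) plus 4096, so each CPU's share comes out to (end - buffers_start) / 8, slightly under 512 KB.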
@@ -2456,7 +2466,7 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 		buffer->range_addr_start = start;
 		buffer->range_addr_end = end;
 
-		rb_range_meta_init(buffer, nr_pages);
+		rb_range_meta_init(buffer, nr_pages, scratch_size);
 	} else {
 
 		/* need at least two pages */
@@ -2509,7 +2519,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 					struct lock_class_key *key)
 {
 	/* Default buffer page size - one system page */
-	return alloc_buffer(size, flags, 0, 0, 0, key);
+	return alloc_buffer(size, flags, 0, 0, 0, 0, key);
 
 }
 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
@@ -2521,6 +2531,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
  * @order: sub-buffer order
  * @start: start of allocated range
  * @range_size: size of allocated range
+ * @scratch_size: size of scratch area (for preallocated memory buffers)
  * @key: ring buffer reader_lock_key.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -2531,9 +2542,11 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
 					       int order, unsigned long start,
 					       unsigned long range_size,
+					       unsigned long scratch_size,
 					       struct lock_class_key *key)
 {
-	return alloc_buffer(size, flags, order, start, start + range_size, key);
+	return alloc_buffer(size, flags, order, start, start + range_size,
+			    scratch_size, key);
 }
 
 /**
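A hedged sketch of how a caller might use the extended allocator; the memory-region variables and the lock-class key are placeholders, not part of this commit, and the one-page scratch size is an arbitrary illustration:

	static struct lock_class_key key;	/* hypothetical caller-side key */
	struct trace_buffer *buf;

	/* mem_start/mem_size would describe a preallocated (e.g. reserved) region */
	buf = __ring_buffer_alloc_range(mem_size, RB_FL_OVERWRITE, 0,
					mem_start, mem_size, PAGE_SIZE, &key);
	if (!buf)
		pr_warn("failed to map ring buffer onto reserved range\n");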
@@ -2557,6 +2570,16 @@ bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kas
 	return true;
 }
 
+void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
+{
+	if (!buffer || !buffer->meta)
+		return NULL;
+
+	*size = PAGE_SIZE - sizeof(*buffer->meta);
+
+	return (void *)buffer->meta + sizeof(*buffer->meta);
+}
+
 /**
  * ring_buffer_free - free a ring buffer.
  * @buffer: the buffer to free.
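And a minimal sketch of consuming the scratch area through the new accessor; the persist_scratch layout is hypothetical, chosen only to illustrate the kind of boot-persistent state (such as a KASLR offset, as hinted by ring_buffer_last_boot_delta() above) a caller could keep there:

	struct persist_scratch {		/* hypothetical caller-defined layout */
		unsigned long kaslr_addr;
	};

	unsigned int size;
	struct persist_scratch *ps = ring_buffer_meta_scratch(buffer, &size);

	if (ps && size >= sizeof(*ps))
		ps->kaslr_addr = kaslr_offset();	/* assuming kaslr_offset() is available here */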