@@ -1678,7 +1678,7 @@ static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
  * See if the existing memory contains a valid meta section.
  * if so, use that, otherwise initialize it.
  */
-static bool rb_meta_init(struct trace_buffer *buffer)
+static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
 {
 	unsigned long ptr = buffer->range_addr_start;
 	struct ring_buffer_meta *bmeta;
@@ -1696,6 +1696,7 @@ static bool rb_meta_init(struct trace_buffer *buffer)
 	/* The first buffer will start word size after the meta page */
 	ptr += sizeof(*bmeta);
 	ptr = ALIGN(ptr, sizeof(long));
+	ptr += scratch_size;
 
 	if (bmeta->magic != RING_BUFFER_META_MAGIC) {
 		pr_info("Ring buffer boot meta mismatch of magic\n");
@@ -1730,6 +1731,9 @@ static bool rb_meta_init(struct trace_buffer *buffer)
 	bmeta->total_size = total_size;
 	bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
 
+	/* Zero out the scratch pad */
+	memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
+
 	return false;
 }
 
@@ -1954,7 +1958,7 @@ static void rb_meta_init_text_addr(struct ring_buffer_cpu_meta *meta)
 #endif
 }
 
-static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
 {
 	struct ring_buffer_cpu_meta *meta;
 	struct ring_buffer_meta *bmeta;
@@ -1969,7 +1973,7 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
 	subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
 	/* If subbuf_mask fails to allocate, then rb_meta_valid() will return false */
 
-	if (rb_meta_init(buffer))
+	if (rb_meta_init(buffer, scratch_size))
 		valid = true;
 
 	bmeta = buffer->meta;
@@ -2367,6 +2371,7 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 					 int order, unsigned long start,
 					 unsigned long end,
+					 unsigned long scratch_size,
 					 struct lock_class_key *key)
 {
 	struct trace_buffer *buffer;
@@ -2416,10 +2421,15 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 	/* Make sure that start is word aligned */
 	start = ALIGN(start, sizeof(long));
 
+	/* scratch_size needs to be aligned too */
+	scratch_size = ALIGN(scratch_size, sizeof(long));
+
 	/* Subtract the buffer meta data and word aligned */
 	buffers_start = start + sizeof(struct ring_buffer_cpu_meta);
 	buffers_start = ALIGN(buffers_start, sizeof(long));
+	buffers_start += scratch_size;
 
+	/* Calculate the size for the per CPU data */
 	size = end - buffers_start;
 	size = size / nr_cpu_ids;
 
@@ -2456,7 +2466,7 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 		buffer->range_addr_start = start;
 		buffer->range_addr_end = end;
 
-		rb_range_meta_init(buffer, nr_pages);
+		rb_range_meta_init(buffer, nr_pages, scratch_size);
 	} else {
 
 		/* need at least two pages */
@@ -2509,7 +2519,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 					struct lock_class_key *key)
 {
 	/* Default buffer page size - one system page */
-	return alloc_buffer(size, flags, 0, 0, 0, key);
+	return alloc_buffer(size, flags, 0, 0, 0, 0, key);
 
 }
 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
@@ -2521,6 +2531,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
  * @order: sub-buffer order
  * @start: start of allocated range
  * @range_size: size of allocated range
+ * @scratch_size: size of scratch area (for preallocated memory buffers)
  * @key: ring buffer reader_lock_key.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -2531,9 +2542,11 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
 					       int order, unsigned long start,
 					       unsigned long range_size,
+					       unsigned long scratch_size,
 					       struct lock_class_key *key)
 {
-	return alloc_buffer(size, flags, order, start, start + range_size, key);
+	return alloc_buffer(size, flags, order, start, start + range_size,
+			    scratch_size, key);
 }
 
 /**
@@ -2557,6 +2570,16 @@ bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kas
 	return true;
 }
 
+void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
+{
+	if (!buffer || !buffer->meta)
+		return NULL;
+
+	*size = PAGE_SIZE - sizeof(*buffer->meta);
+
+	return (void *)buffer->meta + sizeof(*buffer->meta);
+}
+
 /**
  * ring_buffer_free - free a ring buffer.
  * @buffer: the buffer to free.
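
For context, a hedged usage sketch (not part of the patch) of the two interfaces touched above: __ring_buffer_alloc_range() now takes a scratch_size argument that reserves a word-aligned scratch area between the ring_buffer_meta header and the per-CPU buffers, and ring_buffer_meta_scratch() hands that area and its available size back to the caller. The wrapper function, the my_boot_scratch structure and the values stored in it below are hypothetical; only __ring_buffer_alloc_range(), ring_buffer_meta_scratch() and RB_FL_OVERWRITE come from the code above.

/*
 * Hedged sketch of a caller of the new scratch-area interface.
 * my_alloc_persistent() and struct my_boot_scratch are hypothetical.
 */
#include <linux/ring_buffer.h>

struct my_boot_scratch {		/* hypothetical caller-defined data */
	unsigned long text_addr;
	unsigned long data_addr;
};

static struct trace_buffer *my_alloc_persistent(unsigned long start,
						unsigned long range_size)
{
	static struct lock_class_key key;
	struct my_boot_scratch *scratch;
	struct trace_buffer *buffer;
	unsigned int scratch_avail;

	/*
	 * Reserve room for my_boot_scratch between the meta data and the
	 * per-CPU buffers; for range buffers the per-CPU size is derived
	 * from the range itself, so range_size is what really matters.
	 */
	buffer = __ring_buffer_alloc_range(range_size, RB_FL_OVERWRITE, 0,
					   start, range_size,
					   sizeof(*scratch), &key);
	if (!buffer)
		return NULL;

	scratch = ring_buffer_meta_scratch(buffer, &scratch_avail);
	if (scratch && scratch_avail >= sizeof(*scratch)) {
		/* Example only: stash data that must be found again later */
		scratch->text_addr = 0;
		scratch->data_addr = 0;
	}

	return buffer;
}

Note that the scratch area is carved out of the preallocated range before the per-CPU size is calculated, so a large scratch_size directly shrinks the usable buffer space.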