Skip to content

Commit 7af9ded

Browse files
committed
ring-buffer: Use wait_event_interruptible() in ring_buffer_wait()
Convert ring_buffer_wait() over to wait_event_interruptible(). The default condition is to execute the wait loop inside __wait_event() just once. This does not change the ring_buffer_wait() prototype yet, but restructures the code so that it can take a "cond" and "data" parameter and will call wait_event_interruptible() with a helper function as the condition. The helper function (rb_wait_cond) takes the cond function and data parameters. It will first check if the buffer hit the watermark defined by the "full" parameter and then call the passed in condition parameter. If either are true, it returns true. If rb_wait_cond() does not return true, it will set the appropriate "waiters_pending" flag and returns false. Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wgsNgewHFxZAJiAQznwPMqEtQmi1waeS2O1v6L4c_Um5A@mail.gmail.com/ Link: https://lore.kernel.org/linux-trace-kernel/[email protected] Cc: [email protected] Cc: Masami Hiramatsu <[email protected]> Cc: Mark Rutland <[email protected]> Cc: Mathieu Desnoyers <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: linke li <[email protected]> Cc: Rabin Vincent <[email protected]> Fixes: f3ddb74 ("tracing: Wake up ring buffer waiters on closing of the file") Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent e36f19a commit 7af9ded

File tree

2 files changed

+69
-48
lines changed

2 files changed

+69
-48
lines changed

include/linux/ring_buffer.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
9898
__ring_buffer_alloc((size), (flags), &__key); \
9999
})
100100

101+
typedef bool (*ring_buffer_cond_fn)(void *data);
101102
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
102103
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
103104
struct file *filp, poll_table *poll_table, int full);

kernel/trace/ring_buffer.c

Lines changed: 68 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -843,43 +843,15 @@ static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
843843
return ret;
844844
}
845845

846-
/**
847-
* ring_buffer_wait - wait for input to the ring buffer
848-
* @buffer: buffer to wait on
849-
* @cpu: the cpu buffer to wait on
850-
* @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
851-
*
852-
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
853-
* as data is added to any of the @buffer's cpu buffers. Otherwise
854-
* it will wait for data to be added to a specific cpu buffer.
855-
*/
856-
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
846+
static inline bool
847+
rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
848+
int cpu, int full, ring_buffer_cond_fn cond, void *data)
857849
{
858-
struct ring_buffer_per_cpu *cpu_buffer;
859-
DEFINE_WAIT(wait);
860-
struct rb_irq_work *work;
861-
int ret = 0;
862-
863-
/*
864-
* Depending on what the caller is waiting for, either any
865-
* data in any cpu buffer, or a specific buffer, put the
866-
* caller on the appropriate wait queue.
867-
*/
868-
if (cpu == RING_BUFFER_ALL_CPUS) {
869-
work = &buffer->irq_work;
870-
/* Full only makes sense on per cpu reads */
871-
full = 0;
872-
} else {
873-
if (!cpumask_test_cpu(cpu, buffer->cpumask))
874-
return -ENODEV;
875-
cpu_buffer = buffer->buffers[cpu];
876-
work = &cpu_buffer->irq_work;
877-
}
850+
if (rb_watermark_hit(buffer, cpu, full))
851+
return true;
878852

879-
if (full)
880-
prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
881-
else
882-
prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
853+
if (cond(data))
854+
return true;
883855

884856
/*
885857
* The events can happen in critical sections where
@@ -902,27 +874,75 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
902874
* a task has been queued. It's OK for spurious wake ups.
903875
*/
904876
if (full)
905-
work->full_waiters_pending = true;
877+
rbwork->full_waiters_pending = true;
906878
else
907-
work->waiters_pending = true;
879+
rbwork->waiters_pending = true;
908880

909-
if (rb_watermark_hit(buffer, cpu, full))
910-
goto out;
881+
return false;
882+
}
911883

912-
if (signal_pending(current)) {
913-
ret = -EINTR;
914-
goto out;
884+
/*
885+
* The default wait condition for ring_buffer_wait() is to just exit the
886+
* wait loop the first time it is woken up.
887+
*/
888+
static bool rb_wait_once(void *data)
889+
{
890+
long *once = data;
891+
892+
/* wait_event() actually calls this twice before scheduling */
893+
if (*once > 1)
894+
return true;
895+
896+
(*once)++;
897+
return false;
898+
}
899+
900+
/**
901+
* ring_buffer_wait - wait for input to the ring buffer
902+
* @buffer: buffer to wait on
903+
* @cpu: the cpu buffer to wait on
904+
* @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
905+
*
906+
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
907+
* as data is added to any of the @buffer's cpu buffers. Otherwise
908+
* it will wait for data to be added to a specific cpu buffer.
909+
*/
910+
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
911+
{
912+
struct ring_buffer_per_cpu *cpu_buffer;
913+
struct wait_queue_head *waitq;
914+
ring_buffer_cond_fn cond;
915+
struct rb_irq_work *rbwork;
916+
void *data;
917+
long once = 0;
918+
int ret = 0;
919+
920+
cond = rb_wait_once;
921+
data = &once;
922+
923+
/*
924+
* Depending on what the caller is waiting for, either any
925+
* data in any cpu buffer, or a specific buffer, put the
926+
* caller on the appropriate wait queue.
927+
*/
928+
if (cpu == RING_BUFFER_ALL_CPUS) {
929+
rbwork = &buffer->irq_work;
930+
/* Full only makes sense on per cpu reads */
931+
full = 0;
932+
} else {
933+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
934+
return -ENODEV;
935+
cpu_buffer = buffer->buffers[cpu];
936+
rbwork = &cpu_buffer->irq_work;
915937
}
916938

917-
schedule();
918-
out:
919939
if (full)
920-
finish_wait(&work->full_waiters, &wait);
940+
waitq = &rbwork->full_waiters;
921941
else
922-
finish_wait(&work->waiters, &wait);
942+
waitq = &rbwork->waiters;
923943

924-
if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
925-
ret = -EINTR;
944+
ret = wait_event_interruptible((*waitq),
945+
rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
926946

927947
return ret;
928948
}

0 commit comments

Comments
 (0)