
Commit 43766c3
rcu-tasks: Make RCU Tasks Trace make use of RCU scheduler hooks
This commit makes the calls to rcu_tasks_qs() detect and report quiescent states for RCU Tasks Trace. If the task is in a quiescent state and if ->trc_reader_checked is not yet set, the task sets its own ->trc_reader_checked. This will cause the grace-period kthread to remove it from the holdout list if it still remains there.

[ paulmck: Fix conditional compilation per kbuild test robot feedback. ]

Signed-off-by: Paul E. McKenney <[email protected]>
1 parent af051ca commit 43766c3
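
As a rough illustration of the mechanism described in the commit message, here is a standalone userspace model (not kernel code; the struct, field, and function names are invented for the sketch and only mirror ->trc_reader_nesting and ->trc_reader_checked):

/*
 * Standalone model of the self-report described in the commit message:
 * a task with no Tasks Trace reader nesting and an unset "checked" flag
 * sets the flag itself; on its next scan the grace-period kthread then
 * drops the task from the holdout list.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_task {
        int  reader_nesting;    /* >0 while inside a trace reader */
        bool reader_checked;    /* quiescent state already reported? */
        bool on_holdout_list;   /* still blocking the grace period? */
};

/* Roughly what the rcu_tasks_qs() scheduler hook does for the trace flavor. */
static void model_trace_qs(struct model_task *t)
{
        if (!t->reader_checked && t->reader_nesting == 0)
                t->reader_checked = true;       /* self-report */
}

/* Roughly what the grace-period kthread does on its next holdout scan. */
static void model_gp_scan(struct model_task *t)
{
        if (t->reader_checked)
                t->on_holdout_list = false;
}

int main(void)
{
        struct model_task t = { .reader_nesting = 0, .on_holdout_list = true };

        model_trace_qs(&t);     /* task reports its own quiescent state... */
        model_gp_scan(&t);      /* ...so the kthread drops it as a holdout */
        printf("checked=%d on_holdout_list=%d\n",
               t.reader_checked, t.on_holdout_list);
        return 0;
}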

File tree

4 files changed: +43 −14 lines

include/linux/rcupdate.h

Lines changed: 37 additions & 7 deletions

@@ -131,20 +131,50 @@ static inline void rcu_init_nohz(void) { }
  * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU_GENERIC
-#define rcu_tasks_qs(t) \
-	do { \
-		if (READ_ONCE((t)->rcu_tasks_holdout)) \
-			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
+
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+	do { \
+		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+
+# ifdef CONFIG_TASKS_RCU_TRACE
+# define rcu_tasks_trace_qs(t) \
+	do { \
+		if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
+		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
+			smp_store_release(&(t)->trc_reader_checked, true); \
+			smp_mb(); /* Readers partitioned by store. */ \
+		} \
+	} while (0)
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+
+#define rcu_tasks_qs(t, preempt) \
+do { \
+	rcu_tasks_classic_qs((t), (preempt)); \
+	rcu_tasks_trace_qs((t)); \
+} while (0)
+
+# ifdef CONFIG_TASKS_RUDE_RCU
 void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks_rude(void);
+# endif
+
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
-#define rcu_tasks_qs(t) do { } while (0)
+#define rcu_tasks_qs(t, preempt) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) do { } while (0)
 #define call_rcu_tasks call_rcu
 #define synchronize_rcu_tasks synchronize_rcu
@@ -161,7 +191,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_tasks_qs(current); \
+	rcu_tasks_qs(current, false); \
 	cond_resched(); \
 } while (0)
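
One design point in the hunk above: the preempt argument gates only the classic Tasks RCU check, since for that flavor a preemption (unlike a voluntary context switch) is not a quiescent state, while the Tasks Trace check runs either way and depends only on the reader nesting count. Below is a compilable userspace rendering of that macro structure (READ_ONCE()/WRITE_ONCE() and the memory barriers are intentionally omitted; all demo_* names are invented for the sketch):

/*
 * Userspace rendering of the combined quiescent-state macros: the
 * preempt flag suppresses only the classic holdout clearing, while the
 * trace check runs on every call.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_task {
        bool rcu_tasks_holdout;
        int  trc_reader_nesting;
        bool trc_reader_checked;
};

#define demo_tasks_classic_qs(t, preempt) \
        do { \
                if (!(preempt) && (t)->rcu_tasks_holdout) \
                        (t)->rcu_tasks_holdout = false; \
        } while (0)

#define demo_tasks_trace_qs(t) \
        do { \
                if (!(t)->trc_reader_checked && !(t)->trc_reader_nesting) \
                        (t)->trc_reader_checked = true; \
        } while (0)

#define demo_tasks_qs(t, preempt) \
        do { \
                demo_tasks_classic_qs((t), (preempt)); \
                demo_tasks_trace_qs((t)); \
        } while (0)

int main(void)
{
        struct demo_task t = { .rcu_tasks_holdout = true };

        demo_tasks_qs(&t, true);        /* preemption: holdout stays set */
        printf("after preemption:       holdout=%d checked=%d\n",
               t.rcu_tasks_holdout, t.trc_reader_checked);

        demo_tasks_qs(&t, false);       /* voluntary switch: holdout cleared */
        printf("after voluntary switch: holdout=%d checked=%d\n",
               t.rcu_tasks_holdout, t.trc_reader_checked);
        return 0;
}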

include/linux/rcutiny.h

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ static inline void rcu_softirq_qs(void)
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_qs(); \
-		rcu_tasks_qs(current); \
+		rcu_tasks_qs(current, (preempt)); \
 	} while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)

kernel/rcu/tasks.h

Lines changed: 3 additions & 2 deletions

@@ -180,7 +180,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 
 		/* Pick up any new callbacks. */
 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
-		smp_mb__after_unlock_lock(); // Order updates vs. GP.
+		smp_mb__after_spinlock(); // Order updates vs. GP.
 		list = rtp->cbs_head;
 		rtp->cbs_head = NULL;
 		rtp->cbs_tail = &rtp->cbs_head;
@@ -874,7 +874,7 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
 				    struct list_head *hop)
 {
 	WRITE_ONCE(t->trc_reader_need_end, false);
-	t->trc_reader_checked = false;
+	WRITE_ONCE(t->trc_reader_checked, false);
 	t->trc_ipi_to_cpu = -1;
 	trc_wait_for_one_reader(t, hop);
 }
@@ -983,6 +983,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
 	}
 	smp_mb(); // Caller's code must be ordered after wakeup.
+		  // Pairs with pretty much every ordering primitive.
 }
 
 /* Report any needed quiescent state for this exiting task. */
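
A note on the WRITE_ONCE() change above: with this commit, ->trc_reader_checked can also be stored to concurrently by the task itself from the rcu_tasks_qs() hook (via smp_store_release()), so the grace-period side's plain store becomes a marked one. Below is a loose userspace analogue using C11 atomics in place of the kernel's marked accesses (the global flag and helper names are invented for the sketch):

/*
 * Loose analogue of the marked accesses to ->trc_reader_checked: the
 * grace-period side resets the flag with a relaxed store (standing in
 * for WRITE_ONCE()), while the task side may set it with a release
 * store (standing in for smp_store_release()).  Build with: cc -pthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool reader_checked;      /* models ->trc_reader_checked */

/* Grace-period side: clear the flag before scanning the task. */
static void gp_reset_checked(void)
{
        atomic_store_explicit(&reader_checked, false, memory_order_relaxed);
}

/* Task side: self-report a quiescent state from the scheduler hook. */
static void *task_self_report(void *arg)
{
        (void)arg;
        atomic_store_explicit(&reader_checked, true, memory_order_release);
        return NULL;
}

int main(void)
{
        pthread_t tid;

        gp_reset_checked();
        pthread_create(&tid, NULL, task_self_report, NULL);
        pthread_join(tid, NULL);
        printf("reader_checked = %d\n",
               atomic_load_explicit(&reader_checked, memory_order_acquire));
        return 0;
}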

kernel/rcu/tree_plugin.h

Lines changed: 2 additions & 4 deletions

@@ -331,8 +331,7 @@ void rcu_note_context_switch(bool preempt)
 	rcu_qs();
 	if (rdp->exp_deferred_qs)
 		rcu_report_exp_rdp(rdp);
-	if (!preempt)
-		rcu_tasks_qs(current);
+	rcu_tasks_qs(current, preempt);
 	trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -841,8 +840,7 @@ void rcu_note_context_switch(bool preempt)
 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
-	if (!preempt)
-		rcu_tasks_qs(current);
+	rcu_tasks_qs(current, preempt);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 }
