Skip to content

Commit 276c410

Browse files
committed
rcu-tasks: Split ->trc_reader_need_end
This commit splits ->trc_reader_need_end by using the rcu_special union. This change permits readers to check to see if a memory barrier is required without any added overhead in the common case where no such barrier is required. This commit also adds the read-side checking. Later commits will add the machinery to properly set the new ->trc_reader_special.b.need_mb field. This commit also makes rcu_read_unlock_trace_special() tolerate nested read-side critical sections within interrupt and NMI handlers. Signed-off-by: Paul E. McKenney <[email protected]>
1 parent b0afa0f commit 276c410

File tree

5 files changed

+31
-19
lines changed

5 files changed

+31
-19
lines changed

include/linux/rcupdate_trace.h

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ static inline int rcu_read_lock_trace_held(void)

 #ifdef CONFIG_TASKS_TRACE_RCU

-void rcu_read_unlock_trace_special(struct task_struct *t);
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);

 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,6 +50,8 @@ static inline void rcu_read_lock_trace(void)
 	struct task_struct *t = current;

 	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+	if (t->trc_reader_special.b.need_mb)
+		smp_mb(); // Pairs with update-side barriers
 	rcu_lock_acquire(&rcu_trace_lock_map);
 }

@@ -69,10 +71,11 @@ static inline void rcu_read_unlock_trace(void)

 	rcu_lock_release(&rcu_trace_lock_map);
 	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
-	WRITE_ONCE(t->trc_reader_nesting, nesting);
-	if (likely(!READ_ONCE(t->trc_reader_need_end)) || nesting)
+	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+		WRITE_ONCE(t->trc_reader_nesting, nesting);
 		return;  // We assume shallow reader nesting.
-	rcu_read_unlock_trace_special(t);
+	}
+	rcu_read_unlock_trace_special(t, nesting);
 }

 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);

include/linux/sched.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -613,7 +613,7 @@ union rcu_special {
 		u8 blocked;
 		u8 need_qs;
 		u8 exp_hint; /* Hint for performance. */
-		u8 pad; /* No garbage from compiler! */
+		u8 need_mb; /* Readers need smp_mb(). */
 	} b; /* Bits. */
 	u32 s; /* Set of bits. */
 };
@@ -727,7 +727,7 @@ struct task_struct {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	int				trc_reader_nesting;
 	int				trc_ipi_to_cpu;
-	bool				trc_reader_need_end;
+	union rcu_special		trc_reader_special;
 	bool				trc_reader_checked;
 	struct list_head		trc_holdout_list;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

init/init_task.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,7 @@ struct task_struct init_task
 #endif
 #ifdef CONFIG_TASKS_TRACE_RCU
 	.trc_reader_nesting = 0,
+	.trc_reader_special.s = 0,
 	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
 #endif
 #ifdef CONFIG_CPUSETS

kernel/fork.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1685,6 +1685,7 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 #ifdef CONFIG_TASKS_TRACE_RCU
 	p->trc_reader_nesting = 0;
+	p->trc_reader_special.s = 0;
 	INIT_LIST_HEAD(&p->trc_holdout_list);
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }

kernel/rcu/tasks.h

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -723,10 +723,17 @@ DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

 /* If we are the last reader, wake up the grace-period kthread. */
-void rcu_read_unlock_trace_special(struct task_struct *t)
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
 {
-	WRITE_ONCE(t->trc_reader_need_end, false);
-	if (atomic_dec_and_test(&trc_n_readers_need_end))
+	int nq = t->trc_reader_special.b.need_qs;
+
+	if (t->trc_reader_special.b.need_mb)
+		smp_mb(); // Pairs with update-side barriers.
+	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
+	if (nq)
+		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
+	WRITE_ONCE(t->trc_reader_nesting, nesting);
+	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		wake_up(&trc_wait);
 }
 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
@@ -777,8 +784,8 @@ static void trc_read_check_handler(void *t_in)
	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
-	WARN_ON_ONCE(t->trc_reader_need_end);
-	WRITE_ONCE(t->trc_reader_need_end, true);
+	WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

 reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
@@ -804,8 +811,8 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
	// exit from that critical section.
	if (unlikely(t->trc_reader_nesting)) {
		atomic_inc(&trc_n_readers_need_end); // One more to wait on.
-		WARN_ON_ONCE(t->trc_reader_need_end);
-		WRITE_ONCE(t->trc_reader_need_end, true);
+		WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+		WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	}
	return true;
 }
@@ -884,7 +891,7 @@ static void rcu_tasks_trace_pregp_step(void)
 static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
 {
-	WRITE_ONCE(t->trc_reader_need_end, false);
+	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
@@ -916,7 +923,7 @@ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
		 ".i"[is_idle_task(t)],
		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
		 t->trc_reader_nesting,
-		 " N"[!!t->trc_reader_need_end],
+		 " N"[!!t->trc_reader_special.b.need_qs],
		 cpu);
	sched_show_task(t);
 }
@@ -980,11 +987,11 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
			break;  // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		for_each_process_thread(g, t)
-			if (READ_ONCE(t->trc_reader_need_end))
+			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
-			if (READ_ONCE(t->trc_reader_need_end)) {
+			if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
				show_stalled_task_trace(t, &firstreport);
				trc_del_holdout(t);
			}
@@ -1003,8 +1010,8 @@ void exit_tasks_rcu_finish_trace(struct task_struct *t)
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(t->trc_reader_nesting);
	WRITE_ONCE(t->trc_reader_nesting, 0);
-	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end)))
-		rcu_read_unlock_trace_special(t);
+	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
+		rcu_read_unlock_trace_special(t, 0);
 }

 /**

0 commit comments

Comments (0)