Commit ff5c4f5

rcu/tree: Mark the idle relevant functions noinstr
These functions are invoked from context tracking and other places in the
low level entry code. Move them into the .noinstr.text section to exclude
them from instrumentation.

Mark the places which are safe to invoke traceable functions with
instrumentation_begin/end() so objtool won't complain.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 0d00449 commit ff5c4f5
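
For readers unfamiliar with the annotations, the shape of the pattern this
commit applies looks roughly like the following. This is a minimal sketch,
not code from the commit: example_eqs_enter() and do_traceable_work() are
made-up names, and the exact header providing the macros varies by kernel
version.

    /*
     * noinstr places a function in the .noinstr.text section, excluding
     * it from tracing, kprobes and other instrumentation. The
     * instrumentation_begin()/instrumentation_end() pair brackets the
     * regions where calling traceable functions is still safe, which is
     * what keeps objtool from complaining.
     */
    #include <linux/compiler.h>

    static void do_traceable_work(void);    /* hypothetical helper */

    noinstr void example_eqs_enter(void)
    {
            /* No tracepoints or probes may fire in this region. */

            instrumentation_begin();
            /* Within this window, traceable functions are safe to call. */
            do_traceable_work();
            instrumentation_end();

            /* Back in the non-instrumentable region. */
    }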

3 files changed: 49 additions & 41 deletions

kernel/rcu/tree.c

Lines changed: 46 additions & 37 deletions
@@ -88,9 +88,6 @@
  */
 #define RCU_DYNTICK_CTRL_MASK 0x1
 #define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
-#ifndef rcu_eqs_special_exit
-#define rcu_eqs_special_exit() do { } while (0)
-#endif
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
         .dynticks_nesting = 1,
@@ -242,7 +239,7 @@ void rcu_softirq_qs(void)
  * RCU is watching prior to the call to this function and is no longer
  * watching upon return.
  */
-static void rcu_dynticks_eqs_enter(void)
+static noinstr void rcu_dynticks_eqs_enter(void)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
         int seq;
@@ -267,7 +264,7 @@ static void rcu_dynticks_eqs_enter(void)
  * called from an extended quiescent state, that is, RCU is not watching
  * prior to the call to this function and is watching upon return.
  */
-static void rcu_dynticks_eqs_exit(void)
+static noinstr void rcu_dynticks_eqs_exit(void)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
         int seq;
@@ -285,8 +282,6 @@ static void rcu_dynticks_eqs_exit(void)
         if (seq & RCU_DYNTICK_CTRL_MASK) {
                 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
                 smp_mb__after_atomic(); /* _exit after clearing mask. */
-                /* Prefer duplicate flushes to losing a flush. */
-                rcu_eqs_special_exit();
         }
 }
 
@@ -314,7 +309,7 @@ static void rcu_dynticks_eqs_online(void)
  *
  * No ordering, as we are sampling CPU-local information.
  */
-static bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
@@ -603,7 +598,7 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
  * the possibility of usermode upcalls having messed up our count
  * of interrupt nesting level during the prior busy period.
  */
-static void rcu_eqs_enter(bool user)
+static noinstr void rcu_eqs_enter(bool user)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
@@ -618,12 +613,14 @@ static void rcu_eqs_enter(bool user)
         }
 
         lockdep_assert_irqs_disabled();
+        instrumentation_begin();
         trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
         rdp = this_cpu_ptr(&rcu_data);
         do_nocb_deferred_wakeup(rdp);
         rcu_prepare_for_idle();
         rcu_preempt_deferred_qs(current);
+        instrumentation_end();
         WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
         // RCU is watching here ...
         rcu_dynticks_eqs_enter();
@@ -660,7 +657,7 @@ void rcu_idle_enter(void)
  * If you add or remove a call to rcu_user_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_user_enter(void)
+noinstr void rcu_user_enter(void)
 {
         lockdep_assert_irqs_disabled();
         rcu_eqs_enter(true);
@@ -693,19 +690,23 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
          * leave it in non-RCU-idle state.
          */
         if (rdp->dynticks_nmi_nesting != 1) {
+                instrumentation_begin();
                 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
                                   atomic_read(&rdp->dynticks));
                 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
                            rdp->dynticks_nmi_nesting - 2);
+                instrumentation_end();
                 return;
         }
 
+        instrumentation_begin();
         /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
         trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
         WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
         if (irq)
                 rcu_prepare_for_idle();
+        instrumentation_end();
 
         // RCU is watching here ...
         rcu_dynticks_eqs_enter();
@@ -721,7 +722,7 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
  * If you add or remove a call to rcu_nmi_exit(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_nmi_exit(void)
+void noinstr rcu_nmi_exit(void)
 {
         rcu_nmi_exit_common(false);
 }
@@ -745,7 +746,7 @@ void rcu_nmi_exit(void)
  * If you add or remove a call to rcu_irq_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_irq_exit(void)
+void noinstr rcu_irq_exit(void)
 {
         lockdep_assert_irqs_disabled();
         rcu_nmi_exit_common(true);
@@ -774,7 +775,7 @@ void rcu_irq_exit_irqson(void)
  * allow for the possibility of usermode upcalls messing up our count of
  * interrupt nesting level during the busy period that is just now starting.
  */
-static void rcu_eqs_exit(bool user)
+static void noinstr rcu_eqs_exit(bool user)
 {
         struct rcu_data *rdp;
         long oldval;
@@ -792,12 +793,14 @@ static void rcu_eqs_exit(bool user)
         // RCU is not watching here ...
         rcu_dynticks_eqs_exit();
         // ... but is watching here.
+        instrumentation_begin();
         rcu_cleanup_after_idle();
         trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
         WRITE_ONCE(rdp->dynticks_nesting, 1);
         WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
         WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+        instrumentation_end();
 }
 
 /**
@@ -828,7 +831,7 @@ void rcu_idle_exit(void)
  * If you add or remove a call to rcu_user_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_user_exit(void)
+void noinstr rcu_user_exit(void)
 {
         rcu_eqs_exit(1);
 }
@@ -876,28 +879,35 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
                         rcu_cleanup_after_idle();
 
                 incby = 1;
-        } else if (irq && tick_nohz_full_cpu(rdp->cpu) &&
-                   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-                   READ_ONCE(rdp->rcu_urgent_qs) &&
-                   !READ_ONCE(rdp->rcu_forced_tick)) {
-                // We get here only if we had already exited the extended
-                // quiescent state and this was an interrupt (not an NMI).
-                // Therefore, (1) RCU is already watching and (2) The fact
-                // that we are in an interrupt handler and that the rcu_node
-                // lock is an irq-disabled lock prevents self-deadlock.
-                // So we can safely recheck under the lock.
-                raw_spin_lock_rcu_node(rdp->mynode);
-                if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-                        // A nohz_full CPU is in the kernel and RCU
-                        // needs a quiescent state. Turn on the tick!
-                        WRITE_ONCE(rdp->rcu_forced_tick, true);
-                        tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+        } else if (irq) {
+                instrumentation_begin();
+                if (tick_nohz_full_cpu(rdp->cpu) &&
+                    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
+                    READ_ONCE(rdp->rcu_urgent_qs) &&
+                    !READ_ONCE(rdp->rcu_forced_tick)) {
+                        // We get here only if we had already exited the
+                        // extended quiescent state and this was an
+                        // interrupt (not an NMI). Therefore, (1) RCU is
+                        // already watching and (2) The fact that we are in
+                        // an interrupt handler and that the rcu_node lock
+                        // is an irq-disabled lock prevents self-deadlock.
+                        // So we can safely recheck under the lock.
+                        raw_spin_lock_rcu_node(rdp->mynode);
+                        if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+                                // A nohz_full CPU is in the kernel and RCU
+                                // needs a quiescent state. Turn on the tick!
+                                WRITE_ONCE(rdp->rcu_forced_tick, true);
+                                tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+                        }
+                        raw_spin_unlock_rcu_node(rdp->mynode);
                 }
-                raw_spin_unlock_rcu_node(rdp->mynode);
+                instrumentation_end();
         }
+        instrumentation_begin();
         trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                           rdp->dynticks_nmi_nesting,
                           rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
+        instrumentation_end();
         WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
                    rdp->dynticks_nmi_nesting + incby);
         barrier();
@@ -906,11 +916,10 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  */
-void rcu_nmi_enter(void)
+noinstr void rcu_nmi_enter(void)
 {
         rcu_nmi_enter_common(false);
 }
-NOKPROBE_SYMBOL(rcu_nmi_enter);
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -934,7 +943,7 @@ NOKPROBE_SYMBOL(rcu_nmi_enter);
  * If you add or remove a call to rcu_irq_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_irq_enter(void)
+noinstr void rcu_irq_enter(void)
 {
         lockdep_assert_irqs_disabled();
         rcu_nmi_enter_common(true);
@@ -979,7 +988,7 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
  * if the current CPU is not in its idle loop or is in an interrupt or
  * NMI handler, return true.
  */
-bool notrace rcu_is_watching(void)
+bool rcu_is_watching(void)
 {
         bool ret;
 
@@ -1031,12 +1040,12 @@ bool rcu_lockdep_current_cpu_online(void)
 
         if (in_nmi() || !rcu_scheduler_fully_active)
                 return true;
-        preempt_disable();
+        preempt_disable_notrace();
         rdp = this_cpu_ptr(&rcu_data);
         rnp = rdp->mynode;
         if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
                 ret = true;
-        preempt_enable();
+        preempt_enable_notrace();
         return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

kernel/rcu/tree_plugin.h

Lines changed: 2 additions & 2 deletions
@@ -2539,15 +2539,15 @@ static void rcu_bind_gp_kthread(void)
 }
 
 /* Record the current task on dyntick-idle entry. */
-static void rcu_dynticks_task_enter(void)
+static void noinstr rcu_dynticks_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
         WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
 
 /* Record no current task on dyntick-idle exit. */
-static void rcu_dynticks_task_exit(void)
+static void noinstr rcu_dynticks_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
         WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);

kernel/rcu/update.c

Lines changed: 1 addition & 2 deletions
@@ -284,13 +284,12 @@ struct lockdep_map rcu_callback_map =
         STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
 EXPORT_SYMBOL_GPL(rcu_callback_map);
 
-int notrace debug_lockdep_rcu_enabled(void)
+noinstr int notrace debug_lockdep_rcu_enabled(void)
 {
         return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
                current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
-NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 
 /**
  * rcu_read_lock_held() - might we be in RCU read-side critical section?
