@@ -165,6 +165,12 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+// Add delay to rcu_read_unlock() for strict grace periods.
+static int rcu_unlock_delay;
+#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
+module_param(rcu_unlock_delay, int, 0444);
+#endif
+
 /*
  * This rcu parameter is runtime-read-only. It reflects
  * a minimum allowed number of objects which can be cached
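
For context: a parameter registered with mode 0444, like rcu_unlock_delay above, is readable but not writable at runtime, so it is normally set on the kernel command line (for kernel/rcu/tree.c that would typically use the rcutree. prefix, e.g. rcutree.rcu_unlock_delay=100, though the prefix definition lies outside this diff). A minimal sketch of the same declaration-and-use pattern, with hypothetical names not taken from this patch:

// Sketch of a read-only (0444) boot-time delay parameter; demo_delay_us
// and demo_slow_path() are hypothetical names, not part of this patch.
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>

static int demo_delay_us;			// default 0: no extra delay
module_param(demo_delay_us, int, 0444);		// visible in sysfs, not writable

static void demo_slow_path(void)
{
	if (demo_delay_us)
		udelay(demo_delay_us);	// rcu_unlock_delay is presumably consumed
					// this way in the unlock slow path
}
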
@@ -455,24 +461,25 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
 }
 
-#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch ... */
-#define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
+#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
+				// Maximum callbacks per rcu_do_batch ...
+#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
 static long blimit = DEFAULT_RCU_BLIMIT;
-#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
+#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
 static long qhimark = DEFAULT_RCU_QHIMARK;
-#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
+#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
 static long qlowmark = DEFAULT_RCU_QLOMARK;
 #define DEFAULT_RCU_QOVLD_MULT 2
 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
-static long qovld = DEFAULT_RCU_QOVLD;	/* If this many pending, hammer QS. */
-static long qovld_calc = -1;	/* No pre-initialization lock acquisitions! */
+static long qovld = DEFAULT_RCU_QOVLD;	// If this many pending, hammer QS.
+static long qovld_calc = -1;	// No pre-initialization lock acquisitions!
 
 module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 module_param(qovld, long, 0444);
 
-static ulong jiffies_till_first_fqs = ULONG_MAX;
+static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;
 static int rcu_divisor = 7;
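
IS_ENABLED(CONFIG_...) expands to a compile-time 0 or 1, so the strict-grace-period defaults above (a blimit of 1000 rather than 10, jiffies_till_first_fqs of 0 rather than ULONG_MAX) cost nothing at runtime and need no #ifdef around the declarations. A minimal sketch of the idiom, using hypothetical names rather than anything from this patch:

// Compile-time default selection via IS_ENABLED(); CONFIG_DEMO_STRICT and
// the demo_* names are hypothetical, not part of this patch.
#include <linux/kconfig.h>
#include <linux/moduleparam.h>

#define DEMO_BATCH_LIMIT (IS_ENABLED(CONFIG_DEMO_STRICT) ? 1000 : 10)

static long demo_batch_limit = DEMO_BATCH_LIMIT;	// larger default when strict
module_param(demo_batch_limit, long, 0444);
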
@@ -1571,6 +1578,19 @@ static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
 	raw_spin_unlock_rcu_node(rnp);
 }
 
+/*
+ * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
+ * quiescent state. This is intended to be invoked when the CPU notices
+ * a new grace period.
+ */
+static void rcu_strict_gp_check_qs(void)
+{
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
+		rcu_read_lock();
+		rcu_read_unlock();
+	}
+}
+
 /*
  * Update CPU-local rcu_data state to record the beginnings and ends of
  * grace periods. The caller must hold the ->lock of the leaf rcu_node
@@ -1641,6 +1661,7 @@ static void note_gp_changes(struct rcu_data *rdp)
 	}
 	needwake = __note_gp_changes(rnp, rdp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	rcu_strict_gp_check_qs();
 	if (needwake)
 		rcu_gp_kthread_wake();
 }
@@ -1678,6 +1699,15 @@ static void rcu_gp_torture_wait(void)
 	}
 }
 
+/*
+ * Handler for on_each_cpu() to invoke the target CPU's RCU core
+ * processing.
+ */
+static void rcu_strict_gp_boundary(void *unused)
+{
+	invoke_rcu_core();
+}
+
 /*
  * Initialize a new grace period. Return false if no grace period required.
  */
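
rcu_strict_gp_boundary() is a plain smp_call_func_t handler, so it can be pushed to every CPU with on_each_cpu(); the final argument 0 in the callers below (rcu_gp_init() and rcu_gp_cleanup()) means the initiating CPU does not wait for the handlers to finish. A generic sketch of that pattern, with hypothetical names not taken from this patch:

// Generic on_each_cpu() handler pattern; demo_poke()/demo_pokes are
// hypothetical names, not part of this patch.
#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_pokes);

static void demo_poke(void *unused)
{
	this_cpu_inc(demo_pokes);	// runs on every CPU, in IPI context on remote CPUs
}

static void demo_poke_all(void)
{
	on_each_cpu(demo_poke, NULL, 0);	// 0: do not wait for completion
}
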
@@ -1809,6 +1839,10 @@ static bool rcu_gp_init(void)
 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
 	}
 
+	// If strict, make all CPUs aware of new grace period.
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
+
 	return true;
 }
 
@@ -2025,6 +2059,10 @@ static void rcu_gp_cleanup(void)
 				  rcu_state.gp_flags & RCU_GP_FLAG_INIT);
 	}
 	raw_spin_unlock_irq_rcu_node(rnp);
+
+	// If strict, make all CPUs aware of the end of the old grace period.
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
 }
 
 /*
@@ -2203,7 +2241,7 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
  * structure. This must be called from the specified CPU.
  */
 static void
-rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
+rcu_report_qs_rdp(struct rcu_data *rdp)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -2212,6 +2250,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 			       rcu_segcblist_is_offloaded(&rdp->cblist);
 	struct rcu_node *rnp;
 
+	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
@@ -2228,8 +2267,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 		return;
 	}
 	mask = rdp->grpmask;
-	if (rdp->cpu == smp_processor_id())
-		rdp->core_needs_qs = false;
+	rdp->core_needs_qs = false;
 	if ((rnp->qsmask & mask) == 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
@@ -2278,7 +2316,7 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
 	 * judge of that).
 	 */
-	rcu_report_qs_rdp(rdp->cpu, rdp);
+	rcu_report_qs_rdp(rdp);
 }
 
 /*
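
The rcu_report_qs_rdp() hunks above replace a caller-supplied cpu argument with the requirement that the function run on rdp's own CPU, enforced by WARN_ON_ONCE(); the now-unconditional clearing of ->core_needs_qs follows from that requirement. A small sketch of the same precondition-by-assertion style, with hypothetical names not taken from this patch:

// Assert the "must run on the owning CPU" precondition instead of passing
// a redundant cpu argument; struct demo_pcpu and demo_report_qs() are
// hypothetical, not from this patch.
#include <linux/smp.h>
#include <linux/bug.h>

struct demo_pcpu {
	int cpu;		// CPU that owns this per-CPU state
	bool needs_qs;
};

static void demo_report_qs(struct demo_pcpu *dp)
{
	WARN_ON_ONCE(dp->cpu != smp_processor_id());	// must run on dp's CPU
	dp->needs_qs = false;	// safe to clear unconditionally given the check above
}
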
@@ -2621,6 +2659,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+// Workqueue handler for an RCU reader for kernels enforcing struct RCU
+// grace periods.
+static void strict_work_handler(struct work_struct *work)
+{
+	rcu_read_lock();
+	rcu_read_unlock();
+}
+
 /* Perform RCU core processing work for the current CPU. */
 static __latent_entropy void rcu_core(void)
 {
@@ -2665,6 +2711,10 @@ static __latent_entropy void rcu_core(void)
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
 	trace_rcu_utilization(TPS("End RCU core"));
+
+	// If strict GPs, schedule an RCU reader in a clean environment.
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
 }
 
 static void rcu_core_si(struct softirq_action *h)
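
strict_work_handler() above runs from a workqueue, i.e. in process context with interrupts enabled, which is the "clean environment" the comment in rcu_core() refers to; each CPU re-queues its own rdp->strict_work onto itself, and the work item is initialized once per CPU (as in rcu_boot_init_percpu_data() below). A sketch of that per-CPU queue_work_on() pattern, with hypothetical names and system_wq standing in for RCU's internal rcu_gp_wq:

// Per-CPU work re-queued onto its own CPU; the demo_* names are hypothetical
// and system_wq is used only because rcu_gp_wq is internal to RCU.
#include <linux/workqueue.h>
#include <linux/percpu.h>

struct demo_pcpu_work {
	struct work_struct work;
};
static DEFINE_PER_CPU(struct demo_pcpu_work, demo_pcpu_work);

static void demo_work_handler(struct work_struct *work)
{
	// Process context, interrupts enabled: a "clean" place to run a reader.
}

static void demo_init_cpu(int cpu)
{
	INIT_WORK(&per_cpu(demo_pcpu_work, cpu).work, demo_work_handler);
}

static void demo_kick(int cpu)
{
	queue_work_on(cpu, system_wq, &per_cpu(demo_pcpu_work, cpu).work);
}
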
@@ -3862,6 +3912,7 @@ rcu_boot_init_percpu_data(int cpu)
 
 	/* Set up local state, ensuring consistent view of global state. */
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
+	INIT_WORK(&rdp->strict_work, strict_work_handler);
 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;