@@ -1663,23 +1663,34 @@ struct rcu_fwd_cb {
 	struct rcu_fwd_cb *rfc_next;
 	int rfc_gps;
 };
-static DEFINE_SPINLOCK(rcu_fwd_lock);
-static struct rcu_fwd_cb *rcu_fwd_cb_head;
-static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
-static long n_launders_cb;
-static unsigned long rcu_fwd_startat;
-static bool rcu_fwd_emergency_stop;
+
 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
+#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
+
 struct rcu_launder_hist {
 	long n_launders;
 	unsigned long launder_gp_seq;
 };
-#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
-static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
-static unsigned long rcu_launder_gp_seq_start;
+
+struct rcu_fwd {
+	spinlock_t rcu_fwd_lock;
+	struct rcu_fwd_cb *rcu_fwd_cb_head;
+	struct rcu_fwd_cb **rcu_fwd_cb_tail;
+	long n_launders_cb;
+	unsigned long rcu_fwd_startat;
+	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
+	unsigned long rcu_launder_gp_seq_start;
+};
+
+struct rcu_fwd rcu_fwds = {
+	.rcu_fwd_lock = __SPIN_LOCK_UNLOCKED(rcu_fwds.rcu_fwd_lock),
+	.rcu_fwd_cb_tail = &rcu_fwds.rcu_fwd_cb_head,
+};
+
+bool rcu_fwd_emergency_stop;
 
 static void rcu_torture_fwd_cb_hist(void)
 {
@@ -1688,16 +1699,17 @@ static void rcu_torture_fwd_cb_hist(void)
 	int i;
 	int j;
 
-	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
-		if (n_launders_hist[i].n_launders > 0)
+	for (i = ARRAY_SIZE(rcu_fwds.n_launders_hist) - 1; i > 0; i--)
+		if (rcu_fwds.n_launders_hist[i].n_launders > 0)
 			break;
 	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
-		 __func__, jiffies - rcu_fwd_startat);
-	gps_old = rcu_launder_gp_seq_start;
+		 __func__, jiffies - rcu_fwds.rcu_fwd_startat);
+	gps_old = rcu_fwds.rcu_launder_gp_seq_start;
 	for (j = 0; j <= i; j++) {
-		gps = n_launders_hist[j].launder_gp_seq;
+		gps = rcu_fwds.n_launders_hist[j].launder_gp_seq;
 		pr_cont(" %ds/%d: %ld:%ld",
-			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
+			j + 1, FWD_CBS_HIST_DIV,
+			rcu_fwds.n_launders_hist[j].n_launders,
 			rcutorture_seq_diff(gps, gps_old));
 		gps_old = gps;
 	}
@@ -1714,17 +1726,17 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
 
 	rfcp->rfc_next = NULL;
 	rfcp->rfc_gps++;
-	spin_lock_irqsave(&rcu_fwd_lock, flags);
-	rfcpp = rcu_fwd_cb_tail;
-	rcu_fwd_cb_tail = &rfcp->rfc_next;
+	spin_lock_irqsave(&rcu_fwds.rcu_fwd_lock, flags);
+	rfcpp = rcu_fwds.rcu_fwd_cb_tail;
+	rcu_fwds.rcu_fwd_cb_tail = &rfcp->rfc_next;
 	WRITE_ONCE(*rfcpp, rfcp);
-	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
-	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
-	if (i >= ARRAY_SIZE(n_launders_hist))
-		i = ARRAY_SIZE(n_launders_hist) - 1;
-	n_launders_hist[i].n_launders++;
-	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
-	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+	WRITE_ONCE(rcu_fwds.n_launders_cb, rcu_fwds.n_launders_cb + 1);
+	i = ((jiffies - rcu_fwds.rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
+	if (i >= ARRAY_SIZE(rcu_fwds.n_launders_hist))
+		i = ARRAY_SIZE(rcu_fwds.n_launders_hist) - 1;
+	rcu_fwds.n_launders_hist[i].n_launders++;
+	rcu_fwds.n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
+	spin_unlock_irqrestore(&rcu_fwds.rcu_fwd_lock, flags);
 }
 
 // Give the scheduler a chance, even on nohz_full CPUs.
@@ -1751,16 +1763,16 @@ static unsigned long rcu_torture_fwd_prog_cbfree(void)
 	struct rcu_fwd_cb *rfcp;
 
 	for (;;) {
-		spin_lock_irqsave(&rcu_fwd_lock, flags);
-		rfcp = rcu_fwd_cb_head;
+		spin_lock_irqsave(&rcu_fwds.rcu_fwd_lock, flags);
+		rfcp = rcu_fwds.rcu_fwd_cb_head;
 		if (!rfcp) {
-			spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+			spin_unlock_irqrestore(&rcu_fwds.rcu_fwd_lock, flags);
 			break;
 		}
-		rcu_fwd_cb_head = rfcp->rfc_next;
-		if (!rcu_fwd_cb_head)
-			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
-		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+		rcu_fwds.rcu_fwd_cb_head = rfcp->rfc_next;
+		if (!rcu_fwds.rcu_fwd_cb_head)
+			rcu_fwds.rcu_fwd_cb_tail = &rcu_fwds.rcu_fwd_cb_head;
+		spin_unlock_irqrestore(&rcu_fwds.rcu_fwd_lock, flags);
 		kfree(rfcp);
 		freed++;
 		rcu_torture_fwd_prog_cond_resched(freed);
@@ -1804,8 +1816,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 	sd = cur_ops->stall_dur() + 1;
 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
 	dur = sd4 + torture_random(&trs) % (sd - sd4);
-	WRITE_ONCE(rcu_fwd_startat, jiffies);
-	stopat = rcu_fwd_startat + dur;
+	WRITE_ONCE(rcu_fwds.rcu_fwd_startat, jiffies);
+	stopat = rcu_fwds.rcu_fwd_startat + dur;
 	while (time_before(jiffies, stopat) &&
 	       !shutdown_time_arrived() &&
 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
@@ -1864,31 +1876,31 @@ static void rcu_torture_fwd_prog_cr(void)
 	/* Loop continuously posting RCU callbacks. */
 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
 	cur_ops->sync(); /* Later readers see above write. */
-	WRITE_ONCE(rcu_fwd_startat, jiffies);
-	stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
+	WRITE_ONCE(rcu_fwds.rcu_fwd_startat, jiffies);
+	stopat = rcu_fwds.rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
 	n_launders = 0;
-	n_launders_cb = 0;
+	rcu_fwds.n_launders_cb = 0; // Hoist initialization for multi-kthread
 	n_launders_sa = 0;
 	n_max_cbs = 0;
 	n_max_gps = 0;
-	for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
-		n_launders_hist[i].n_launders = 0;
+	for (i = 0; i < ARRAY_SIZE(rcu_fwds.n_launders_hist); i++)
+		rcu_fwds.n_launders_hist[i].n_launders = 0;
 	cver = READ_ONCE(rcu_torture_current_version);
 	gps = cur_ops->get_gp_seq();
-	rcu_launder_gp_seq_start = gps;
+	rcu_fwds.rcu_launder_gp_seq_start = gps;
 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
 	while (time_before(jiffies, stopat) &&
 	       !shutdown_time_arrived() &&
 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
-		rfcp = READ_ONCE(rcu_fwd_cb_head);
+		rfcp = READ_ONCE(rcu_fwds.rcu_fwd_cb_head);
 		rfcpn = NULL;
 		if (rfcp)
 			rfcpn = READ_ONCE(rfcp->rfc_next);
 		if (rfcpn) {
 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
 				break;
-			rcu_fwd_cb_head = rfcpn;
+			rcu_fwds.rcu_fwd_cb_head = rfcpn;
 			n_launders++;
 			n_launders_sa++;
 		} else {
@@ -1910,7 +1922,7 @@ static void rcu_torture_fwd_prog_cr(void)
 		}
 	}
 	stoppedat = jiffies;
-	n_launders_cb_snap = READ_ONCE(n_launders_cb);
+	n_launders_cb_snap = READ_ONCE(rcu_fwds.n_launders_cb);
 	cver = READ_ONCE(rcu_torture_current_version) - cver;
 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
@@ -1921,7 +1933,8 @@ static void rcu_torture_fwd_prog_cr(void)
 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
 			 __func__,
-			 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
+			 stoppedat - rcu_fwds.rcu_fwd_startat,
+			 jiffies - stoppedat,
 			 n_launders + n_max_cbs - n_launders_cb_snap,
 			 n_launders, n_launders_sa,
 			 n_max_gps, n_max_cbs, cver, gps);
@@ -1943,7 +1956,7 @@ static int rcutorture_oom_notify(struct notifier_block *self,
 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
 	     __func__);
 	rcu_torture_fwd_cb_hist();
-	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
+	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwds.rcu_fwd_startat)) / 2);
 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
 	pr_info("%s: Freed %lu RCU callbacks.\n",
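
Not part of the diff above: the following is a minimal user-space sketch of the aggregation pattern this commit applies, namely collecting a related set of globals (lock, callback-list head and tail, counters) into one statically initialized structure that every helper receives explicitly, which is what later allows more than one forward-progress instance to exist. A pthread mutex stands in for the kernel spinlock, and all names here (struct fwd_state, fwd_enqueue, fwd_free_all) are illustrative, not taken from rcutorture.

/* Sketch only; assumes POSIX threads and C99. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fwd_cb {
	struct fwd_cb *next;
};

/* All per-instance state lives in one structure, as in struct rcu_fwd. */
struct fwd_state {
	pthread_mutex_t lock;
	struct fwd_cb *head;
	struct fwd_cb **tail;
	long n_queued;
};

/* Static initialization, analogous to the __SPIN_LOCK_UNLOCKED() initializer. */
static struct fwd_state fwd = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.tail = &fwd.head,
};

/* Helpers take the state explicitly, so adding a second instance is trivial. */
static void fwd_enqueue(struct fwd_state *sp, struct fwd_cb *cbp)
{
	cbp->next = NULL;
	pthread_mutex_lock(&sp->lock);
	*sp->tail = cbp;		/* Append at the recorded tail... */
	sp->tail = &cbp->next;		/* ...and advance the tail pointer. */
	sp->n_queued++;
	pthread_mutex_unlock(&sp->lock);
}

/* Drain and free the list, resetting the tail when it empties. */
static long fwd_free_all(struct fwd_state *sp)
{
	struct fwd_cb *cbp;
	long freed = 0;

	for (;;) {
		pthread_mutex_lock(&sp->lock);
		cbp = sp->head;
		if (!cbp) {
			pthread_mutex_unlock(&sp->lock);
			break;
		}
		sp->head = cbp->next;
		if (!sp->head)
			sp->tail = &sp->head;
		pthread_mutex_unlock(&sp->lock);
		free(cbp);
		freed++;
	}
	return freed;
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct fwd_cb *cbp = calloc(1, sizeof(*cbp));

		if (cbp)
			fwd_enqueue(&fwd, cbp);
	}
	printf("queued %ld, freed %ld\n", fwd.n_queued, fwd_free_all(&fwd));
	return 0;
}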