@@ -8,7 +8,7 @@
  *
  * Key points :
  *
- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+ * - Use a seqcount on 32-bit
  * - The whole thing is a no-op on 64-bit architectures.
  *
  * Usage constraints:
@@ -20,7 +20,8 @@
  *    writer and also spin forever.
  *
  * 3) Write side must use the _irqsave() variant if other writers, or a reader,
- *    can be invoked from an IRQ context.
+ *    can be invoked from an IRQ context. On 64bit systems this variant does not
+ *    disable interrupts.
  *
  * 4) If reader fetches several counters, there is no guarantee the whole values
  *    are consistent w.r.t. each other (remember point #2: seqcounts are not
@@ -29,11 +30,6 @@
  * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
  *    pure reads.
  *
- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
- *    might be updated from a hardirq or softirq context (remember point #1:
- *    seqcounts are not used for UP kernels). 32-bit UP stat readers could read
- *    corrupted 64-bit values otherwise.
- *
  * Usage :
  *
  * Stats producer (writer) should use following template granted it already got
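
(For reference, the writer/reader template the comment above refers to is unchanged context not shown in this diff. A minimal sketch of that pattern follows; struct my_stats and the function names are hypothetical, while the u64_stats_* helpers are the ones defined in this header.)

struct my_stats {
	u64_stats_t		bytes;
	u64_stats_t		packets;
	struct u64_stats_sync	syncp;
};

/* Writer: caller already has exclusive access (a lock is held, or the data
 * is per-CPU and we are in a non-preemptible context). */
static void my_stats_account(struct my_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_add(&stats->bytes, len);	/* non-atomic operation */
	u64_stats_inc(&stats->packets);		/* non-atomic operation */
	u64_stats_update_end(&stats->syncp);
}

/* Reader: retry until a consistent snapshot of each counter is seen.
 * There is no consistency guarantee across the two counters (point #4). */
static void my_stats_snapshot(const struct my_stats *stats, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*bytes = u64_stats_read(&stats->bytes);
		*packets = u64_stats_read(&stats->packets);
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}

(The reader loop spins only while a writer is in its update section on 32-bit; on 64-bit it runs exactly once.)
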
@@ -66,7 +62,7 @@
 #include <linux/seqlock.h>
 
 struct u64_stats_sync {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+#if BITS_PER_LONG == 32
 	seqcount_t	seq;
 #endif
 };
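
(With the simplified condition above, every 32-bit build now embeds the seqcount and 64-bit builds keep the struct empty. As an aside, a minimal allocation and initialization sketch, assuming the standard percpu helpers; struct my_pcpu_stats and my_stats_alloc() are hypothetical names.)

struct my_pcpu_stats {
	u64_stats_t		rx_bytes;
	u64_stats_t		rx_packets;
	struct u64_stats_sync	syncp;
};

static struct my_pcpu_stats __percpu *my_stats_alloc(void)
{
	struct my_pcpu_stats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct my_pcpu_stats);
	if (!stats)
		return NULL;

	/* u64_stats_init() arms the seqcount on 32-bit; it is a no-op on 64-bit. */
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);

	return stats;
}
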
@@ -98,7 +94,22 @@ static inline void u64_stats_inc(u64_stats_t *p)
 	local64_inc(&p->v);
 }
 
-#else
+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
+static inline void __u64_stats_irqrestore(unsigned long flags) { }
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return 0;
+}
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					   unsigned int start)
+{
+	return false;
+}
+
+#else /* 64 bit */
 
 typedef struct {
 	u64		v;
@@ -123,123 +134,95 @@ static inline void u64_stats_inc(u64_stats_t *p)
 {
 	p->v++;
 }
-#endif
 
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
-#else
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
 {
+	seqcount_init(&syncp->seq);
 }
-#endif
 
-static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 	write_seqcount_begin(&syncp->seq);
-#endif
 }
 
-static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	write_seqcount_end(&syncp->seq);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
-#endif
+	preempt_enable_nested();
 }
 
-static inline unsigned long
-u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+static inline unsigned long __u64_stats_irqsave(void)
 {
-	unsigned long flags = 0;
+	unsigned long flags;
 
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
-	else
-		local_irq_save(flags);
-	write_seqcount_begin(&syncp->seq);
-#endif
+	local_irq_save(flags);
 	return flags;
 }
 
-static inline void
-u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
-				unsigned long flags)
+static inline void __u64_stats_irqrestore(unsigned long flags)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	write_seqcount_end(&syncp->seq);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-#endif
+	local_irq_restore(flags);
 }
 
 static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	return read_seqcount_begin(&syncp->seq);
-#else
-	return 0;
-#endif
 }
 
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					   unsigned int start)
 {
-#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
-	preempt_disable();
-#endif
-	return __u64_stats_fetch_begin(syncp);
+	return read_seqcount_retry(&syncp->seq, start);
 }
+#endif /* !64 bit */
 
-static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-					   unsigned int start)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	return read_seqcount_retry(&syncp->seq, start);
-#else
-	return false;
-#endif
+	__u64_stats_update_begin(syncp);
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+	__u64_stats_update_end(syncp);
+}
+
+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+	unsigned long flags = __u64_stats_irqsave();
+
+	__u64_stats_update_begin(syncp);
+	return flags;
+}
+
+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+						   unsigned long flags)
+{
+	__u64_stats_update_end(syncp);
+	__u64_stats_irqrestore(flags);
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return __u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
-#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
-	preempt_enable();
-#endif
 	return __u64_stats_fetch_retry(syncp, start);
 }
 
-/*
- * In case irq handlers can update u64 counters, readers can use following helpers
- * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable irqs.
- * - 64bit have no problem atomically reading u64 values, irq safe.
- */
+/* Obsolete interfaces */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-	preempt_disable();
-#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
-	local_irq_disable();
-#endif
-	return __u64_stats_fetch_begin(syncp);
+	return u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					     unsigned int start)
 {
-#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-	preempt_enable();
-#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
-	local_irq_enable();
-#endif
-	return __u64_stats_fetch_retry(syncp, start);
+	return u64_stats_fetch_retry(syncp, start);
 }
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
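
(Usage note on the consolidated helpers above: a hedged sketch of a writer that may race with an update from IRQ context, reusing the hypothetical struct my_stats from the earlier sketch.)

static void my_stats_account_any_context(struct my_stats *stats, unsigned int len)
{
	unsigned long flags;

	/* 32-bit: disables interrupts and opens the seqcount write section.
	 * 64-bit: effectively a no-op, flags stays 0 and IRQs are untouched. */
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_add(&stats->bytes, len);
	u64_stats_inc(&stats->packets);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

(Readers that previously called u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() can move to the plain u64_stats_fetch_begin()/u64_stats_fetch_retry(); after this change the _irq variants are plain aliases kept under "Obsolete interfaces".)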