@@ -8,7 +8,7 @@
  *
  * Key points :
  *
- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+ * - Use a seqcount on 32-bit
  * - The whole thing is a no-op on 64-bit architectures.
  *
  * Usage constraints:
@@ -20,7 +20,8 @@
  * writer and also spin forever.
  *
  * 3) Write side must use the _irqsave() variant if other writers, or a reader,
- *    can be invoked from an IRQ context.
+ *    can be invoked from an IRQ context. On 64bit systems this variant does not
+ *    disable interrupts.
  *
  * 4) If reader fetches several counters, there is no guarantee the whole values
  *    are consistent w.r.t. each other (remember point #2: seqcounts are not
@@ -29,11 +30,6 @@
  * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
  *    pure reads.
  *
- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
- *    might be updated from a hardirq or softirq context (remember point #1:
- *    seqcounts are not used for UP kernels). 32-bit UP stat readers could read
- *    corrupted 64-bit values otherwise.
- *
  * Usage :
  *
  * Stats producer (writer) should use following template granted it already got
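For orientation (the template lives in the unchanged part of the comment, so the diff cuts it off here), a writer that already holds exclusive access to the counters does roughly the following; `stats`, `bytes64` and `packets64` are illustrative names for an object embedding a struct u64_stats_sync and u64_stats_t counters:

	/* writer: exclusive access already guaranteed (lock held, or per-CPU data) */
	u64_stats_update_begin(&stats->syncp);
	u64_stats_add(&stats->bytes64, len);	/* non-atomic update */
	u64_stats_inc(&stats->packets64);	/* non-atomic update */
	u64_stats_update_end(&stats->syncp);

Per point #3 above, writers must use u64_stats_update_begin_irqsave()/u64_stats_update_end_irqrestore() instead when another writer or a reader can run from IRQ context.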
@@ -66,7 +62,7 @@
 #include <linux/seqlock.h>
 
 struct u64_stats_sync {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+#if BITS_PER_LONG == 32
 	seqcount_t	seq;
 #endif
 };
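A minimal sketch of how the sync object is typically embedded and initialized (struct and field names here are hypothetical, not from the patch):

	struct mydev_stats {
		u64_stats_t		rx_packets;
		u64_stats_t		rx_bytes;
		struct u64_stats_sync	syncp;	/* empty struct on 64-bit */
	};

	u64_stats_init(&stats->syncp);		/* once, before first use */

With the simplified condition above, 32-bit kernels always carry the seqcount; on 64-bit the structure is empty and the begin/end helpers compile to nothing.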
@@ -98,7 +94,22 @@ static inline void u64_stats_inc(u64_stats_t *p)
 	local64_inc(&p->v);
 }
 
-#else
+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
+static inline void __u64_stats_irqrestore(unsigned long flags) { }
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return 0;
+}
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					   unsigned int start)
+{
+	return false;
+}
+
+#else /* 64 bit */
 
 typedef struct {
 	u64		v;
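On 32-bit, u64_stats_t is this plain u64, so an unprotected concurrent load could tear into two 32-bit accesses; that is what the seqcount path below guards. A reader-side sketch, reusing the hypothetical stats object from above:

	u64 tbytes, tpackets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		tbytes = u64_stats_read(&stats->rx_bytes);
		tpackets = u64_stats_read(&stats->rx_packets);
	} while (u64_stats_fetch_retry(&stats->syncp, start));

Each snapshot is internally consistent, but per point #4 the two counters are not guaranteed consistent with each other.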
@@ -123,123 +134,95 @@ static inline void u64_stats_inc(u64_stats_t *p)
 {
 	p->v++;
 }
-#endif
 
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
-#else
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
 {
+	seqcount_init(&syncp->seq);
 }
-#endif
 
-static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 	write_seqcount_begin(&syncp->seq);
-#endif
 }
 
-static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	write_seqcount_end(&syncp->seq);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
-#endif
+	preempt_enable_nested();
 }
 
-static inline unsigned long
-u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+static inline unsigned long __u64_stats_irqsave(void)
 {
-	unsigned long flags = 0;
+	unsigned long flags;
 
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
-	else
-		local_irq_save(flags);
-	write_seqcount_begin(&syncp->seq);
-#endif
+	local_irq_save(flags);
 	return flags;
 }
 
-static inline void
-u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
-				unsigned long flags)
+static inline void __u64_stats_irqrestore(unsigned long flags)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	write_seqcount_end(&syncp->seq);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-#endif
+	local_irq_restore(flags);
 }
 
 static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	return read_seqcount_begin(&syncp->seq);
-#else
-	return 0;
-#endif
 }
 
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					   unsigned int start)
 {
-#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
-	preempt_disable();
-#endif
-	return __u64_stats_fetch_begin(syncp);
+	return read_seqcount_retry(&syncp->seq, start);
 }
+#endif /* !64 bit */
 
-static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-					   unsigned int start)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-	return read_seqcount_retry(&syncp->seq, start);
-#else
-	return false;
-#endif
+	__u64_stats_update_begin(syncp);
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+	__u64_stats_update_end(syncp);
+}
+
+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+	unsigned long flags = __u64_stats_irqsave();
+
+	__u64_stats_update_begin(syncp);
+	return flags;
+}
+
+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+						   unsigned long flags)
+{
+	__u64_stats_update_end(syncp);
+	__u64_stats_irqrestore(flags);
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return __u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
-#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
-	preempt_enable();
-#endif
 	return __u64_stats_fetch_retry(syncp, start);
 }
 
-/*
- * In case irq handlers can update u64 counters, readers can use following helpers
- * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable irqs.
- * - 64bit have no problem atomically reading u64 values, irq safe.
- */
+/* Obsolete interfaces */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-	preempt_disable();
-#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
-	local_irq_disable();
-#endif
-	return __u64_stats_fetch_begin(syncp);
+	return u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					     unsigned int start)
 {
-#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-	preempt_enable();
-#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
-	local_irq_enable();
-#endif
-	return __u64_stats_fetch_retry(syncp, start);
+	return u64_stats_fetch_retry(syncp, start);
 }
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
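Since u64_stats_fetch_begin_irq() and u64_stats_fetch_retry_irq() are now plain wrappers, existing callers can drop the suffix mechanically with no behavioral change; a hypothetical follow-up cleanup in a user would look like:

	-	start = u64_stats_fetch_begin_irq(&stats->syncp);
	+	start = u64_stats_fetch_begin(&stats->syncp);
	...
	-	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
	+	} while (u64_stats_fetch_retry(&stats->syncp, start));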