/*
 * asm operand constraint for a 32-bit per-CPU value:
 * "ri" = any register or any immediate.
 */
#define __pcpu_reg_imm_4(x) "ri" (x)
/*
 * asm operand constraint for a 64-bit per-CPU value:
 * "re" = any register, or an immediate that fits in a 32-bit
 * sign-extended constant (the x86-64 instruction-encoding limit).
 */
#define __pcpu_reg_imm_8(x) "re" (x)
146
146
147
+ #ifdef CONFIG_USE_X86_SEG_SUPPORT
148
+
149
+ #define __raw_cpu_read (qual , pcp ) \
150
+ ({ \
151
+ *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \
152
+ })
153
+
154
+ #define __raw_cpu_write (qual , pcp , val ) \
155
+ do { \
156
+ *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \
157
+ } while (0)
158
+
159
+ #else /* CONFIG_USE_X86_SEG_SUPPORT */
/*
 * Read a per-CPU variable of the given size via inline asm:
 * emits "op <segment-prefixed var>, %[val]" and casts the result
 * back to the variable's own type through unsigned long.
 * @qual may be e.g. "volatile" to qualify the asm statement.
 */
#define percpu_from_op(size, qual, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (__my_cpu_var(_var)));				\
	(typeof(_var))(unsigned long) pfo_val__;			\
})
147
170
#define percpu_to_op (size , qual , op , _var , _val ) \
148
171
do { \
149
172
__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
@@ -157,6 +180,17 @@ do { \
157
180
: [val] __pcpu_reg_imm_##size(pto_val__)); \
158
181
} while (0)
159
182
183
+ #endif /* CONFIG_USE_X86_SEG_SUPPORT */
184
+
185
+ #define percpu_stable_op (size , op , _var ) \
186
+ ({ \
187
+ __pcpu_type_##size pfo_val__; \
188
+ asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \
189
+ : [val] __pcpu_reg_##size("=", pfo_val__) \
190
+ : [var] "i" (&(_var))); \
191
+ (typeof(_var))(unsigned long) pfo_val__; \
192
+ })
193
+
160
194
#define percpu_unary_op (size , qual , op , _var ) \
161
195
({ \
162
196
asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \
@@ -198,24 +232,6 @@ do { \
198
232
percpu_binary_op(size, qual, "add", var, val); \
199
233
} while (0)
200
234
201
- #define percpu_from_op (size , qual , op , _var ) \
202
- ({ \
203
- __pcpu_type_##size pfo_val__; \
204
- asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \
205
- : [val] __pcpu_reg_##size("=", pfo_val__) \
206
- : [var] "m" (__my_cpu_var(_var))); \
207
- (typeof(_var))(unsigned long) pfo_val__; \
208
- })
209
-
210
- #define percpu_stable_op (size , op , _var ) \
211
- ({ \
212
- __pcpu_type_##size pfo_val__; \
213
- asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \
214
- : [val] __pcpu_reg_##size("=", pfo_val__) \
215
- : [var] "i" (&(_var))); \
216
- (typeof(_var))(unsigned long) pfo_val__; \
217
- })
218
-
/*
 * Add return operation
 */
/*
 * Size-dispatched read of a per-CPU value via the this_cpu_read_stable_N
 * family (expected to be asm reads without a memory input, so the
 * compiler may cache the result — see percpu_stable_op elsewhere in
 * this file).
 */
#define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
#ifdef CONFIG_USE_X86_SEG_SUPPORT

/*
 * Size-specific raw (non-preemption-protected) per-CPU reads, built on
 * the segment-qualified __raw_cpu_read() accessor with an empty
 * qualifier argument.
 */
#define raw_cpu_read_1(pcp)	__raw_cpu_read(, pcp)
#define raw_cpu_read_2(pcp)	__raw_cpu_read(, pcp)
#define raw_cpu_read_4(pcp)	__raw_cpu_read(, pcp)
0 commit comments