Blame view

include/linux/percpu-defs.h 17.7 KB
62fde5412   Tejun Heo   percpu: include/a...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
  /*
   * linux/percpu-defs.h - basic definitions for percpu areas
   *
   * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
   *
   * This file is separate from linux/percpu.h to avoid cyclic inclusion
   * dependency from arch header files.  Only to be included from
   * asm/percpu.h.
   *
   * This file includes macros necessary to declare percpu sections and
   * variables, and definitions of percpu accessors and operations.  It
   * should provide enough percpu features to arch header files even when
   * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
   */
5028eaa97   David Howells   PERCPU: Collect t...
15
16
  #ifndef _LINUX_PERCPU_DEFS_H
  #define _LINUX_PERCPU_DEFS_H
62fde5412   Tejun Heo   percpu: include/a...
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
  #ifdef CONFIG_SMP
  
  #ifdef MODULE
  #define PER_CPU_SHARED_ALIGNED_SECTION ""
  #define PER_CPU_ALIGNED_SECTION ""
  #else
  #define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
  #define PER_CPU_ALIGNED_SECTION "..shared_aligned"
  #endif
  #define PER_CPU_FIRST_SECTION "..first"
  
  #else
  
  #define PER_CPU_SHARED_ALIGNED_SECTION ""
  #define PER_CPU_ALIGNED_SECTION "..shared_aligned"
  #define PER_CPU_FIRST_SECTION ""
  
  #endif
5028eaa97   David Howells   PERCPU: Collect t...
35
  /*
5028eaa97   David Howells   PERCPU: Collect t...
36
37
   * Base implementations of per-CPU variable declarations and definitions, where
   * the section in which the variable is to be placed is provided by the
7c756e6e1   Tejun Heo   percpu: implement...
38
   * 'sec' argument.  This may be used to affect the parameters governing the
5028eaa97   David Howells   PERCPU: Collect t...
39
40
41
42
43
44
   * variable's storage.
   *
   * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
   * linkage errors occur due the compiler generating the wrong code to access
   * that section.
   */
7c756e6e1   Tejun Heo   percpu: implement...
45
  #define __PCPU_ATTRS(sec)						\
e0fdb0e05   Rusty Russell   percpu: add __per...
46
  	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
7c756e6e1   Tejun Heo   percpu: implement...
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
  	PER_CPU_ATTRIBUTES
  
  #define __PCPU_DUMMY_ATTRS						\
  	__attribute__((section(".discard"), unused))
  
  /*
   * s390 and alpha modules require percpu variables to be defined as
   * weak to force the compiler to generate GOT based external
   * references for them.  This is necessary because percpu sections
   * will be located outside of the usually addressable area.
   *
   * This definition puts the following two extra restrictions when
   * defining percpu variables.
   *
   * 1. The symbol must be globally unique, even the static ones.
   * 2. Static percpu variables cannot be defined inside a function.
   *
   * Archs which need weak percpu definitions should define
   * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
   *
   * To ensure that the generic code observes the above two
   * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
   * definition is used for all cases.
   */
  #if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
  /*
   * __pcpu_scope_* dummy variable is used to enforce scope.  It
   * receives the static modifier when it's used in front of
   * DEFINE_PER_CPU() and will trigger build failure if
   * DECLARE_PER_CPU() is used for the same variable.
   *
   * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
   * such that hidden weak symbol collision, which will cause unrelated
   * variables to share the same address, can be detected during build.
   */
  #define DECLARE_PER_CPU_SECTION(type, name, sec)			\
  	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
dd17c8f72   Rusty Russell   percpu: remove pe...
84
  	extern __PCPU_ATTRS(sec) __typeof__(type) name
7c756e6e1   Tejun Heo   percpu: implement...
85
86
87
  
  #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
  	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
0f5e4816d   Tejun Heo   percpu: remove so...
88
  	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
7c756e6e1   Tejun Heo   percpu: implement...
89
  	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
b1a0fbfdd   Tejun Heo   percpu: fix spuri...
90
  	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
c43768cbb   Tejun Heo   Merge branch 'mas...
91
  	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
dd17c8f72   Rusty Russell   percpu: remove pe...
92
  	__typeof__(type) name
7c756e6e1   Tejun Heo   percpu: implement...
93
94
95
96
97
  #else
  /*
   * Normal declaration and definition macros.
   */
  #define DECLARE_PER_CPU_SECTION(type, name, sec)			\
dd17c8f72   Rusty Russell   percpu: remove pe...
98
  	extern __PCPU_ATTRS(sec) __typeof__(type) name
7c756e6e1   Tejun Heo   percpu: implement...
99
100
  
  #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
c43768cbb   Tejun Heo   Merge branch 'mas...
101
  	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
dd17c8f72   Rusty Russell   percpu: remove pe...
102
  	__typeof__(type) name
7c756e6e1   Tejun Heo   percpu: implement...
103
  #endif
5028eaa97   David Howells   PERCPU: Collect t...
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
  
  /*
   * Variant on the per-CPU variable declaration/definition theme used for
   * ordinary per-CPU variables.
   */
  #define DECLARE_PER_CPU(type, name)					\
  	DECLARE_PER_CPU_SECTION(type, name, "")
  
  #define DEFINE_PER_CPU(type, name)					\
  	DEFINE_PER_CPU_SECTION(type, name, "")
  
  /*
   * Declaration/definition used for per-CPU variables that must come first in
   * the set of variables.
   */
  #define DECLARE_PER_CPU_FIRST(type, name)				\
  	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
  
  #define DEFINE_PER_CPU_FIRST(type, name)				\
  	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
  
  /*
   * Declaration/definition used for per-CPU variables that must be cacheline
   * aligned under SMP conditions so that, whilst a particular instance of the
   * data corresponds to a particular CPU, inefficiencies due to direct access by
   * other CPUs are reduced by preventing the data from unnecessarily spanning
   * cachelines.
   *
   * An example of this would be statistical data, where each CPU's set of data
   * is updated by that CPU alone, but the data from across all CPUs is collated
   * by a CPU processing a read from a proc file.
   */
  #define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
  	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
  	____cacheline_aligned_in_smp
  
  #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
  	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
  	____cacheline_aligned_in_smp
53f824520   Jeremy Fitzhardinge   x86/i386: Put ali...
143
144
145
146
147
148
149
  #define DECLARE_PER_CPU_ALIGNED(type, name)				\
  	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
  	____cacheline_aligned
  
  #define DEFINE_PER_CPU_ALIGNED(type, name)				\
  	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
  	____cacheline_aligned
5028eaa97   David Howells   PERCPU: Collect t...
150
151
152
  /*
   * Declaration/definition used for per-CPU variables that must be page aligned.
   */
3e352aa8e   Tejun Heo   x86, percpu: Fix ...
153
  #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
3d9a854c2   Denys Vlasenko   Rename .data[.per...
154
  	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
3e352aa8e   Tejun Heo   x86, percpu: Fix ...
155
  	__aligned(PAGE_SIZE)
5028eaa97   David Howells   PERCPU: Collect t...
156
157
  
  #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
3d9a854c2   Denys Vlasenko   Rename .data[.per...
158
  	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
3e352aa8e   Tejun Heo   x86, percpu: Fix ...
159
  	__aligned(PAGE_SIZE)
5028eaa97   David Howells   PERCPU: Collect t...
160
161
  
  /*
c957ef2c5   Shaohua Li   percpu: Introduce...
162
163
164
   * Declaration/definition used for per-CPU variables that must be read mostly.
   */
  #define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
330d28221   Zhengyu He   core: fix typo in...
165
  	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
c957ef2c5   Shaohua Li   percpu: Introduce...
166
167
  
  #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
330d28221   Zhengyu He   core: fix typo in...
168
  	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
c957ef2c5   Shaohua Li   percpu: Introduce...
169
170
  
  /*
545695fb4   Tejun Heo   percpu: make acce...
171
172
173
   * Intermodule exports for per-CPU variables.  sparse forgets about
   * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
   * noop if __CHECKER__.
5028eaa97   David Howells   PERCPU: Collect t...
174
   */
545695fb4   Tejun Heo   percpu: make acce...
175
  #ifndef __CHECKER__
dd17c8f72   Rusty Russell   percpu: remove pe...
176
177
  #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
  #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
545695fb4   Tejun Heo   percpu: make acce...
178
179
180
181
  #else
  #define EXPORT_PER_CPU_SYMBOL(var)
  #define EXPORT_PER_CPU_SYMBOL_GPL(var)
  #endif
5028eaa97   David Howells   PERCPU: Collect t...
182

62fde5412   Tejun Heo   percpu: include/a...
183
184
185
186
  /*
   * Accessors and operations.
   */
  #ifndef __ASSEMBLY__
9c28278a2   Tejun Heo   percpu: reorder m...
187
  /*
6fbc07bbe   Tejun Heo   percpu: invoke __...
188
189
190
191
192
193
   * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
   * @ptr and is invoked once before a percpu area is accessed by all
   * accessors and operations.  This is performed in the generic part of
   * percpu and arch overrides don't need to worry about it; however, if an
   * arch wants to implement an arch-specific percpu accessor or operation,
   * it may use __verify_pcpu_ptr() to verify the parameters.
9c28278a2   Tejun Heo   percpu: reorder m...
194
195
196
197
   *
   * + 0 is required in order to convert the pointer type from a
   * potential array type to a pointer to a single item of the array.
   */
eba117889   Tejun Heo   percpu: preffity ...
198
199
  #define __verify_pcpu_ptr(ptr)						\
  do {									\
9c28278a2   Tejun Heo   percpu: reorder m...
200
201
202
  	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
  	(void)__vpp_verify;						\
  } while (0)
62fde5412   Tejun Heo   percpu: include/a...
203
204
205
206
207
208
209
  #ifdef CONFIG_SMP
  
  /*
   * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
   * to prevent the compiler from making incorrect assumptions about the
   * pointer value.  The weird cast keeps both GCC and sparse happy.
   */
eba117889   Tejun Heo   percpu: preffity ...
210
  #define SHIFT_PERCPU_PTR(__p, __offset)					\
6fbc07bbe   Tejun Heo   percpu: invoke __...
211
212
213
  	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
  
  #define per_cpu_ptr(ptr, cpu)						\
eba117889   Tejun Heo   percpu: preffity ...
214
  ({									\
6fbc07bbe   Tejun Heo   percpu: invoke __...
215
216
  	__verify_pcpu_ptr(ptr);						\
  	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
62fde5412   Tejun Heo   percpu: include/a...
217
  })
6fbc07bbe   Tejun Heo   percpu: invoke __...
218
219
220
221
222
  #define raw_cpu_ptr(ptr)						\
  ({									\
  	__verify_pcpu_ptr(ptr);						\
  	arch_raw_cpu_ptr(ptr);						\
  })
62fde5412   Tejun Heo   percpu: include/a...
223
224
  
  #ifdef CONFIG_DEBUG_PREEMPT
6fbc07bbe   Tejun Heo   percpu: invoke __...
225
226
227
228
229
  #define this_cpu_ptr(ptr)						\
  ({									\
  	__verify_pcpu_ptr(ptr);						\
  	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
  })
62fde5412   Tejun Heo   percpu: include/a...
230
231
232
  #else
  #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
  #endif
62fde5412   Tejun Heo   percpu: include/a...
233
  #else	/* CONFIG_SMP */
eba117889   Tejun Heo   percpu: preffity ...
234
235
236
237
  #define VERIFY_PERCPU_PTR(__p)						\
  ({									\
  	__verify_pcpu_ptr(__p);						\
  	(typeof(*(__p)) __kernel __force *)(__p);			\
62fde5412   Tejun Heo   percpu: include/a...
238
  })
eba117889   Tejun Heo   percpu: preffity ...
239
  #define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
3b8ed91d6   Tejun Heo   percpu: reorganiz...
240
241
  #define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
  #define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
62fde5412   Tejun Heo   percpu: include/a...
242
243
  
  #endif	/* CONFIG_SMP */
3b8ed91d6   Tejun Heo   percpu: reorganiz...
244
  #define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
3b8ed91d6   Tejun Heo   percpu: reorganiz...
245

9defda18f   Tejun Heo   percpu: move acce...
246
247
248
249
  /*
   * Must be an lvalue. Since @var must be a simple identifier,
   * we force a syntax error here if it isn't.
   */
eba117889   Tejun Heo   percpu: preffity ...
250
251
252
253
254
  #define get_cpu_var(var)						\
  (*({									\
  	preempt_disable();						\
  	this_cpu_ptr(&var);						\
  }))
9defda18f   Tejun Heo   percpu: move acce...
255
256
257
258
259
  
  /*
   * The weird & is necessary because sparse considers (void)(var) to be
   * a direct dereference of percpu variable (var).
   */
eba117889   Tejun Heo   percpu: preffity ...
260
261
262
263
  #define put_cpu_var(var)						\
  do {									\
  	(void)&(var);							\
  	preempt_enable();						\
9defda18f   Tejun Heo   percpu: move acce...
264
  } while (0)
eba117889   Tejun Heo   percpu: preffity ...
265
266
267
268
269
  #define get_cpu_ptr(var)						\
  ({									\
  	preempt_disable();						\
  	this_cpu_ptr(var);						\
  })
9defda18f   Tejun Heo   percpu: move acce...
270

eba117889   Tejun Heo   percpu: preffity ...
271
272
273
274
  #define put_cpu_ptr(var)						\
  do {									\
  	(void)(var);							\
  	preempt_enable();						\
9defda18f   Tejun Heo   percpu: move acce...
275
  } while (0)
a32f8d8ed   Tejun Heo   percpu: move {raw...
276
277
278
279
280
281
282
283
284
285
286
287
288
289
  /*
   * Branching function to split up a function into a set of functions that
   * are called for different scalar sizes of the objects handled.
   */
  
  extern void __bad_size_call_parameter(void);
  
  #ifdef CONFIG_DEBUG_PREEMPT
  extern void __this_cpu_preempt_check(const char *op);
  #else
  static inline void __this_cpu_preempt_check(const char *op) { }
  #endif
  
  #define __pcpu_size_call_return(stem, variable)				\
eba117889   Tejun Heo   percpu: preffity ...
290
291
  ({									\
  	typeof(variable) pscr_ret__;					\
a32f8d8ed   Tejun Heo   percpu: move {raw...
292
293
  	__verify_pcpu_ptr(&(variable));					\
  	switch(sizeof(variable)) {					\
eba117889   Tejun Heo   percpu: preffity ...
294
295
296
297
  	case 1: pscr_ret__ = stem##1(variable); break;			\
  	case 2: pscr_ret__ = stem##2(variable); break;			\
  	case 4: pscr_ret__ = stem##4(variable); break;			\
  	case 8: pscr_ret__ = stem##8(variable); break;			\
a32f8d8ed   Tejun Heo   percpu: move {raw...
298
  	default:							\
eba117889   Tejun Heo   percpu: preffity ...
299
  		__bad_size_call_parameter(); break;			\
a32f8d8ed   Tejun Heo   percpu: move {raw...
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
  	}								\
  	pscr_ret__;							\
  })
  
  #define __pcpu_size_call_return2(stem, variable, ...)			\
  ({									\
  	typeof(variable) pscr2_ret__;					\
  	__verify_pcpu_ptr(&(variable));					\
  	switch(sizeof(variable)) {					\
  	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
  	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
  	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
  	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
  	default:							\
  		__bad_size_call_parameter(); break;			\
  	}								\
  	pscr2_ret__;							\
  })
  
  /*
   * Special handling for cmpxchg_double.  cmpxchg_double is passed two
   * percpu variables.  The first has to be aligned to a double word
   * boundary and the second has to follow directly thereafter.
   * We enforce this on all architectures even if they don't support
   * a double cmpxchg instruction, since it's a cheap requirement, and it
   * avoids breaking the requirement for architectures with the instruction.
   */
  #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
  ({									\
  	bool pdcrb_ret__;						\
eba117889   Tejun Heo   percpu: preffity ...
330
  	__verify_pcpu_ptr(&(pcp1));					\
a32f8d8ed   Tejun Heo   percpu: move {raw...
331
  	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
eba117889   Tejun Heo   percpu: preffity ...
332
333
334
  	VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));	\
  	VM_BUG_ON((unsigned long)(&(pcp2)) !=				\
  		  (unsigned long)(&(pcp1)) + sizeof(pcp1));		\
a32f8d8ed   Tejun Heo   percpu: move {raw...
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
  	switch(sizeof(pcp1)) {						\
  	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
  	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
  	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
  	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
  	default:							\
  		__bad_size_call_parameter(); break;			\
  	}								\
  	pdcrb_ret__;							\
  })
  
  #define __pcpu_size_call(stem, variable, ...)				\
  do {									\
  	__verify_pcpu_ptr(&(variable));					\
  	switch(sizeof(variable)) {					\
  		case 1: stem##1(variable, __VA_ARGS__);break;		\
  		case 2: stem##2(variable, __VA_ARGS__);break;		\
  		case 4: stem##4(variable, __VA_ARGS__);break;		\
  		case 8: stem##8(variable, __VA_ARGS__);break;		\
  		default: 						\
  			__bad_size_call_parameter();break;		\
  	}								\
  } while (0)
  
/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementation by defining macros
 * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per
 * cpu atomic operations for 2 byte sized RMW actions. If arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */
  
  /*
eba117889   Tejun Heo   percpu: preffity ...
383
384
385
   * Operations for contexts where we do not want to do any checks for
   * preemptions.  Unless strictly necessary, always use [__]this_cpu_*()
   * instead.
a32f8d8ed   Tejun Heo   percpu: move {raw...
386
   *
eba117889   Tejun Heo   percpu: preffity ...
387
388
389
390
391
   * If there is no other protection through preempt disable and/or disabling
   * interupts then one of these RMW operations can show unexpected behavior
   * because the execution thread was rescheduled on another processor or an
   * interrupt occurred and the same percpu variable was modified from the
   * interrupt context.
a32f8d8ed   Tejun Heo   percpu: move {raw...
392
   */
eba117889   Tejun Heo   percpu: preffity ...
393
394
395
396
397
398
399
400
  #define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
  #define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
  #define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
  #define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
  #define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
  #define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
  #define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
  #define raw_cpu_cmpxchg(pcp, oval, nval) \
a32f8d8ed   Tejun Heo   percpu: move {raw...
401
  	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
eba117889   Tejun Heo   percpu: preffity ...
402
403
404
405
406
407
408
409
410
  #define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
  	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
  
  #define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
  #define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
  #define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
  #define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
  #define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
  #define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
a32f8d8ed   Tejun Heo   percpu: move {raw...
411
412
  
  /*
eba117889   Tejun Heo   percpu: preffity ...
413
414
   * Operations for contexts that are safe from preemption/interrupts.  These
   * operations verify that preemption is disabled.
a32f8d8ed   Tejun Heo   percpu: move {raw...
415
   */
eba117889   Tejun Heo   percpu: preffity ...
416
417
418
419
420
  #define __this_cpu_read(pcp)						\
  ({									\
  	__this_cpu_preempt_check("read");				\
  	raw_cpu_read(pcp);						\
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
421

eba117889   Tejun Heo   percpu: preffity ...
422
423
424
425
426
  #define __this_cpu_write(pcp, val)					\
  ({									\
  	__this_cpu_preempt_check("write");				\
  	raw_cpu_write(pcp, val);					\
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
427

eba117889   Tejun Heo   percpu: preffity ...
428
429
430
  #define __this_cpu_add(pcp, val)					\
  ({									\
  	__this_cpu_preempt_check("add");				\
cadb1c4db   Tejun Heo   percpu: use raw_c...
431
  	raw_cpu_add(pcp, val);						\
eba117889   Tejun Heo   percpu: preffity ...
432
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
433

eba117889   Tejun Heo   percpu: preffity ...
434
435
436
  #define __this_cpu_and(pcp, val)					\
  ({									\
  	__this_cpu_preempt_check("and");				\
cadb1c4db   Tejun Heo   percpu: use raw_c...
437
  	raw_cpu_and(pcp, val);						\
eba117889   Tejun Heo   percpu: preffity ...
438
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
439

eba117889   Tejun Heo   percpu: preffity ...
440
441
442
  #define __this_cpu_or(pcp, val)						\
  ({									\
  	__this_cpu_preempt_check("or");					\
cadb1c4db   Tejun Heo   percpu: use raw_c...
443
  	raw_cpu_or(pcp, val);						\
eba117889   Tejun Heo   percpu: preffity ...
444
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
445

eba117889   Tejun Heo   percpu: preffity ...
446
447
448
449
450
  #define __this_cpu_add_return(pcp, val)					\
  ({									\
  	__this_cpu_preempt_check("add_return");				\
  	raw_cpu_add_return(pcp, val);					\
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
451

eba117889   Tejun Heo   percpu: preffity ...
452
453
454
455
456
  #define __this_cpu_xchg(pcp, nval)					\
  ({									\
  	__this_cpu_preempt_check("xchg");				\
  	raw_cpu_xchg(pcp, nval);					\
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
457

eba117889   Tejun Heo   percpu: preffity ...
458
459
460
461
462
  #define __this_cpu_cmpxchg(pcp, oval, nval)				\
  ({									\
  	__this_cpu_preempt_check("cmpxchg");				\
  	raw_cpu_cmpxchg(pcp, oval, nval);				\
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
463

eba117889   Tejun Heo   percpu: preffity ...
464
465
466
467
  #define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
  ({	__this_cpu_preempt_check("cmpxchg_double");			\
  	raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2);	\
  })
a32f8d8ed   Tejun Heo   percpu: move {raw...
468

eba117889   Tejun Heo   percpu: preffity ...
469
470
471
472
473
474
  #define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
  #define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
  #define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
  #define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
  #define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
  #define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
a32f8d8ed   Tejun Heo   percpu: move {raw...
475
476
  
  /*
83cb8557e   Tejun Heo   percpu: update in...
477
478
   * Operations with implied preemption/interrupt protection.  These
   * operations can be used without worrying about preemption or interrupt.
a32f8d8ed   Tejun Heo   percpu: move {raw...
479
   */
eba117889   Tejun Heo   percpu: preffity ...
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
  #define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
  #define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
  #define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
  #define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
  #define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
  #define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
  #define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
  #define this_cpu_cmpxchg(pcp, oval, nval) \
  	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
  #define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
  	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
  
  #define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
  #define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
  #define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
a32f8d8ed   Tejun Heo   percpu: move {raw...
495
496
497
  #define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
  #define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
  #define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
a32f8d8ed   Tejun Heo   percpu: move {raw...
498

62fde5412   Tejun Heo   percpu: include/a...
499
  #endif /* __ASSEMBLY__ */
5028eaa97   David Howells   PERCPU: Collect t...
500
  #endif /* _LINUX_PERCPU_DEFS_H */