include/linux/percpu.h

  #ifndef __LINUX_PERCPU_H
  #define __LINUX_PERCPU_H

  #include <linux/preempt.h>
  #include <linux/smp.h>
  #include <linux/cpumask.h>
  #include <linux/pfn.h>
  #include <linux/init.h>

  #include <asm/percpu.h>
  /* enough to cover all DEFINE_PER_CPUs in modules */
  #ifdef CONFIG_MODULES
  #define PERCPU_MODULE_RESERVE		(8 << 10)
  #else
  #define PERCPU_MODULE_RESERVE		0
  #endif
  #ifndef PERCPU_ENOUGH_ROOM
  #define PERCPU_ENOUGH_ROOM						\
  	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
  	 PERCPU_MODULE_RESERVE)
  #endif

  /*
   * Must be an lvalue. Since @var must be a simple identifier,
   * we force a syntax error here if it isn't.
   */
  #define get_cpu_var(var) (*({				\
  	preempt_disable();				\
  	&__get_cpu_var(var); }))

  /*
   * The weird & is necessary because sparse considers (void)(var) to be
   * a direct dereference of percpu variable (var).
   */
  #define put_cpu_var(var) do {				\
  	(void)&(var);					\
  	preempt_enable();				\
  } while (0)

  #define get_cpu_ptr(var) ({				\
  	preempt_disable();				\
  	this_cpu_ptr(var); })
  
  #define put_cpu_ptr(var) do {				\
  	(void)(var);					\
  	preempt_enable();				\
  } while (0)
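/*
 * Example usage (an illustrative sketch, not part of the original
 * header; my_counter and my_ptr are hypothetical names):
 *
 *	static DEFINE_PER_CPU(int, my_counter);
 *
 *	get_cpu_var(my_counter)++;	(preemption stays disabled here
 *	put_cpu_var(my_counter);	 until this re-enables it)
 *
 *	int __percpu *my_ptr = alloc_percpu(int);
 *	int *p = get_cpu_ptr(my_ptr);	(pointer into this CPU's copy)
 *	(*p)++;
 *	put_cpu_ptr(my_ptr);
 */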
/* minimum unit size, which is also the maximum supported allocation size */
  #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
  
  /*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
  #define PERCPU_DYNAMIC_EARLY_SLOTS	128
  #define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
  
  /*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch
 * is manually allocating and mapping it for faster access (as a part
 * of large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
  #if BITS_PER_LONG > 32
  #define PERCPU_DYNAMIC_RESERVE		(20 << 10)
  #else
  #define PERCPU_DYNAMIC_RESERVE		(12 << 10)
  #endif

  extern void *pcpu_base_addr;
  extern const unsigned long *pcpu_unit_offsets;

  struct pcpu_group_info {
  	int			nr_units;	/* aligned # of units */
  	unsigned long		base_offset;	/* base address offset */
  	unsigned int		*cpu_map;	/* unit->cpu map, empty
  						 * entries contain NR_CPUS */
  };
  
  struct pcpu_alloc_info {
  	size_t			static_size;
  	size_t			reserved_size;
  	size_t			dyn_size;
  	size_t			unit_size;
  	size_t			atom_size;
  	size_t			alloc_size;
  	size_t			__ai_size;	/* internal, don't use */
  	int			nr_groups;	/* 0 if grouping unnecessary */
  	struct pcpu_group_info	groups[];
  };
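
/*
 * Example (an illustrative sketch, not part of the original header):
 * walking an alloc_info to dump the unit->cpu mapping; dump_units()
 * is a hypothetical helper.
 *
 *	static void __init dump_units(const struct pcpu_alloc_info *ai)
 *	{
 *		int g, u;
 *
 *		for (g = 0; g < ai->nr_groups; g++) {
 *			const struct pcpu_group_info *gi = &ai->groups[g];
 *
 *			for (u = 0; u < gi->nr_units; u++)
 *				if (gi->cpu_map[u] != NR_CPUS)
 *					pr_info("group %d unit %d -> cpu %u\n",
 *						g, u, gi->cpu_map[u]);
 *		}
 *	}
 */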
  enum pcpu_fc {
  	PCPU_FC_AUTO,
  	PCPU_FC_EMBED,
  	PCPU_FC_PAGE,
  
  	PCPU_FC_NR,
  };
  extern const char *pcpu_fc_names[PCPU_FC_NR];
  
  extern enum pcpu_fc pcpu_chosen_fc;
  typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
  				     size_t align);
  typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
  typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
  typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

  extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  							     int nr_units);
  extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
  extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
  					 void *base_addr);

  #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
  extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
  				size_t atom_size,
  				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
  				pcpu_fc_alloc_fn_t alloc_fn,
  				pcpu_fc_free_fn_t free_fn);
  #endif

  #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
  extern int __init pcpu_page_first_chunk(size_t reserved_size,
  				pcpu_fc_alloc_fn_t alloc_fn,
  				pcpu_fc_free_fn_t free_fn,
  				pcpu_fc_populate_pte_fn_t populate_pte_fn);
  #endif

/*
 * Use this to get a given cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
  #ifdef CONFIG_SMP
  #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
  #else
  #define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
  #endif
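
/*
 * Example (an illustrative sketch, not part of the original header;
 * my_count and total_count() are hypothetical): summing a dynamically
 * allocated percpu counter over all possible CPUs.
 *
 *	static unsigned long __percpu *my_count;
 *
 *	static unsigned long total_count(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(my_count, cpu);
 *		return sum;
 *	}
 */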

  extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
  extern bool is_kernel_percpu_address(unsigned long addr);

  #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
  extern void __init setup_per_cpu_areas(void);
  #endif
  extern void __init percpu_init_late(void);

  extern void __percpu *__alloc_percpu(size_t size, size_t align);
  extern void free_percpu(void __percpu *__pdata);
  extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
  #define alloc_percpu(type)	\
  	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
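
/*
 * Example (an illustrative sketch, not part of the original header;
 * struct my_stats is hypothetical):
 *
 *	struct my_stats { unsigned long rx, tx; };
 *	struct my_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct my_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->rx);	(count on the local CPU)
 *	...
 *	free_percpu(stats);
 */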

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long.  percpu_read() evaluates to an lvalue and
 * all others to void.
 *
   * These operations are guaranteed to be atomic.
   * The generic versions disable interrupts.  Archs are
   * encouraged to implement single-instruction alternatives which don't
   * require protection.
   */
  #ifndef percpu_read
  # define percpu_read(var)						\
    ({									\
  	typeof(var) *pr_ptr__ = &(var);					\
  	typeof(var) pr_ret__;						\
  	pr_ret__ = get_cpu_var(*pr_ptr__);				\
  	put_cpu_var(*pr_ptr__);						\
  	pr_ret__;							\
    })
  #endif
  
  #define __percpu_generic_to_op(var, val, op)				\
  do {									\
  	typeof(var) *pgto_ptr__ = &(var);				\
  	get_cpu_var(*pgto_ptr__) op val;				\
  	put_cpu_var(*pgto_ptr__);					\
  } while (0)
  
  #ifndef percpu_write
  # define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
  #endif
  
  #ifndef percpu_add
  # define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
  #endif
  
  #ifndef percpu_sub
  # define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
  #endif
  
  #ifndef percpu_and
  # define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
  #endif
  
  #ifndef percpu_or
  # define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
  #endif
  
  #ifndef percpu_xor
  # define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
  #endif
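
/*
 * Example (an illustrative sketch, not part of the original header;
 * nr_events is a hypothetical static percpu variable):
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	percpu_add(nr_events, 1);
 *	if (percpu_read(nr_events) > 1000)
 *		percpu_write(nr_events, 0);
 */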
/*
 * Branching macros that split an operation up into a set of functions
 * called for the different scalar sizes of the objects handled.
 */
  
  extern void __bad_size_call_parameter(void);
  #define __pcpu_size_call_return(stem, variable)				\
  ({	typeof(variable) pscr_ret__;					\
  	__verify_pcpu_ptr(&(variable));					\
  	switch(sizeof(variable)) {					\
  	case 1: pscr_ret__ = stem##1(variable);break;			\
  	case 2: pscr_ret__ = stem##2(variable);break;			\
  	case 4: pscr_ret__ = stem##4(variable);break;			\
  	case 8: pscr_ret__ = stem##8(variable);break;			\
  	default:							\
  		__bad_size_call_parameter();break;			\
  	}								\
  	pscr_ret__;							\
  })
  #define __pcpu_size_call_return2(stem, variable, ...)			\
  ({									\
  	typeof(variable) pscr2_ret__;					\
  	__verify_pcpu_ptr(&(variable));					\
  	switch(sizeof(variable)) {					\
  	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
  	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
  	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
  	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
  	default:							\
  		__bad_size_call_parameter(); break;			\
  	}								\
  	pscr2_ret__;							\
  })
  /*
   * Special handling for cmpxchg_double.  cmpxchg_double is passed two
   * percpu variables.  The first has to be aligned to a double word
   * boundary and the second has to follow directly thereafter.
   * We enforce this on all architectures even if they don't support
   * a double cmpxchg instruction, since it's a cheap requirement, and it
   * avoids breaking the requirement for architectures with the instruction.
   */
  #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
  ({									\
  	bool pdcrb_ret__;						\
  	__verify_pcpu_ptr(&pcp1);					\
  	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
  	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
  	VM_BUG_ON((unsigned long)(&pcp2) !=				\
  		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
  	switch(sizeof(pcp1)) {						\
  	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
  	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
  	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
  	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
  	default:							\
  		__bad_size_call_parameter(); break;			\
  	}								\
  	pdcrb_ret__;							\
  })
  #define __pcpu_size_call(stem, variable, ...)				\
  do {									\
  	__verify_pcpu_ptr(&(variable));					\
  	switch(sizeof(variable)) {					\
  		case 1: stem##1(variable, __VA_ARGS__);break;		\
  		case 2: stem##2(variable, __VA_ARGS__);break;		\
  		case 4: stem##4(variable, __VA_ARGS__);break;		\
  		case 8: stem##8(variable, __VA_ARGS__);break;		\
  		default: 						\
  			__bad_size_call_parameter();break;		\
  	}								\
  } while (0)
  
  /*
   * Optimized manipulation for memory allocated through the per cpu
   * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe. Interrupts may occur. If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely. E.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes. E.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
   */
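
/*
 * For example (an illustrative sketch, not part of the original
 * header), an arch whose percpu addressing allows a single-instruction
 * add might take option 2 for the 4-byte case in its asm/percpu.h,
 * with percpu_add_op() standing for the arch-provided primitive (x86
 * defines one of that name):
 *
 *	#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
 *
 * The generic this_cpu_add() below then dispatches 4-byte adds to it
 * and uses _this_cpu_generic_to_op() for the sizes left undefined.
 */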
  
  #define _this_cpu_generic_read(pcp)					\
  ({	typeof(pcp) ret__;						\
  	preempt_disable();						\
  	ret__ = *this_cpu_ptr(&(pcp));					\
  	preempt_enable();						\
  	ret__;								\
  })
  
  #ifndef this_cpu_read
  # ifndef this_cpu_read_1
  #  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
  # endif
  # ifndef this_cpu_read_2
  #  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
  # endif
  # ifndef this_cpu_read_4
  #  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
  # endif
  # ifndef this_cpu_read_8
  #  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
  # endif
  # define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
  #endif
  
  #define _this_cpu_generic_to_op(pcp, val, op)				\
  do {									\
  	unsigned long flags;						\
  	local_irq_save(flags);						\
  	*__this_cpu_ptr(&(pcp)) op val;					\
  	local_irq_restore(flags);					\
  } while (0)
  
  #ifndef this_cpu_write
  # ifndef this_cpu_write_1
  #  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # ifndef this_cpu_write_2
  #  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # ifndef this_cpu_write_4
  #  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # ifndef this_cpu_write_8
  #  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
  #endif
  
  #ifndef this_cpu_add
  # ifndef this_cpu_add_1
  #  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # ifndef this_cpu_add_2
  #  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # ifndef this_cpu_add_4
  #  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # ifndef this_cpu_add_8
  #  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
  #endif
  
  #ifndef this_cpu_sub
  # define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
  #endif
  
  #ifndef this_cpu_inc
  # define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
  #endif
  
  #ifndef this_cpu_dec
  # define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
  #endif
  
  #ifndef this_cpu_and
  # ifndef this_cpu_and_1
  #  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # ifndef this_cpu_and_2
  #  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # ifndef this_cpu_and_4
  #  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # ifndef this_cpu_and_8
  #  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
  #endif
  
  #ifndef this_cpu_or
  # ifndef this_cpu_or_1
  #  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # ifndef this_cpu_or_2
  #  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # ifndef this_cpu_or_4
  #  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # ifndef this_cpu_or_8
  #  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
  #endif
  
  #ifndef this_cpu_xor
  # ifndef this_cpu_xor_1
  #  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # ifndef this_cpu_xor_2
  #  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # ifndef this_cpu_xor_4
  #  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # ifndef this_cpu_xor_8
  #  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
  #endif
  #define _this_cpu_generic_add_return(pcp, val)				\
  ({									\
  	typeof(pcp) ret__;						\
  	unsigned long flags;						\
  	local_irq_save(flags);						\
  	__this_cpu_add(pcp, val);					\
  	ret__ = __this_cpu_read(pcp);					\
  	local_irq_restore(flags);					\
  	ret__;								\
  })
  
  #ifndef this_cpu_add_return
  # ifndef this_cpu_add_return_1
  #  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
  # endif
  # ifndef this_cpu_add_return_2
  #  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
  # endif
  # ifndef this_cpu_add_return_4
  #  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
  # endif
  # ifndef this_cpu_add_return_8
  #  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
  # endif
  # define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
  #endif
  
  #define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
  #define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
  #define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
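
/*
 * Example (an illustrative sketch, not part of the original header;
 * my_seq is a hypothetical static percpu variable): drawing per-cpu
 * sequence numbers without locking.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_seq);
 *
 *	unsigned long seq = this_cpu_inc_return(my_seq);
 *
 * The generic version does the add and the read back with interrupts
 * disabled, so an interrupt on the same CPU cannot slip in between.
 */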
  #define _this_cpu_generic_xchg(pcp, nval)				\
  ({	typeof(pcp) ret__;						\
  	unsigned long flags;						\
  	local_irq_save(flags);						\
  	ret__ = __this_cpu_read(pcp);					\
  	__this_cpu_write(pcp, nval);					\
  	local_irq_restore(flags);					\
  	ret__;								\
  })
  
  #ifndef this_cpu_xchg
  # ifndef this_cpu_xchg_1
  #  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
  # endif
  # ifndef this_cpu_xchg_2
  #  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
  # endif
  # ifndef this_cpu_xchg_4
  #  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
  # endif
  # ifndef this_cpu_xchg_8
  #  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
  # endif
  # define this_cpu_xchg(pcp, nval)	\
  	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
  #endif
  
  #define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
  ({									\
  	typeof(pcp) ret__;						\
  	unsigned long flags;						\
  	local_irq_save(flags);						\
  	ret__ = __this_cpu_read(pcp);					\
  	if (ret__ == (oval))						\
  		__this_cpu_write(pcp, nval);				\
  	local_irq_restore(flags);					\
  	ret__;								\
  })
  
  #ifndef this_cpu_cmpxchg
  # ifndef this_cpu_cmpxchg_1
  #  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # ifndef this_cpu_cmpxchg_2
  #  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # ifndef this_cpu_cmpxchg_4
  #  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # ifndef this_cpu_cmpxchg_8
  #  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # define this_cpu_cmpxchg(pcp, oval, nval)	\
  	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
  #endif
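
/*
 * Example (an illustrative sketch, not part of the original header;
 * my_max and record_max() are hypothetical): a lockless
 * "remember the maximum" update built on this_cpu_cmpxchg().
 *
 *	static DEFINE_PER_CPU(unsigned long, my_max);
 *
 *	static void record_max(unsigned long val)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = this_cpu_read(my_max);
 *			if (val <= old)
 *				return;
 *		} while (this_cpu_cmpxchg(my_max, old, val) != old);
 *	}
 */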
  /*
   * cmpxchg_double replaces two adjacent scalars at once.  The first
   * two parameters are per cpu variables which have to be of the same
   * size.  A truth value is returned to indicate success or failure
   * (since a double register result is difficult to handle).  There is
   * very limited hardware support for these operations, so only certain
   * sizes may work.
   */
  #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  ({									\
  	int ret__;							\
  	unsigned long flags;						\
  	local_irq_save(flags);						\
  	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
  			oval1, oval2, nval1, nval2);			\
  	local_irq_restore(flags);					\
  	ret__;								\
  })
  
  #ifndef this_cpu_cmpxchg_double
  # ifndef this_cpu_cmpxchg_double_1
  #  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # ifndef this_cpu_cmpxchg_double_2
  #  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # ifndef this_cpu_cmpxchg_double_4
  #  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # ifndef this_cpu_cmpxchg_double_8
  #  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
  #endif
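
/*
 * Example (an illustrative sketch, not part of the original header;
 * struct my_pair and its fields are hypothetical): two adjacent percpu
 * words updated atomically as a pair, in the style of SLUB's
 * freelist/tid pairing.  The aligned attribute satisfies the
 * double-word alignment requirement checked above.
 *
 *	struct my_pair {
 *		void *ptr;
 *		unsigned long seq;
 *	} __attribute__((aligned(2 * sizeof(void *))));
 *
 *	static DEFINE_PER_CPU(struct my_pair, pair);
 *
 *	bool ok = this_cpu_cmpxchg_double(pair.ptr, pair.seq,
 *					  old_ptr, old_seq,
 *					  new_ptr, new_seq);
 */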
  
  /*
 * Generic percpu operations for contexts that are safe from preemption/interrupts.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption/interrupt issues. Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
   */
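
/*
 * Example (an illustrative sketch, not part of the original header;
 * my_stat is a hypothetical static percpu variable).  The caller
 * provides the protection, so the cheaper __this_cpu ops can be used:
 *
 *	static DEFINE_PER_CPU(unsigned long, my_stat);
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_stat);
 *	preempt_enable();
 *
 * This is safe against rescheduling, but per the above it is not safe
 * against an interrupt handler that modifies my_stat on the same CPU.
 */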
  #ifndef __this_cpu_read
  # ifndef __this_cpu_read_1
  #  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
  # endif
  # ifndef __this_cpu_read_2
  #  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
  # endif
  # ifndef __this_cpu_read_4
  #  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
  # endif
  # ifndef __this_cpu_read_8
  #  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
  # endif
  # define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
  #endif
  
  #define __this_cpu_generic_to_op(pcp, val, op)				\
  do {									\
  	*__this_cpu_ptr(&(pcp)) op val;					\
  } while (0)
  
  #ifndef __this_cpu_write
  # ifndef __this_cpu_write_1
  #  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # ifndef __this_cpu_write_2
  #  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # ifndef __this_cpu_write_4
  #  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # ifndef __this_cpu_write_8
  #  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
  # endif
  # define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
  #endif
  
  #ifndef __this_cpu_add
  # ifndef __this_cpu_add_1
  #  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # ifndef __this_cpu_add_2
  #  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # ifndef __this_cpu_add_4
  #  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # ifndef __this_cpu_add_8
  #  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
  # endif
  # define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
  #endif
  
  #ifndef __this_cpu_sub
  # define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
  #endif
  
  #ifndef __this_cpu_inc
  # define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
  #endif
  
  #ifndef __this_cpu_dec
  # define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
  #endif
  
  #ifndef __this_cpu_and
  # ifndef __this_cpu_and_1
  #  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # ifndef __this_cpu_and_2
  #  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # ifndef __this_cpu_and_4
  #  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # ifndef __this_cpu_and_8
  #  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
  # endif
  # define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
  #endif
  
  #ifndef __this_cpu_or
  # ifndef __this_cpu_or_1
  #  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # ifndef __this_cpu_or_2
  #  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # ifndef __this_cpu_or_4
  #  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # ifndef __this_cpu_or_8
  #  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
  # endif
  # define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
  #endif
  
  #ifndef __this_cpu_xor
  # ifndef __this_cpu_xor_1
  #  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # ifndef __this_cpu_xor_2
  #  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # ifndef __this_cpu_xor_4
  #  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # ifndef __this_cpu_xor_8
  #  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
  # endif
  # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
  #endif
  #define __this_cpu_generic_add_return(pcp, val)				\
  ({									\
  	__this_cpu_add(pcp, val);					\
  	__this_cpu_read(pcp);						\
  })
  
  #ifndef __this_cpu_add_return
  # ifndef __this_cpu_add_return_1
  #  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
  # endif
  # ifndef __this_cpu_add_return_2
  #  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
  # endif
  # ifndef __this_cpu_add_return_4
  #  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
  # endif
  # ifndef __this_cpu_add_return_8
  #  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
  # endif
# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
  #endif
  
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
  #define __this_cpu_generic_xchg(pcp, nval)				\
  ({	typeof(pcp) ret__;						\
  	ret__ = __this_cpu_read(pcp);					\
  	__this_cpu_write(pcp, nval);					\
  	ret__;								\
  })
  
  #ifndef __this_cpu_xchg
  # ifndef __this_cpu_xchg_1
  #  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
  # endif
  # ifndef __this_cpu_xchg_2
  #  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
  # endif
  # ifndef __this_cpu_xchg_4
  #  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
  # endif
  # ifndef __this_cpu_xchg_8
  #  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
  # endif
  # define __this_cpu_xchg(pcp, nval)	\
  	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
  #endif
  
  #define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
  ({									\
  	typeof(pcp) ret__;						\
  	ret__ = __this_cpu_read(pcp);					\
  	if (ret__ == (oval))						\
  		__this_cpu_write(pcp, nval);				\
  	ret__;								\
  })
  
  #ifndef __this_cpu_cmpxchg
  # ifndef __this_cpu_cmpxchg_1
  #  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # ifndef __this_cpu_cmpxchg_2
  #  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # ifndef __this_cpu_cmpxchg_4
  #  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # ifndef __this_cpu_cmpxchg_8
  #  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
  # endif
  # define __this_cpu_cmpxchg(pcp, oval, nval)	\
  	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
  #endif
  #define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  ({									\
  	int __ret = 0;							\
  	if (__this_cpu_read(pcp1) == (oval1) &&				\
  			 __this_cpu_read(pcp2)  == (oval2)) {		\
  		__this_cpu_write(pcp1, (nval1));			\
  		__this_cpu_write(pcp2, (nval2));			\
  		__ret = 1;						\
  	}								\
  	(__ret);							\
  })
  
  #ifndef __this_cpu_cmpxchg_double
  # ifndef __this_cpu_cmpxchg_double_1
  #  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # ifndef __this_cpu_cmpxchg_double_2
  #  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # ifndef __this_cpu_cmpxchg_double_4
  #  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # ifndef __this_cpu_cmpxchg_double_8
  #  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
  # endif
  # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
  	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
  #endif
  #endif /* __LINUX_PERCPU_H */