include/linux/compiler.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_COMPILER_H
  #define __LINUX_COMPILER_H
  #include <linux/compiler_types.h>

  #ifndef __ASSEMBLY__

  #ifdef __KERNEL__
  /*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
   */
  #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
      && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
  void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  			  int expect, int is_constant);
  
  #define likely_notrace(x)	__builtin_expect(!!(x), 1)
  #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
  #define __branch_check__(x, expect, is_constant) ({			\
  			long ______r;					\
  			static struct ftrace_likely_data		\
  				__attribute__((__aligned__(4)))		\
  				__attribute__((section("_ftrace_annotated_branch"))) \
  				______f = {				\
  				.data.func = __func__,			\
  				.data.file = __FILE__,			\
  				.data.line = __LINE__,			\
  			};						\
  			______r = __builtin_expect(!!(x), expect);	\
  			ftrace_likely_update(&______f, ______r,		\
  					     expect, is_constant);	\
  			______r;					\
  		})
  
  /*
   * Using __builtin_constant_p(x) to ignore cases where the return
   * value is always the same.  This idea is taken from a similar patch
   * written by Daniel Walker.
   */
  # ifndef likely
  #  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
  # endif
  # ifndef unlikely
  #  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
  # endif
  
  #ifdef CONFIG_PROFILE_ALL_BRANCHES
  /*
   * "Define 'is'", Bill Clinton
   * "Define 'if'", Steven Rostedt
   */
  #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
  #define __trace_if(cond) \
  	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
  	({								\
  		int ______r;						\
  		static struct ftrace_branch_data			\
  			__attribute__((__aligned__(4)))			\
  			__attribute__((section("_ftrace_branch")))	\
  			______f = {					\
  				.func = __func__,			\
  				.file = __FILE__,			\
  				.line = __LINE__,			\
  			};						\
  		______r = !!(cond);					\
  		______f.miss_hit[______r]++;					\
  		______r;						\
  	}))
  #endif /* CONFIG_PROFILE_ALL_BRANCHES */
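
/*
 * Expansion sketch (illustrative, not from the original source): with
 * CONFIG_PROFILE_ALL_BRANCHES, a plain
 *
 *	if (x) { ... }
 *
 * becomes __trace_if((x)); for a non-constant condition this evaluates
 * roughly as
 *
 *	if (({ ______r = !!(x); ______f.miss_hit[______r]++; ______r; }))
 *
 * so both outcomes of every run-time branch are counted.
 */
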
  #else
  # define likely(x)	__builtin_expect(!!(x), 1)
  # define unlikely(x)	__builtin_expect(!!(x), 0)
  #endif
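
/*
 * Usage sketch (illustrative; 'err' and do_something() are hypothetical):
 * annotate the outcome the caller expects, letting the compiler lay out
 * the expected path as the straight-line fall-through.
 *
 *	int err = do_something();
 *	if (unlikely(err))
 *		return err;
 */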
  
  /* Optimization barrier */
  #ifndef barrier
  # define barrier() __memory_barrier()
  #endif
  #ifndef barrier_data
  # define barrier_data(ptr) barrier()
  #endif
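
/*
 * Usage sketch (illustrative; 'key' and use_key() are hypothetical):
 * barrier_data() makes the compiler assume the pointed-to data is still
 * needed, so a preceding memset() cannot be dropped as a dead store.
 *
 *	char key[16];
 *
 *	use_key(key);
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */
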
  /* workaround for GCC PR82365 if needed */
  #ifndef barrier_before_unreachable
  # define barrier_before_unreachable() do { } while (0)
  #endif
  /* Unreachable code */
  #ifdef CONFIG_STACK_VALIDATION
  #define annotate_reachable() ({						\
  	asm("%c0:
  \t"							\
  	    ".pushsection .discard.reachable
  \t"			\
  	    ".long %c0b - .
  \t"					\
ec1e1b610   Josh Poimboeuf   objtool: Prevent ...
96
97
  	    ".popsection
  \t" : : "i" (__COUNTER__));			\
  })
  #define annotate_unreachable() ({					\
  	asm("%c0:
  \t"							\
  	    ".pushsection .discard.unreachable
  \t"			\
  	    ".long %c0b - .
  \t"					\
ec1e1b610   Josh Poimboeuf   objtool: Prevent ...
106
107
  	    ".popsection
  \t" : : "i" (__COUNTER__));			\
  })
  #define ASM_UNREACHABLE							\
  	"999:
  \t"							\
  	".pushsection .discard.unreachable
  \t"				\
  	".long 999b - .
  \t"						\
  	".popsection
  \t"
  #else
  #define annotate_reachable()
  #define annotate_unreachable()
  #endif
  #ifndef ASM_UNREACHABLE
  # define ASM_UNREACHABLE
  #endif
  #ifndef unreachable
  # define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
  #endif
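
/*
 * Usage sketch (illustrative; the function is hypothetical): unreachable()
 * tells the compiler a point cannot be reached, e.g. to avoid a spurious
 * "control reaches end of non-void function" warning.
 *
 *	int mask_of(int v)
 *	{
 *		switch (v & 1) {
 *		case 0: return 0x0;
 *		case 1: return 0x1;
 *		}
 *		unreachable();
 *	}
 */
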
  /*
   * KENTRY - kernel entry point
   * This can be used to annotate symbols (functions or data) that are used
   * without their linker symbol being referenced explicitly. For example,
   * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
   *
   * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
   * are handled in their own way (with KEEP() in linker scripts).
   *
   * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
   * boot/exception vector code rather than annotate each function and data.
   */
  #ifndef KENTRY
  # define KENTRY(sym)						\
  	extern typeof(sym) sym;					\
  	static const unsigned long __kentry_##sym		\
  	__used							\
  	__attribute__((section("___kentry" "+" #sym ), used))	\
  	= (unsigned long)&sym;
  #endif
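
/*
 * Usage sketch (illustrative; my_vector_entry() is hypothetical): keep a
 * symbol that is only referenced from assembly alive when the linker
 * garbage-collects unused sections.
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */
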
  #ifndef RELOC_HIDE
  # define RELOC_HIDE(ptr, off)					\
    ({ unsigned long __ptr;					\
       __ptr = (unsigned long) (ptr);				\
      (typeof(ptr)) (__ptr + (off)); })
  #endif
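
/*
 * Usage sketch (illustrative; 'base' and 'off' are hypothetical): the
 * round trip through 'unsigned long' hides the arithmetic from the
 * optimizer, so the result may validly point outside the object that
 * 'ptr' points into, as per-CPU accessors rely on.
 *
 *	struct my_data *p = RELOC_HIDE(base, off);
 */
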
  #ifndef OPTIMIZER_HIDE_VAR
  #define OPTIMIZER_HIDE_VAR(var) barrier()
  #endif
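
/*
 * Usage sketch (illustrative; 'sum', 'a' and 'b' are hypothetical):
 * prevent the optimizer from proving facts about a variable's value,
 * e.g. to keep a timing-sensitive comparison from being short-circuited.
 *
 *	OPTIMIZER_HIDE_VAR(a);
 *	sum |= a ^ b;
 */
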
  /* Not-quite-unique ID. */
  #ifndef __UNIQUE_ID
  # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
  #endif
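
/*
 * Expansion sketch (illustrative): __UNIQUE_ID(foo) on line 42 pastes to
 * __UNIQUE_ID_foo42. The ID is unique only per line - hence
 * "not-quite-unique" - so two expansions on the same line collide.
 */
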
  #include <uapi/linux/types.h>
  #define __READ_ONCE_SIZE						\
  ({									\
  	switch (size) {							\
  	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
  	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
  	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
  	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
  	default:							\
  		barrier();						\
  		__builtin_memcpy((void *)res, (const void *)p, size);	\
  		barrier();						\
  	}								\
  })
  
  static __always_inline
  void __read_once_size(const volatile void *p, void *res, int size)
  {
  	__READ_ONCE_SIZE;
  }
  
  #ifdef CONFIG_KASAN
  /*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
   * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
   * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
   */
  static __no_sanitize_address __maybe_unused
  void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  {
  	__READ_ONCE_SIZE;
  }
  #else
  static __always_inline
  void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  {
  	__READ_ONCE_SIZE;
  }
  #endif

  static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  {
  	switch (size) {
  	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
  	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
  	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
  	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
  	default:
  		barrier();
  		__builtin_memcpy((void *)p, (const void *)res, size);
  		barrier();
  	}
  }
  
  /*
   * Prevent the compiler from merging or refetching reads or writes. The
   * compiler is also forbidden from reordering successive instances of
   * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
   * compiler is aware of some particular ordering.  One way to make the
   * compiler aware of ordering is to put the two invocations of READ_ONCE,
   * WRITE_ONCE or ACCESS_ONCE() in different C statements.
   *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
   * data types like structs or unions. If the size of the accessed data
   * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy into the '__u' variable allocated on the stack.
   *
   * Their two major use cases are: (1) Mediating communication between
   * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
   * mutilate accesses that either do not require ordering or that interact
   * with an explicit memory barrier or atomic instruction that provides the
   * required ordering.
   */
  #include <asm/barrier.h>

  #define __READ_ONCE(x, check)						\
  ({									\
  	union { typeof(x) __val; char __c[1]; } __u;			\
  	if (check)							\
  		__read_once_size(&(x), __u.__c, sizeof(x));		\
  	else								\
  		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
  	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
  	__u.__val;							\
  })
  #define READ_ONCE(x) __READ_ONCE(x, 1)
  
  /*
   * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
   * to hide memory access from KASAN.
   */
  #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

  #define WRITE_ONCE(x, val) \
  ({							\
  	union { typeof(x) __val; char __c[1]; } __u =	\
  		{ .__val = (__force typeof(x)) (val) }; \
  	__write_once_size(&(x), __u.__c, sizeof(x));	\
  	__u.__val;					\
  })
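
/*
 * Usage sketch (illustrative; 'data_ready' is a hypothetical flag shared
 * between process context and an IRQ handler on the same CPU):
 *
 *	static int data_ready;
 *
 *	// IRQ handler
 *	WRITE_ONCE(data_ready, 1);
 *
 *	// process context
 *	while (!READ_ONCE(data_ready))
 *		cpu_relax();
 */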

  #endif /* __KERNEL__ */
  
  #endif /* __ASSEMBLY__ */
  #ifndef __optimize
  # define __optimize(level)
  #endif
  /* Compile time object size, -1 for unknown */
  #ifndef __compiletime_object_size
  # define __compiletime_object_size(obj) -1
  #endif
  #ifndef __compiletime_warning
  # define __compiletime_warning(message)
  #endif
  #ifndef __compiletime_error
  # define __compiletime_error(message)
  /*
   * Sparse complains of variable sized arrays due to the temporary variable in
   * __compiletime_assert. Unfortunately we can't just expand it out to make
   * sparse see a constant array size without breaking compiletime_assert on old
   * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
   */
  # ifndef __CHECKER__
  #  define __compiletime_error_fallback(condition) \
  	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
  # endif
  #endif
  #ifndef __compiletime_error_fallback
  # define __compiletime_error_fallback(condition) do { } while (0)
  #endif
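
/*
 * Worked example (editorial note): when 'condition' is 1, the fallback
 * requests sizeof(char[1 - 2 * 1]), i.e. a char[-1] array type with a
 * negative size, which every compiler rejects - so the build still
 * breaks even without __compiletime_error() support. When 'condition'
 * is 0, char[1] is harmless.
 */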

  #ifdef __OPTIMIZE__
  # define __compiletime_assert(condition, msg, prefix, suffix)		\
  	do {								\
  		bool __cond = !(condition);				\
  		extern void prefix ## suffix(void) __compiletime_error(msg); \
  		if (__cond)						\
  			prefix ## suffix();				\
  		__compiletime_error_fallback(__cond);			\
  	} while (0)
  #else
  # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
  #endif
  
  #define _compiletime_assert(condition, msg, prefix, suffix) \
  	__compiletime_assert(condition, msg, prefix, suffix)
  
  /**
   * compiletime_assert - break build and emit msg if condition is false
   * @condition: a compile-time constant condition to check
   * @msg:       a message to emit if condition is false
   *
 * In the tradition of POSIX assert, this macro will break the build if the
   * supplied condition is *false*, emitting the supplied error message if the
   * compiler has support to do so.
   */
  #define compiletime_assert(condition, msg) \
  	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
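
/*
 * Usage sketch (illustrative; struct my_hdr is hypothetical):
 *
 *	compiletime_assert(sizeof(struct my_hdr) <= 64,
 *			   "my_hdr must fit in 64 bytes");
 */
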
  #define compiletime_assert_atomic_type(t)				\
  	compiletime_assert(__native_word(t),				\
  		"Need native word sized stores/loads for atomicity.")
  /*
   * Prevent the compiler from merging or refetching accesses.  The compiler
   * is also forbidden from reordering successive instances of ACCESS_ONCE(),
   * but only when the compiler is aware of some particular ordering.  One way
   * to make the compiler aware of ordering is to put the two invocations of
   * ACCESS_ONCE() in different C statements.
   *
   * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
   * on a union member will work as long as the size of the member matches the
   * size of the union and the size is smaller than word size.
   *
   * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
   * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
   * mutilate accesses that either do not require ordering or that interact
   * with an explicit memory barrier or atomic instruction that provides the
   * required ordering.
   *
   * If possible use READ_ONCE()/WRITE_ONCE() instead.
   */
  #define __ACCESS_ONCE(x) ({ \
  	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
  	(volatile typeof(x) *)&(x); })
  #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
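
/*
 * Legacy usage sketch (illustrative; 'stop' is a hypothetical scalar
 * flag); new code should use READ_ONCE()/WRITE_ONCE() instead:
 *
 *	while (!ACCESS_ONCE(stop))
 *		cpu_relax();
 */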

  /**
   * lockless_dereference() - safely load a pointer for later dereference
   * @p: The pointer to load
   *
   * Similar to rcu_dereference(), but for situations where the pointed-to
   * object's lifetime is managed by something other than RCU.  That
   * "something other" might be reference counting or simple immortality.
   *
   * The seemingly unused variable ___typecheck_p validates that @p is
   * indeed a pointer type by using a pointer to typeof(*p) as the type.
   * Taking a pointer to typeof(*p) again is needed in case p is void *.
   */
  #define lockless_dereference(p) \
  ({ \
  	typeof(p) _________p1 = READ_ONCE(p); \
  	typeof(*(p)) *___typecheck_p __maybe_unused; \
  	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
  	(_________p1); \
  })
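
/*
 * Usage sketch (illustrative; 'gp', struct foo and consume() are
 * hypothetical; 'gp' is published elsewhere with smp_store_release(),
 * and the object's lifetime is guaranteed by a reference count rather
 * than RCU):
 *
 *	struct foo *f = lockless_dereference(gp);
 *	if (f)
 *		consume(f->field);
 */
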
  #endif /* __LINUX_COMPILER_H */