Commit 892a7c67c12da63fa4b51728bbe5b982356a090a

Authored by Heiko Carstens
Committed by Ingo Molnar
1 parent 69d0ee7377

locking: Allow arch-inlined spinlocks

This allows an architecture to specify, per lock variant, whether
the locking code should be kept out-of-line or inlined.

If an architecture wants out-of-line locking code, no change is
needed. To force inlining of e.g. spin_lock(), the line:

  #define __always_inline__spin_lock

needs to be added to arch/<...>/include/asm/spinlock.h
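
For example, a hypothetical architecture that wants the spin_lock()
and spin_unlock() fast paths inlined would add something like the
following (the __always_inline__* macro names are the ones
introduced by this patch; the particular set of variants chosen
here is purely illustrative):

  /* arch/<...>/include/asm/spinlock.h */
  #define __always_inline__spin_lock
  #define __always_inline__spin_lock_irq
  #define __always_inline__spin_unlock
  #define __always_inline__spin_unlock_irq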

If CONFIG_DEBUG_SPINLOCK or CONFIG_GENERIC_LOCKBREAK is defined,
the per-architecture defines are (partly) ignored and out-of-line
spinlock code is still generated.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.375299024@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 2 changed files with 175 additions and 0 deletions

include/linux/spinlock_api_smp.h
... ... @@ -60,6 +60,125 @@
60 60 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 61 __releases(lock);
62 62  
  63 +#ifndef CONFIG_DEBUG_SPINLOCK
  64 +#ifndef CONFIG_GENERIC_LOCKBREAK
  65 +
  66 +#ifdef __always_inline__spin_lock
  67 +#define _spin_lock(lock) __spin_lock(lock)
  68 +#endif
  69 +
  70 +#ifdef __always_inline__read_lock
  71 +#define _read_lock(lock) __read_lock(lock)
  72 +#endif
  73 +
  74 +#ifdef __always_inline__write_lock
  75 +#define _write_lock(lock) __write_lock(lock)
  76 +#endif
  77 +
  78 +#ifdef __always_inline__spin_lock_bh
  79 +#define _spin_lock_bh(lock) __spin_lock_bh(lock)
  80 +#endif
  81 +
  82 +#ifdef __always_inline__read_lock_bh
  83 +#define _read_lock_bh(lock) __read_lock_bh(lock)
  84 +#endif
  85 +
  86 +#ifdef __always_inline__write_lock_bh
  87 +#define _write_lock_bh(lock) __write_lock_bh(lock)
  88 +#endif
  89 +
  90 +#ifdef __always_inline__spin_lock_irq
  91 +#define _spin_lock_irq(lock) __spin_lock_irq(lock)
  92 +#endif
  93 +
  94 +#ifdef __always_inline__read_lock_irq
  95 +#define _read_lock_irq(lock) __read_lock_irq(lock)
  96 +#endif
  97 +
  98 +#ifdef __always_inline__write_lock_irq
  99 +#define _write_lock_irq(lock) __write_lock_irq(lock)
  100 +#endif
  101 +
  102 +#ifdef __always_inline__spin_lock_irqsave
  103 +#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
  104 +#endif
  105 +
  106 +#ifdef __always_inline__read_lock_irqsave
  107 +#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
  108 +#endif
  109 +
  110 +#ifdef __always_inline__write_lock_irqsave
  111 +#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
  112 +#endif
  113 +
  114 +#endif /* !CONFIG_GENERIC_LOCKBREAK */
  115 +
  116 +#ifdef __always_inline__spin_trylock
  117 +#define _spin_trylock(lock) __spin_trylock(lock)
  118 +#endif
  119 +
  120 +#ifdef __always_inline__read_trylock
  121 +#define _read_trylock(lock) __read_trylock(lock)
  122 +#endif
  123 +
  124 +#ifdef __always_inline__write_trylock
  125 +#define _write_trylock(lock) __write_trylock(lock)
  126 +#endif
  127 +
  128 +#ifdef __always_inline__spin_trylock_bh
  129 +#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
  130 +#endif
  131 +
  132 +#ifdef __always_inline__spin_unlock
  133 +#define _spin_unlock(lock) __spin_unlock(lock)
  134 +#endif
  135 +
  136 +#ifdef __always_inline__read_unlock
  137 +#define _read_unlock(lock) __read_unlock(lock)
  138 +#endif
  139 +
  140 +#ifdef __always_inline__write_unlock
  141 +#define _write_unlock(lock) __write_unlock(lock)
  142 +#endif
  143 +
  144 +#ifdef __always_inline__spin_unlock_bh
  145 +#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
  146 +#endif
  147 +
  148 +#ifdef __always_inline__read_unlock_bh
  149 +#define _read_unlock_bh(lock) __read_unlock_bh(lock)
  150 +#endif
  151 +
  152 +#ifdef __always_inline__write_unlock_bh
  153 +#define _write_unlock_bh(lock) __write_unlock_bh(lock)
  154 +#endif
  155 +
  156 +#ifdef __always_inline__spin_unlock_irq
  157 +#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
  158 +#endif
  159 +
  160 +#ifdef __always_inline__read_unlock_irq
  161 +#define _read_unlock_irq(lock) __read_unlock_irq(lock)
  162 +#endif
  163 +
  164 +#ifdef __always_inline__write_unlock_irq
  165 +#define _write_unlock_irq(lock) __write_unlock_irq(lock)
  166 +#endif
  167 +
  168 +#ifdef __always_inline__spin_unlock_irqrestore
  169 +#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
  170 +#endif
  171 +
  172 +#ifdef __always_inline__read_unlock_irqrestore
  173 +#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
  174 +#endif
  175 +
  176 +#ifdef __always_inline__write_unlock_irqrestore
  177 +#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
  178 +#endif
  179 +
  180 +#endif /* CONFIG_DEBUG_SPINLOCK */
  181 +
63 182 static inline int __spin_trylock(spinlock_t *lock)
64 183 {
65 184 preempt_disable();
kernel/spinlock.c
... ... @@ -21,23 +21,29 @@
21 21 #include <linux/debug_locks.h>
22 22 #include <linux/module.h>
23 23  
  24 +#ifndef _spin_trylock
24 25 int __lockfunc _spin_trylock(spinlock_t *lock)
25 26 {
26 27 return __spin_trylock(lock);
27 28 }
28 29 EXPORT_SYMBOL(_spin_trylock);
  30 +#endif
29 31  
  32 +#ifndef _read_trylock
30 33 int __lockfunc _read_trylock(rwlock_t *lock)
31 34 {
32 35 return __read_trylock(lock);
33 36 }
34 37 EXPORT_SYMBOL(_read_trylock);
  38 +#endif
35 39  
  40 +#ifndef _write_trylock
36 41 int __lockfunc _write_trylock(rwlock_t *lock)
37 42 {
38 43 return __write_trylock(lock);
39 44 }
40 45 EXPORT_SYMBOL(_write_trylock);
  46 +#endif
41 47  
42 48 /*
43 49 * If lockdep is enabled then we use the non-preemption spin-ops
... ... @@ -46,77 +52,101 @@
46 52 */
47 53 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
48 54  
  55 +#ifndef _read_lock
49 56 void __lockfunc _read_lock(rwlock_t *lock)
50 57 {
51 58 __read_lock(lock);
52 59 }
53 60 EXPORT_SYMBOL(_read_lock);
  61 +#endif
54 62  
  63 +#ifndef _spin_lock_irqsave
55 64 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
56 65 {
57 66 return __spin_lock_irqsave(lock);
58 67 }
59 68 EXPORT_SYMBOL(_spin_lock_irqsave);
  69 +#endif
60 70  
  71 +#ifndef _spin_lock_irq
61 72 void __lockfunc _spin_lock_irq(spinlock_t *lock)
62 73 {
63 74 __spin_lock_irq(lock);
64 75 }
65 76 EXPORT_SYMBOL(_spin_lock_irq);
  77 +#endif
66 78  
  79 +#ifndef _spin_lock_bh
67 80 void __lockfunc _spin_lock_bh(spinlock_t *lock)
68 81 {
69 82 __spin_lock_bh(lock);
70 83 }
71 84 EXPORT_SYMBOL(_spin_lock_bh);
  85 +#endif
72 86  
  87 +#ifndef _read_lock_irqsave
73 88 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
74 89 {
75 90 return __read_lock_irqsave(lock);
76 91 }
77 92 EXPORT_SYMBOL(_read_lock_irqsave);
  93 +#endif
78 94  
  95 +#ifndef _read_lock_irq
79 96 void __lockfunc _read_lock_irq(rwlock_t *lock)
80 97 {
81 98 __read_lock_irq(lock);
82 99 }
83 100 EXPORT_SYMBOL(_read_lock_irq);
  101 +#endif
84 102  
  103 +#ifndef _read_lock_bh
85 104 void __lockfunc _read_lock_bh(rwlock_t *lock)
86 105 {
87 106 __read_lock_bh(lock);
88 107 }
89 108 EXPORT_SYMBOL(_read_lock_bh);
  109 +#endif
90 110  
  111 +#ifndef _write_lock_irqsave
91 112 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
92 113 {
93 114 return __write_lock_irqsave(lock);
94 115 }
95 116 EXPORT_SYMBOL(_write_lock_irqsave);
  117 +#endif
96 118  
  119 +#ifndef _write_lock_irq
97 120 void __lockfunc _write_lock_irq(rwlock_t *lock)
98 121 {
99 122 __write_lock_irq(lock);
100 123 }
101 124 EXPORT_SYMBOL(_write_lock_irq);
  125 +#endif
102 126  
  127 +#ifndef _write_lock_bh
103 128 void __lockfunc _write_lock_bh(rwlock_t *lock)
104 129 {
105 130 __write_lock_bh(lock);
106 131 }
107 132 EXPORT_SYMBOL(_write_lock_bh);
  133 +#endif
108 134  
  135 +#ifndef _spin_lock
109 136 void __lockfunc _spin_lock(spinlock_t *lock)
110 137 {
111 138 __spin_lock(lock);
112 139 }
113 140 EXPORT_SYMBOL(_spin_lock);
  141 +#endif
114 142  
  143 +#ifndef _write_lock
115 144 void __lockfunc _write_lock(rwlock_t *lock)
116 145 {
117 146 __write_lock(lock);
118 147 }
119 148 EXPORT_SYMBOL(_write_lock);
  149 +#endif
120 150  
121 151 #else /* CONFIG_PREEMPT: */
... ... @@ -242,83 +272,109 @@
242 272  
243 273 #endif
244 274  
  275 +#ifndef _spin_unlock
245 276 void __lockfunc _spin_unlock(spinlock_t *lock)
246 277 {
247 278 __spin_unlock(lock);
248 279 }
249 280 EXPORT_SYMBOL(_spin_unlock);
  281 +#endif
250 282  
  283 +#ifndef _write_unlock
251 284 void __lockfunc _write_unlock(rwlock_t *lock)
252 285 {
253 286 __write_unlock(lock);
254 287 }
255 288 EXPORT_SYMBOL(_write_unlock);
  289 +#endif
256 290  
  291 +#ifndef _read_unlock
257 292 void __lockfunc _read_unlock(rwlock_t *lock)
258 293 {
259 294 __read_unlock(lock);
260 295 }
261 296 EXPORT_SYMBOL(_read_unlock);
  297 +#endif
262 298  
  299 +#ifndef _spin_unlock_irqrestore
263 300 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
264 301 {
265 302 __spin_unlock_irqrestore(lock, flags);
266 303 }
267 304 EXPORT_SYMBOL(_spin_unlock_irqrestore);
  305 +#endif
268 306  
  307 +#ifndef _spin_unlock_irq
269 308 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
270 309 {
271 310 __spin_unlock_irq(lock);
272 311 }
273 312 EXPORT_SYMBOL(_spin_unlock_irq);
  313 +#endif
274 314  
  315 +#ifndef _spin_unlock_bh
275 316 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
276 317 {
277 318 __spin_unlock_bh(lock);
278 319 }
279 320 EXPORT_SYMBOL(_spin_unlock_bh);
  321 +#endif
280 322  
  323 +#ifndef _read_unlock_irqrestore
281 324 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
282 325 {
283 326 __read_unlock_irqrestore(lock, flags);
284 327 }
285 328 EXPORT_SYMBOL(_read_unlock_irqrestore);
  329 +#endif
286 330  
  331 +#ifndef _read_unlock_irq
287 332 void __lockfunc _read_unlock_irq(rwlock_t *lock)
288 333 {
289 334 __read_unlock_irq(lock);
290 335 }
291 336 EXPORT_SYMBOL(_read_unlock_irq);
  337 +#endif
292 338  
  339 +#ifndef _read_unlock_bh
293 340 void __lockfunc _read_unlock_bh(rwlock_t *lock)
294 341 {
295 342 __read_unlock_bh(lock);
296 343 }
297 344 EXPORT_SYMBOL(_read_unlock_bh);
  345 +#endif
298 346  
  347 +#ifndef _write_unlock_irqrestore
299 348 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
300 349 {
301 350 __write_unlock_irqrestore(lock, flags);
302 351 }
303 352 EXPORT_SYMBOL(_write_unlock_irqrestore);
  353 +#endif
304 354  
  355 +#ifndef _write_unlock_irq
305 356 void __lockfunc _write_unlock_irq(rwlock_t *lock)
306 357 {
307 358 __write_unlock_irq(lock);
308 359 }
309 360 EXPORT_SYMBOL(_write_unlock_irq);
  361 +#endif
310 362  
  363 +#ifndef _write_unlock_bh
311 364 void __lockfunc _write_unlock_bh(rwlock_t *lock)
312 365 {
313 366 __write_unlock_bh(lock);
314 367 }
315 368 EXPORT_SYMBOL(_write_unlock_bh);
  369 +#endif
316 370  
  371 +#ifndef _spin_trylock_bh
317 372 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
318 373 {
319 374 return __spin_trylock_bh(lock);
320 375 }
321 376 EXPORT_SYMBOL(_spin_trylock_bh);
  377 +#endif
322 378  
323 379 notrace int in_lock_functions(unsigned long addr)
324 380 {