Commit 69d0ee7377eef808e34ba5542b554ec97244b871

Authored by Heiko Carstens
Committed by Ingo Molnar
1 parent 0ee000e5e8

locking: Move spinlock function bodies to header file

Move spinlock function bodies to header file by creating a
static inline version of each variant. Use the inline version
on the out-of-line code.

This shouldn't make any difference besides that the spinlock
code can now be used to generate inlined spinlock code.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124417.859022429@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 3 changed files with 300 additions and 155 deletions (side-by-side diff)

include/linux/spinlock.h
... ... @@ -143,15 +143,6 @@
143 143 */
144 144 #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
145 145  
146   -/*
147   - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
148   - */
149   -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
150   -# include <linux/spinlock_api_smp.h>
151   -#else
152   -# include <linux/spinlock_api_up.h>
153   -#endif
154   -
155 146 #ifdef CONFIG_DEBUG_SPINLOCK
156 147 extern void _raw_spin_lock(spinlock_t *lock);
157 148 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
... ... @@ -379,6 +370,15 @@
379 370 * @lock: the spinlock in question.
380 371 */
381 372 #define spin_can_lock(lock) (!spin_is_locked(lock))
  373 +
  374 +/*
  375 + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  376 + */
  377 +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  378 +# include <linux/spinlock_api_smp.h>
  379 +#else
  380 +# include <linux/spinlock_api_up.h>
  381 +#endif
382 382  
383 383 #endif /* __LINUX_SPINLOCK_H */
include/linux/spinlock_api_smp.h
... ... @@ -60,5 +60,268 @@
60 60 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 61 __releases(lock);
62 62  
  63 +static inline int __spin_trylock(spinlock_t *lock)
  64 +{
  65 + preempt_disable();
  66 + if (_raw_spin_trylock(lock)) {
  67 + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  68 + return 1;
  69 + }
  70 + preempt_enable();
  71 + return 0;
  72 +}
  73 +
  74 +static inline int __read_trylock(rwlock_t *lock)
  75 +{
  76 + preempt_disable();
  77 + if (_raw_read_trylock(lock)) {
  78 + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
  79 + return 1;
  80 + }
  81 + preempt_enable();
  82 + return 0;
  83 +}
  84 +
  85 +static inline int __write_trylock(rwlock_t *lock)
  86 +{
  87 + preempt_disable();
  88 + if (_raw_write_trylock(lock)) {
  89 + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  90 + return 1;
  91 + }
  92 + preempt_enable();
  93 + return 0;
  94 +}
  95 +
  96 +/*
  97 + * If lockdep is enabled then we use the non-preemption spin-ops
  98 + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  99 + * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  100 + */
  101 +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
  102 +
  103 +static inline void __read_lock(rwlock_t *lock)
  104 +{
  105 + preempt_disable();
  106 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
  107 + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
  108 +}
  109 +
  110 +static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
  111 +{
  112 + unsigned long flags;
  113 +
  114 + local_irq_save(flags);
  115 + preempt_disable();
  116 + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  117 + /*
  118 + * On lockdep we dont want the hand-coded irq-enable of
  119 + * _raw_spin_lock_flags() code, because lockdep assumes
  120 + * that interrupts are not re-enabled during lock-acquire:
  121 + */
  122 +#ifdef CONFIG_LOCKDEP
  123 + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  124 +#else
  125 + _raw_spin_lock_flags(lock, &flags);
  126 +#endif
  127 + return flags;
  128 +}
  129 +
  130 +static inline void __spin_lock_irq(spinlock_t *lock)
  131 +{
  132 + local_irq_disable();
  133 + preempt_disable();
  134 + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  135 + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  136 +}
  137 +
  138 +static inline void __spin_lock_bh(spinlock_t *lock)
  139 +{
  140 + local_bh_disable();
  141 + preempt_disable();
  142 + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  143 + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  144 +}
  145 +
  146 +static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
  147 +{
  148 + unsigned long flags;
  149 +
  150 + local_irq_save(flags);
  151 + preempt_disable();
  152 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
  153 + LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
  154 + _raw_read_lock_flags, &flags);
  155 + return flags;
  156 +}
  157 +
  158 +static inline void __read_lock_irq(rwlock_t *lock)
  159 +{
  160 + local_irq_disable();
  161 + preempt_disable();
  162 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
  163 + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
  164 +}
  165 +
  166 +static inline void __read_lock_bh(rwlock_t *lock)
  167 +{
  168 + local_bh_disable();
  169 + preempt_disable();
  170 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
  171 + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
  172 +}
  173 +
  174 +static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
  175 +{
  176 + unsigned long flags;
  177 +
  178 + local_irq_save(flags);
  179 + preempt_disable();
  180 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  181 + LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
  182 + _raw_write_lock_flags, &flags);
  183 + return flags;
  184 +}
  185 +
  186 +static inline void __write_lock_irq(rwlock_t *lock)
  187 +{
  188 + local_irq_disable();
  189 + preempt_disable();
  190 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  191 + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
  192 +}
  193 +
  194 +static inline void __write_lock_bh(rwlock_t *lock)
  195 +{
  196 + local_bh_disable();
  197 + preempt_disable();
  198 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  199 + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
  200 +}
  201 +
  202 +static inline void __spin_lock(spinlock_t *lock)
  203 +{
  204 + preempt_disable();
  205 + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  206 + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  207 +}
  208 +
  209 +static inline void __write_lock(rwlock_t *lock)
  210 +{
  211 + preempt_disable();
  212 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  213 + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
  214 +}
  215 +
  216 +#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
  217 +
  218 +static inline void __spin_unlock(spinlock_t *lock)
  219 +{
  220 + spin_release(&lock->dep_map, 1, _RET_IP_);
  221 + _raw_spin_unlock(lock);
  222 + preempt_enable();
  223 +}
  224 +
  225 +static inline void __write_unlock(rwlock_t *lock)
  226 +{
  227 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  228 + _raw_write_unlock(lock);
  229 + preempt_enable();
  230 +}
  231 +
  232 +static inline void __read_unlock(rwlock_t *lock)
  233 +{
  234 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  235 + _raw_read_unlock(lock);
  236 + preempt_enable();
  237 +}
  238 +
  239 +static inline void __spin_unlock_irqrestore(spinlock_t *lock,
  240 + unsigned long flags)
  241 +{
  242 + spin_release(&lock->dep_map, 1, _RET_IP_);
  243 + _raw_spin_unlock(lock);
  244 + local_irq_restore(flags);
  245 + preempt_enable();
  246 +}
  247 +
  248 +static inline void __spin_unlock_irq(spinlock_t *lock)
  249 +{
  250 + spin_release(&lock->dep_map, 1, _RET_IP_);
  251 + _raw_spin_unlock(lock);
  252 + local_irq_enable();
  253 + preempt_enable();
  254 +}
  255 +
  256 +static inline void __spin_unlock_bh(spinlock_t *lock)
  257 +{
  258 + spin_release(&lock->dep_map, 1, _RET_IP_);
  259 + _raw_spin_unlock(lock);
  260 + preempt_enable_no_resched();
  261 + local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  262 +}
  263 +
  264 +static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
  265 +{
  266 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  267 + _raw_read_unlock(lock);
  268 + local_irq_restore(flags);
  269 + preempt_enable();
  270 +}
  271 +
  272 +static inline void __read_unlock_irq(rwlock_t *lock)
  273 +{
  274 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  275 + _raw_read_unlock(lock);
  276 + local_irq_enable();
  277 + preempt_enable();
  278 +}
  279 +
  280 +static inline void __read_unlock_bh(rwlock_t *lock)
  281 +{
  282 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  283 + _raw_read_unlock(lock);
  284 + preempt_enable_no_resched();
  285 + local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  286 +}
  287 +
  288 +static inline void __write_unlock_irqrestore(rwlock_t *lock,
  289 + unsigned long flags)
  290 +{
  291 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  292 + _raw_write_unlock(lock);
  293 + local_irq_restore(flags);
  294 + preempt_enable();
  295 +}
  296 +
  297 +static inline void __write_unlock_irq(rwlock_t *lock)
  298 +{
  299 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  300 + _raw_write_unlock(lock);
  301 + local_irq_enable();
  302 + preempt_enable();
  303 +}
  304 +
  305 +static inline void __write_unlock_bh(rwlock_t *lock)
  306 +{
  307 + rwlock_release(&lock->dep_map, 1, _RET_IP_);
  308 + _raw_write_unlock(lock);
  309 + preempt_enable_no_resched();
  310 + local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  311 +}
  312 +
  313 +static inline int __spin_trylock_bh(spinlock_t *lock)
  314 +{
  315 + local_bh_disable();
  316 + preempt_disable();
  317 + if (_raw_spin_trylock(lock)) {
  318 + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  319 + return 1;
  320 + }
  321 + preempt_enable_no_resched();
  322 + local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  323 + return 0;
  324 +}
  325 +
63 326 #endif /* __LINUX_SPINLOCK_API_SMP_H */
kernel/spinlock.c
... ... @@ -23,40 +23,19 @@
23 23  
24 24 int __lockfunc _spin_trylock(spinlock_t *lock)
25 25 {
26   - preempt_disable();
27   - if (_raw_spin_trylock(lock)) {
28   - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
29   - return 1;
30   - }
31   -
32   - preempt_enable();
33   - return 0;
  26 + return __spin_trylock(lock);
34 27 }
35 28 EXPORT_SYMBOL(_spin_trylock);
36 29  
37 30 int __lockfunc _read_trylock(rwlock_t *lock)
38 31 {
39   - preempt_disable();
40   - if (_raw_read_trylock(lock)) {
41   - rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
42   - return 1;
43   - }
44   -
45   - preempt_enable();
46   - return 0;
  32 + return __read_trylock(lock);
47 33 }
48 34 EXPORT_SYMBOL(_read_trylock);
49 35  
50 36 int __lockfunc _write_trylock(rwlock_t *lock)
51 37 {
52   - preempt_disable();
53   - if (_raw_write_trylock(lock)) {
54   - rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
55   - return 1;
56   - }
57   -
58   - preempt_enable();
59   - return 0;
  38 + return __write_trylock(lock);
60 39 }
61 40 EXPORT_SYMBOL(_write_trylock);
62 41  
63 42  
64 43  
65 44  
66 45  
67 46  
68 47  
69 48  
70 49  
71 50  
72 51  
73 52  
74 53  
75 54  
... ... @@ -69,129 +48,74 @@
69 48  
70 49 void __lockfunc _read_lock(rwlock_t *lock)
71 50 {
72   - preempt_disable();
73   - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
74   - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
  51 + __read_lock(lock);
75 52 }
76 53 EXPORT_SYMBOL(_read_lock);
77 54  
78 55 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
79 56 {
80   - unsigned long flags;
81   -
82   - local_irq_save(flags);
83   - preempt_disable();
84   - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
85   - /*
86   - * On lockdep we dont want the hand-coded irq-enable of
87   - * _raw_spin_lock_flags() code, because lockdep assumes
88   - * that interrupts are not re-enabled during lock-acquire:
89   - */
90   -#ifdef CONFIG_LOCKDEP
91   - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
92   -#else
93   - _raw_spin_lock_flags(lock, &flags);
94   -#endif
95   - return flags;
  57 + return __spin_lock_irqsave(lock);
96 58 }
97 59 EXPORT_SYMBOL(_spin_lock_irqsave);
98 60  
99 61 void __lockfunc _spin_lock_irq(spinlock_t *lock)
100 62 {
101   - local_irq_disable();
102   - preempt_disable();
103   - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
104   - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  63 + __spin_lock_irq(lock);
105 64 }
106 65 EXPORT_SYMBOL(_spin_lock_irq);
107 66  
108 67 void __lockfunc _spin_lock_bh(spinlock_t *lock)
109 68 {
110   - local_bh_disable();
111   - preempt_disable();
112   - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
113   - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  69 + __spin_lock_bh(lock);
114 70 }
115 71 EXPORT_SYMBOL(_spin_lock_bh);
116 72  
117 73 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
118 74 {
119   - unsigned long flags;
120   -
121   - local_irq_save(flags);
122   - preempt_disable();
123   - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
124   - LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
125   - _raw_read_lock_flags, &flags);
126   - return flags;
  75 + return __read_lock_irqsave(lock);
127 76 }
128 77 EXPORT_SYMBOL(_read_lock_irqsave);
129 78  
130 79 void __lockfunc _read_lock_irq(rwlock_t *lock)
131 80 {
132   - local_irq_disable();
133   - preempt_disable();
134   - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
135   - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
  81 + __read_lock_irq(lock);
136 82 }
137 83 EXPORT_SYMBOL(_read_lock_irq);
138 84  
139 85 void __lockfunc _read_lock_bh(rwlock_t *lock)
140 86 {
141   - local_bh_disable();
142   - preempt_disable();
143   - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
144   - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
  87 + __read_lock_bh(lock);
145 88 }
146 89 EXPORT_SYMBOL(_read_lock_bh);
147 90  
148 91 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
149 92 {
150   - unsigned long flags;
151   -
152   - local_irq_save(flags);
153   - preempt_disable();
154   - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
155   - LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
156   - _raw_write_lock_flags, &flags);
157   - return flags;
  93 + return __write_lock_irqsave(lock);
158 94 }
159 95 EXPORT_SYMBOL(_write_lock_irqsave);
160 96  
161 97 void __lockfunc _write_lock_irq(rwlock_t *lock)
162 98 {
163   - local_irq_disable();
164   - preempt_disable();
165   - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
166   - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
  99 + __write_lock_irq(lock);
167 100 }
168 101 EXPORT_SYMBOL(_write_lock_irq);
169 102  
170 103 void __lockfunc _write_lock_bh(rwlock_t *lock)
171 104 {
172   - local_bh_disable();
173   - preempt_disable();
174   - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
175   - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
  105 + __write_lock_bh(lock);
176 106 }
177 107 EXPORT_SYMBOL(_write_lock_bh);
178 108  
179 109 void __lockfunc _spin_lock(spinlock_t *lock)
180 110 {
181   - preempt_disable();
182   - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
183   - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
  111 + __spin_lock(lock);
184 112 }
185   -
186 113 EXPORT_SYMBOL(_spin_lock);
187 114  
188 115 void __lockfunc _write_lock(rwlock_t *lock)
189 116 {
190   - preempt_disable();
191   - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
192   - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
  117 + __write_lock(lock);
193 118 }
194   -
195 119 EXPORT_SYMBOL(_write_lock);
196 120  
197 121 #else /* CONFIG_PREEMPT: */
198 122  
199 123  
200 124  
201 125  
202 126  
203 127  
204 128  
205 129  
206 130  
207 131  
208 132  
209 133  
... ... @@ -320,121 +244,79 @@
320 244  
321 245 void __lockfunc _spin_unlock(spinlock_t *lock)
322 246 {
323   - spin_release(&lock->dep_map, 1, _RET_IP_);
324   - _raw_spin_unlock(lock);
325   - preempt_enable();
  247 + __spin_unlock(lock);
326 248 }
327 249 EXPORT_SYMBOL(_spin_unlock);
328 250  
329 251 void __lockfunc _write_unlock(rwlock_t *lock)
330 252 {
331   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
332   - _raw_write_unlock(lock);
333   - preempt_enable();
  253 + __write_unlock(lock);
334 254 }
335 255 EXPORT_SYMBOL(_write_unlock);
336 256  
337 257 void __lockfunc _read_unlock(rwlock_t *lock)
338 258 {
339   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
340   - _raw_read_unlock(lock);
341   - preempt_enable();
  259 + __read_unlock(lock);
342 260 }
343 261 EXPORT_SYMBOL(_read_unlock);
344 262  
345 263 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
346 264 {
347   - spin_release(&lock->dep_map, 1, _RET_IP_);
348   - _raw_spin_unlock(lock);
349   - local_irq_restore(flags);
350   - preempt_enable();
  265 + __spin_unlock_irqrestore(lock, flags);
351 266 }
352 267 EXPORT_SYMBOL(_spin_unlock_irqrestore);
353 268  
354 269 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
355 270 {
356   - spin_release(&lock->dep_map, 1, _RET_IP_);
357   - _raw_spin_unlock(lock);
358   - local_irq_enable();
359   - preempt_enable();
  271 + __spin_unlock_irq(lock);
360 272 }
361 273 EXPORT_SYMBOL(_spin_unlock_irq);
362 274  
363 275 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
364 276 {
365   - spin_release(&lock->dep_map, 1, _RET_IP_);
366   - _raw_spin_unlock(lock);
367   - preempt_enable_no_resched();
368   - local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  277 + __spin_unlock_bh(lock);
369 278 }
370 279 EXPORT_SYMBOL(_spin_unlock_bh);
371 280  
372 281 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
373 282 {
374   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
375   - _raw_read_unlock(lock);
376   - local_irq_restore(flags);
377   - preempt_enable();
  283 + __read_unlock_irqrestore(lock, flags);
378 284 }
379 285 EXPORT_SYMBOL(_read_unlock_irqrestore);
380 286  
381 287 void __lockfunc _read_unlock_irq(rwlock_t *lock)
382 288 {
383   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
384   - _raw_read_unlock(lock);
385   - local_irq_enable();
386   - preempt_enable();
  289 + __read_unlock_irq(lock);
387 290 }
388 291 EXPORT_SYMBOL(_read_unlock_irq);
389 292  
390 293 void __lockfunc _read_unlock_bh(rwlock_t *lock)
391 294 {
392   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
393   - _raw_read_unlock(lock);
394   - preempt_enable_no_resched();
395   - local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  295 + __read_unlock_bh(lock);
396 296 }
397 297 EXPORT_SYMBOL(_read_unlock_bh);
398 298  
399 299 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
400 300 {
401   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
402   - _raw_write_unlock(lock);
403   - local_irq_restore(flags);
404   - preempt_enable();
  301 + __write_unlock_irqrestore(lock, flags);
405 302 }
406 303 EXPORT_SYMBOL(_write_unlock_irqrestore);
407 304  
408 305 void __lockfunc _write_unlock_irq(rwlock_t *lock)
409 306 {
410   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
411   - _raw_write_unlock(lock);
412   - local_irq_enable();
413   - preempt_enable();
  307 + __write_unlock_irq(lock);
414 308 }
415 309 EXPORT_SYMBOL(_write_unlock_irq);
416 310  
417 311 void __lockfunc _write_unlock_bh(rwlock_t *lock)
418 312 {
419   - rwlock_release(&lock->dep_map, 1, _RET_IP_);
420   - _raw_write_unlock(lock);
421   - preempt_enable_no_resched();
422   - local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  313 + __write_unlock_bh(lock);
423 314 }
424 315 EXPORT_SYMBOL(_write_unlock_bh);
425 316  
426 317 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
427 318 {
428   - local_bh_disable();
429   - preempt_disable();
430   - if (_raw_spin_trylock(lock)) {
431   - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
432   - return 1;
433   - }
434   -
435   - preempt_enable_no_resched();
436   - local_bh_enable_ip((unsigned long)__builtin_return_address(0));
437   - return 0;
  319 + return __spin_trylock_bh(lock);
438 320 }
439 321 EXPORT_SYMBOL(_spin_trylock_bh);
440 322